prithivMLmods committed on
Commit c1ab74b · verified · 1 Parent(s): 51635e1

Add files using upload-large-folder tool

README.md ADDED
@@ -0,0 +1,23 @@
+ ---
+ license: apache-2.0
+ language:
+ - en
+ base_model:
+ - google/vit-base-patch16-224-in21k
+ pipeline_tag: image-classification
+ library_name: transformers
+ tags:
+ - Deepfake
+ - Quality
+ - Assess
+ ---
+ Classification report:
+
+                          precision    recall  f1-score   support
+
+       Issue In Deepfake     0.7962    0.8067    0.8014      1500
+   High Quality Deepfake     0.7877    0.7767    0.7822      1500
+
+                accuracy                         0.7940      3000
+               macro avg     0.7920    0.7917    0.7918      3000
+            weighted avg     0.7920    0.7917    0.7918      3000
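
The card metadata above declares `pipeline_tag: image-classification` with `library_name: transformers`, so the model should be usable through the standard pipeline API. A minimal inference sketch, assuming a placeholder repository id and a placeholder local image path (neither appears in this commit):

```python
from transformers import pipeline

# Placeholder repo id -- substitute the actual Hugging Face repository name.
classifier = pipeline("image-classification", model="prithivMLmods/<this-repo>")

# Placeholder image path; a PIL.Image or an image URL also works here.
predictions = classifier("example_face.jpg")
print(predictions)
# Expected labels, per id2label in config.json:
#   "Issue In Deepfake" and "High Quality Deepfake"
```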
checkpoint-1420/config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Issue In Deepfake",
+     "1": "High Quality Deepfake"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "High Quality Deepfake": 1,
+     "Issue In Deepfake": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.48.3"
+ }
checkpoint-1420/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46c4a3ae5068568cd79375874d38ad677ded7e2de776fcdc5fd41bf66db94478
+ size 343223968
checkpoint-1420/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7e4a742cba94f22e384fbeb8a411b63ab8b7d63638d89c850167984978addc14
+ size 686568890
checkpoint-1420/preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "do_convert_rgb": null,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
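
These values describe the standard ViT preprocessing: resize to 224×224, rescale pixel values by 1/255, then normalize each channel with mean and std 0.5. A small sketch that reproduces the same transform by constructing a `ViTImageProcessor` directly from these settings (the image path is a placeholder):

```python
from PIL import Image
from transformers import ViTImageProcessor

# Mirror the preprocessor_config.json shown above.
processor = ViTImageProcessor(
    do_resize=True,
    size={"height": 224, "width": 224},
    do_rescale=True,
    rescale_factor=1 / 255,          # 0.00392156862745098
    do_normalize=True,
    image_mean=[0.5, 0.5, 0.5],
    image_std=[0.5, 0.5, 0.5],
)

image = Image.open("example_face.jpg").convert("RGB")  # placeholder path
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])
```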
checkpoint-1420/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:86a4a7e816f88edc081390bd956f4fabdc1b1556ad7a2397d8d820f1911c41ee
+ size 14244
checkpoint-1420/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f63e08fe9dc78b8349d64d2b737a4babfa26bd19b28544333582e3154e2c6c1
+ size 1064
checkpoint-1420/trainer_state.json ADDED
@@ -0,0 +1,247 @@
+ {
+   "best_metric": 0.6275482773780823,
+   "best_model_checkpoint": "Mini1/checkpoint-1420",
+   "epoch": 20.0,
+   "eval_steps": 500,
+   "global_step": 1420,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.5433333333333333,
+       "eval_loss": 0.6880869269371033,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 109.431,
+       "eval_samples_per_second": 27.415,
+       "eval_steps_per_second": 1.718,
+       "step": 71
+     },
+     {
+       "epoch": 2.0,
+       "eval_accuracy": 0.5716666666666667,
+       "eval_loss": 0.6833891868591309,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 106.6901,
+       "eval_samples_per_second": 28.119,
+       "eval_steps_per_second": 1.762,
+       "step": 142
+     },
+     {
+       "epoch": 3.0,
+       "eval_accuracy": 0.6,
+       "eval_loss": 0.6785082221031189,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 107.3244,
+       "eval_samples_per_second": 27.953,
+       "eval_steps_per_second": 1.752,
+       "step": 213
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.6186666666666667,
+       "eval_loss": 0.6727880835533142,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 108.3357,
+       "eval_samples_per_second": 27.692,
+       "eval_steps_per_second": 1.735,
+       "step": 284
+     },
+     {
+       "epoch": 5.0,
+       "eval_accuracy": 0.6246666666666667,
+       "eval_loss": 0.6669621467590332,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 108.5687,
+       "eval_samples_per_second": 27.632,
+       "eval_steps_per_second": 1.732,
+       "step": 355
+     },
+     {
+       "epoch": 6.0,
+       "eval_accuracy": 0.6333333333333333,
+       "eval_loss": 0.6616804599761963,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 108.5066,
+       "eval_samples_per_second": 27.648,
+       "eval_steps_per_second": 1.733,
+       "step": 426
+     },
+     {
+       "epoch": 7.0,
+       "eval_accuracy": 0.638,
+       "eval_loss": 0.6570075154304504,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 107.795,
+       "eval_samples_per_second": 27.831,
+       "eval_steps_per_second": 1.744,
+       "step": 497
+     },
+     {
+       "epoch": 7.042253521126761,
+       "grad_norm": 0.8555379509925842,
+       "learning_rate": 2.0145985401459852e-06,
+       "loss": 0.6709,
+       "step": 500
+     },
+     {
+       "epoch": 8.0,
+       "eval_accuracy": 0.6446666666666667,
+       "eval_loss": 0.6513518691062927,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 108.562,
+       "eval_samples_per_second": 27.634,
+       "eval_steps_per_second": 1.732,
+       "step": 568
+     },
+     {
+       "epoch": 9.0,
+       "eval_accuracy": 0.6516666666666666,
+       "eval_loss": 0.6470227241516113,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 107.7046,
+       "eval_samples_per_second": 27.854,
+       "eval_steps_per_second": 1.746,
+       "step": 639
+     },
+     {
+       "epoch": 10.0,
+       "eval_accuracy": 0.6556666666666666,
+       "eval_loss": 0.6425723433494568,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 107.365,
+       "eval_samples_per_second": 27.942,
+       "eval_steps_per_second": 1.751,
+       "step": 710
+     },
+     {
+       "epoch": 11.0,
+       "eval_accuracy": 0.6573333333333333,
+       "eval_loss": 0.6387879848480225,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 107.5926,
+       "eval_samples_per_second": 27.883,
+       "eval_steps_per_second": 1.747,
+       "step": 781
+     },
+     {
+       "epoch": 12.0,
+       "eval_accuracy": 0.6556666666666666,
+       "eval_loss": 0.6369943022727966,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 109.0526,
+       "eval_samples_per_second": 27.51,
+       "eval_steps_per_second": 1.724,
+       "step": 852
+     },
+     {
+       "epoch": 13.0,
+       "eval_accuracy": 0.6603333333333333,
+       "eval_loss": 0.633726179599762,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 107.7724,
+       "eval_samples_per_second": 27.836,
+       "eval_steps_per_second": 1.744,
+       "step": 923
+     },
+     {
+       "epoch": 14.0,
+       "eval_accuracy": 0.663,
+       "eval_loss": 0.6316381692886353,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 107.0384,
+       "eval_samples_per_second": 28.027,
+       "eval_steps_per_second": 1.756,
+       "step": 994
+     },
+     {
+       "epoch": 14.084507042253522,
+       "grad_norm": 1.2652915716171265,
+       "learning_rate": 9.197080291970804e-07,
+       "loss": 0.6198,
+       "step": 1000
+     },
+     {
+       "epoch": 15.0,
+       "eval_accuracy": 0.6593333333333333,
+       "eval_loss": 0.6307070851325989,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 107.5209,
+       "eval_samples_per_second": 27.902,
+       "eval_steps_per_second": 1.748,
+       "step": 1065
+     },
+     {
+       "epoch": 16.0,
+       "eval_accuracy": 0.6623333333333333,
+       "eval_loss": 0.6293432712554932,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 108.0305,
+       "eval_samples_per_second": 27.77,
+       "eval_steps_per_second": 1.74,
+       "step": 1136
+     },
+     {
+       "epoch": 17.0,
+       "eval_accuracy": 0.6596666666666666,
+       "eval_loss": 0.6289345026016235,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 107.1455,
+       "eval_samples_per_second": 27.999,
+       "eval_steps_per_second": 1.755,
+       "step": 1207
+     },
+     {
+       "epoch": 18.0,
+       "eval_accuracy": 0.6596666666666666,
+       "eval_loss": 0.6281149983406067,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 107.4244,
+       "eval_samples_per_second": 27.927,
+       "eval_steps_per_second": 1.75,
+       "step": 1278
+     },
+     {
+       "epoch": 19.0,
+       "eval_accuracy": 0.6596666666666666,
+       "eval_loss": 0.6277201771736145,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 111.4808,
+       "eval_samples_per_second": 26.91,
+       "eval_steps_per_second": 1.686,
+       "step": 1349
+     },
+     {
+       "epoch": 20.0,
+       "eval_accuracy": 0.6606666666666666,
+       "eval_loss": 0.6275482773780823,
+       "eval_model_preparation_time": 0.0038,
+       "eval_runtime": 109.0143,
+       "eval_samples_per_second": 27.519,
+       "eval_steps_per_second": 1.725,
+       "step": 1420
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 1420,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 20,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 6.97427906531328e+18,
+   "train_batch_size": 64,
+   "trial_name": null,
+   "trial_params": null
+ }
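
trainer_state.json records one evaluation entry per epoch: eval accuracy rises from 0.5433 at epoch 1 to 0.6607 at epoch 20, where the eval loss (0.6275) matches best_metric. A minimal sketch for pulling that curve out of the file, assuming the checkpoint folder has been downloaded locally under the path shown:

```python
import json

# Hypothetical local path -- adjust to wherever the checkpoint was downloaded.
with open("checkpoint-1420/trainer_state.json") as f:
    state = json.load(f)

# Keep only the evaluation entries (the training-loss logs lack "eval_accuracy").
eval_logs = [e for e in state["log_history"] if "eval_accuracy" in e]
for entry in eval_logs:
    print(f"epoch {entry['epoch']:>4}: "
          f"eval_accuracy={entry['eval_accuracy']:.4f}, "
          f"eval_loss={entry['eval_loss']:.4f}")
```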
checkpoint-1420/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:486b94e9fe641b88e7cc93143ba88f12ae8a24a19e9cfa852e2451bbf6270762
+ size 5240
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "Issue In Deepfake",
+     "1": "High Quality Deepfake"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "High Quality Deepfake": 1,
+     "Issue In Deepfake": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.48.3"
+ }
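
The root config.json mirrors the checkpoint config: a ViT-Base encoder (12 layers, 12 heads, hidden size 768, 16×16 patches) with a two-class single-label head. For explicit control over logits and label names, a sketch using the Auto classes (the repository id and image path are placeholders, not taken from this commit):

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

# Placeholder identifiers -- replace with the actual repo id and a real image.
model_id = "prithivMLmods/<this-repo>"
image = Image.open("example_face.jpg").convert("RGB")

processor = AutoImageProcessor.from_pretrained(model_id)
model = AutoModelForImageClassification.from_pretrained(model_id)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# config.json maps 0 -> "Issue In Deepfake", 1 -> "High Quality Deepfake".
probs = logits.softmax(dim=-1)[0]
for idx, p in enumerate(probs.tolist()):
    print(f"{model.config.id2label[idx]}: {p:.4f}")
```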
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46c4a3ae5068568cd79375874d38ad677ded7e2de776fcdc5fd41bf66db94478
+ size 343223968
preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "do_convert_rgb": null,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:486b94e9fe641b88e7cc93143ba88f12ae8a24a19e9cfa852e2451bbf6270762
+ size 5240