Melo1512 committed
Commit ded46c3 · verified · 1 Parent(s): 7f59c78

End of training

all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.9585338620875524,
+ "eval_loss": 0.11395391821861267,
+ "eval_runtime": 80.7513,
+ "eval_samples_per_second": 321.642,
+ "eval_steps_per_second": 5.028,
+ "total_flos": 5.2079419237154e+18,
+ "train_loss": 0.16386900280530636,
+ "train_runtime": 2488.4676,
+ "train_samples_per_second": 106.949,
+ "train_steps_per_second": 0.418
+ }
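
The aggregated metrics above can be loaded and cross-checked with a few lines of Python. This is a minimal sketch, assuming all_results.json is read from the repository root; the derived sample count is only an approximation from the reported throughput.

```python
import json

# Load the aggregated train/eval metrics written at the end of training.
# Path is an assumption: the file is expected in the repository root.
with open("all_results.json") as f:
    results = json.load(f)

print(f"eval accuracy: {results['eval_accuracy']:.4f}")
print(f"eval loss:     {results['eval_loss']:.4f}")

# Throughput * runtime should roughly recover the evaluation set size
# (321.642 samples/s * 80.7513 s ≈ 25,973 samples).
approx_eval_samples = results["eval_samples_per_second"] * results["eval_runtime"]
print(f"approx. eval samples: {approx_eval_samples:.0f}")
```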
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.9585338620875524,
+ "eval_loss": 0.11395391821861267,
+ "eval_runtime": 80.7513,
+ "eval_samples_per_second": 321.642,
+ "eval_steps_per_second": 5.028
+ }
runs/Dec12_16-08-21_ae1aa77fe319/events.out.tfevents.1734022304.ae1aa77fe319.236.13 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0b52a06cadd7733a6a72f495f3e5ca6e2b44d967105aa2df67e8ca13a9d0cc21
+ size 411
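
The entry above is a Git LFS pointer (oid and size), not the TensorBoard file itself. Below is a minimal sketch for inspecting the logged scalars once the LFS object has been fetched locally (e.g. via git lfs pull), assuming the standard tensorboard package and the run directory listed above.

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Point the accumulator at the run directory; the path is taken from the
# file listed above and assumes the LFS object has been pulled locally.
ea = EventAccumulator("runs/Dec12_16-08-21_ae1aa77fe319")
ea.Reload()

# A 411-byte events file holds only a handful of scalars (the final metrics).
for tag in ea.Tags()["scalars"]:
    for event in ea.Scalars(tag):
        print(tag, event.step, event.value)
```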
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 5.0,
+ "total_flos": 5.2079419237154e+18,
+ "train_loss": 0.16386900280530636,
+ "train_runtime": 2488.4676,
+ "train_samples_per_second": 106.949,
+ "train_steps_per_second": 0.418
+ }
trainer_state.json ADDED
@@ -0,0 +1,815 @@
+ {
+ "best_metric": 0.9585338620875524,
+ "best_model_checkpoint": "vit-msn-small-wbc-classifier-mono-V-all/checkpoint-1040",
+ "epoch": 5.0,
+ "eval_steps": 500,
+ "global_step": 1040,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.04807692307692308,
+ "grad_norm": 4.224180221557617,
+ "learning_rate": 4.807692307692308e-06,
+ "loss": 0.5899,
+ "step": 10
+ },
+ {
+ "epoch": 0.09615384615384616,
+ "grad_norm": 1.716335654258728,
+ "learning_rate": 9.615384615384616e-06,
+ "loss": 0.2795,
+ "step": 20
+ },
+ {
+ "epoch": 0.14423076923076922,
+ "grad_norm": 1.9335592985153198,
+ "learning_rate": 1.4423076923076923e-05,
+ "loss": 0.2528,
+ "step": 30
+ },
+ {
+ "epoch": 0.19230769230769232,
+ "grad_norm": 16.045499801635742,
+ "learning_rate": 1.923076923076923e-05,
+ "loss": 0.248,
+ "step": 40
+ },
+ {
+ "epoch": 0.2403846153846154,
+ "grad_norm": 3.4744293689727783,
+ "learning_rate": 2.4038461538461542e-05,
+ "loss": 0.2172,
+ "step": 50
+ },
+ {
+ "epoch": 0.28846153846153844,
+ "grad_norm": 7.248939514160156,
+ "learning_rate": 2.8846153846153845e-05,
+ "loss": 0.1973,
+ "step": 60
+ },
+ {
+ "epoch": 0.33653846153846156,
+ "grad_norm": 13.428326606750488,
+ "learning_rate": 3.365384615384616e-05,
+ "loss": 0.245,
+ "step": 70
+ },
+ {
+ "epoch": 0.38461538461538464,
+ "grad_norm": 14.730627059936523,
+ "learning_rate": 3.846153846153846e-05,
+ "loss": 0.23,
+ "step": 80
+ },
+ {
+ "epoch": 0.4326923076923077,
+ "grad_norm": 6.623648643493652,
+ "learning_rate": 4.326923076923077e-05,
+ "loss": 0.1977,
+ "step": 90
+ },
+ {
+ "epoch": 0.4807692307692308,
+ "grad_norm": 3.5982465744018555,
+ "learning_rate": 4.8076923076923084e-05,
+ "loss": 0.1912,
+ "step": 100
+ },
+ {
+ "epoch": 0.5288461538461539,
+ "grad_norm": 8.891210556030273,
+ "learning_rate": 4.9679487179487185e-05,
+ "loss": 0.1757,
+ "step": 110
+ },
+ {
+ "epoch": 0.5769230769230769,
+ "grad_norm": 16.044160842895508,
+ "learning_rate": 4.9145299145299147e-05,
+ "loss": 0.2119,
+ "step": 120
+ },
+ {
+ "epoch": 0.625,
+ "grad_norm": 4.071866989135742,
+ "learning_rate": 4.8611111111111115e-05,
+ "loss": 0.1954,
+ "step": 130
+ },
+ {
+ "epoch": 0.6730769230769231,
+ "grad_norm": 5.397825241088867,
+ "learning_rate": 4.8076923076923084e-05,
+ "loss": 0.1927,
+ "step": 140
+ },
+ {
+ "epoch": 0.7211538461538461,
+ "grad_norm": 2.4581189155578613,
+ "learning_rate": 4.7542735042735045e-05,
+ "loss": 0.1921,
+ "step": 150
+ },
+ {
+ "epoch": 0.7692307692307693,
+ "grad_norm": 4.148955345153809,
+ "learning_rate": 4.700854700854701e-05,
+ "loss": 0.1945,
+ "step": 160
+ },
+ {
+ "epoch": 0.8173076923076923,
+ "grad_norm": 7.619889259338379,
+ "learning_rate": 4.6474358974358976e-05,
+ "loss": 0.1904,
+ "step": 170
+ },
+ {
+ "epoch": 0.8653846153846154,
+ "grad_norm": 8.802661895751953,
+ "learning_rate": 4.594017094017094e-05,
+ "loss": 0.1801,
+ "step": 180
+ },
+ {
+ "epoch": 0.9134615384615384,
+ "grad_norm": 1.7985179424285889,
+ "learning_rate": 4.5405982905982906e-05,
+ "loss": 0.1564,
+ "step": 190
+ },
+ {
+ "epoch": 0.9615384615384616,
+ "grad_norm": 7.868402481079102,
+ "learning_rate": 4.4871794871794874e-05,
+ "loss": 0.1974,
+ "step": 200
+ },
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.9371655180379626,
+ "eval_loss": 0.16416136920452118,
+ "eval_runtime": 85.3977,
+ "eval_samples_per_second": 304.142,
+ "eval_steps_per_second": 4.754,
+ "step": 208
+ },
+ {
+ "epoch": 1.0096153846153846,
+ "grad_norm": 4.705746650695801,
+ "learning_rate": 4.4337606837606836e-05,
+ "loss": 0.203,
+ "step": 210
+ },
+ {
+ "epoch": 1.0576923076923077,
+ "grad_norm": 3.6939737796783447,
+ "learning_rate": 4.3803418803418805e-05,
+ "loss": 0.1752,
+ "step": 220
+ },
+ {
+ "epoch": 1.1057692307692308,
+ "grad_norm": 3.589677333831787,
+ "learning_rate": 4.326923076923077e-05,
+ "loss": 0.1858,
+ "step": 230
+ },
+ {
+ "epoch": 1.1538461538461537,
+ "grad_norm": 6.519199848175049,
+ "learning_rate": 4.2735042735042735e-05,
+ "loss": 0.1814,
+ "step": 240
+ },
+ {
+ "epoch": 1.2019230769230769,
+ "grad_norm": 8.503952026367188,
+ "learning_rate": 4.2200854700854704e-05,
+ "loss": 0.1783,
+ "step": 250
+ },
+ {
+ "epoch": 1.25,
+ "grad_norm": 2.7003586292266846,
+ "learning_rate": 4.166666666666667e-05,
+ "loss": 0.1611,
+ "step": 260
+ },
+ {
+ "epoch": 1.2980769230769231,
+ "grad_norm": 2.9637396335601807,
+ "learning_rate": 4.1132478632478634e-05,
+ "loss": 0.1807,
+ "step": 270
+ },
+ {
+ "epoch": 1.3461538461538463,
+ "grad_norm": 2.4124629497528076,
+ "learning_rate": 4.05982905982906e-05,
+ "loss": 0.1669,
+ "step": 280
+ },
+ {
+ "epoch": 1.3942307692307692,
+ "grad_norm": 5.0713791847229,
+ "learning_rate": 4.006410256410257e-05,
+ "loss": 0.1743,
+ "step": 290
+ },
+ {
+ "epoch": 1.4423076923076923,
+ "grad_norm": 2.7951672077178955,
+ "learning_rate": 3.952991452991453e-05,
+ "loss": 0.1717,
+ "step": 300
+ },
+ {
+ "epoch": 1.4903846153846154,
+ "grad_norm": 4.324550628662109,
+ "learning_rate": 3.89957264957265e-05,
+ "loss": 0.188,
+ "step": 310
+ },
+ {
+ "epoch": 1.5384615384615383,
+ "grad_norm": 2.242941379547119,
+ "learning_rate": 3.846153846153846e-05,
+ "loss": 0.1914,
+ "step": 320
+ },
+ {
+ "epoch": 1.5865384615384617,
+ "grad_norm": 2.958832025527954,
+ "learning_rate": 3.7927350427350425e-05,
+ "loss": 0.1527,
+ "step": 330
+ },
+ {
+ "epoch": 1.6346153846153846,
+ "grad_norm": 3.1927058696746826,
+ "learning_rate": 3.739316239316239e-05,
+ "loss": 0.1822,
+ "step": 340
+ },
+ {
+ "epoch": 1.6826923076923077,
+ "grad_norm": 5.284717082977295,
+ "learning_rate": 3.685897435897436e-05,
+ "loss": 0.1587,
+ "step": 350
+ },
+ {
+ "epoch": 1.7307692307692308,
+ "grad_norm": 2.2974815368652344,
+ "learning_rate": 3.6324786324786323e-05,
+ "loss": 0.1494,
+ "step": 360
+ },
+ {
+ "epoch": 1.7788461538461537,
+ "grad_norm": 2.8491830825805664,
+ "learning_rate": 3.579059829059829e-05,
+ "loss": 0.1556,
+ "step": 370
+ },
+ {
+ "epoch": 1.8269230769230769,
+ "grad_norm": 2.7790327072143555,
+ "learning_rate": 3.525641025641026e-05,
+ "loss": 0.1552,
+ "step": 380
+ },
+ {
+ "epoch": 1.875,
+ "grad_norm": 2.0398130416870117,
+ "learning_rate": 3.472222222222222e-05,
+ "loss": 0.1704,
+ "step": 390
+ },
+ {
+ "epoch": 1.9230769230769231,
+ "grad_norm": 2.2376208305358887,
+ "learning_rate": 3.418803418803419e-05,
+ "loss": 0.1697,
+ "step": 400
+ },
+ {
+ "epoch": 1.9711538461538463,
+ "grad_norm": 2.9672436714172363,
+ "learning_rate": 3.365384615384616e-05,
+ "loss": 0.1589,
+ "step": 410
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.950833557925538,
+ "eval_loss": 0.1333734393119812,
+ "eval_runtime": 85.0927,
+ "eval_samples_per_second": 305.232,
+ "eval_steps_per_second": 4.771,
+ "step": 416
+ },
+ {
+ "epoch": 2.019230769230769,
+ "grad_norm": 1.8962812423706055,
+ "learning_rate": 3.311965811965812e-05,
+ "loss": 0.1603,
+ "step": 420
+ },
+ {
+ "epoch": 2.0673076923076925,
+ "grad_norm": 3.2972991466522217,
+ "learning_rate": 3.258547008547009e-05,
+ "loss": 0.1524,
+ "step": 430
+ },
+ {
+ "epoch": 2.1153846153846154,
+ "grad_norm": 2.4130704402923584,
+ "learning_rate": 3.205128205128206e-05,
+ "loss": 0.1507,
+ "step": 440
+ },
+ {
+ "epoch": 2.1634615384615383,
+ "grad_norm": 4.791253089904785,
+ "learning_rate": 3.151709401709402e-05,
+ "loss": 0.1717,
+ "step": 450
+ },
+ {
+ "epoch": 2.2115384615384617,
+ "grad_norm": 3.113145112991333,
+ "learning_rate": 3.098290598290599e-05,
+ "loss": 0.1484,
+ "step": 460
+ },
+ {
+ "epoch": 2.2596153846153846,
+ "grad_norm": 2.3643486499786377,
+ "learning_rate": 3.0448717948717947e-05,
+ "loss": 0.1717,
+ "step": 470
+ },
+ {
+ "epoch": 2.3076923076923075,
+ "grad_norm": 3.9917261600494385,
+ "learning_rate": 2.9914529914529915e-05,
+ "loss": 0.1647,
+ "step": 480
+ },
+ {
+ "epoch": 2.355769230769231,
+ "grad_norm": 7.72503662109375,
+ "learning_rate": 2.9380341880341884e-05,
+ "loss": 0.1506,
+ "step": 490
+ },
+ {
+ "epoch": 2.4038461538461537,
+ "grad_norm": 2.822436809539795,
+ "learning_rate": 2.8846153846153845e-05,
+ "loss": 0.161,
+ "step": 500
+ },
+ {
+ "epoch": 2.451923076923077,
+ "grad_norm": 2.8079020977020264,
+ "learning_rate": 2.8311965811965814e-05,
+ "loss": 0.132,
+ "step": 510
+ },
+ {
+ "epoch": 2.5,
+ "grad_norm": 2.609222650527954,
+ "learning_rate": 2.777777777777778e-05,
+ "loss": 0.1531,
+ "step": 520
+ },
+ {
+ "epoch": 2.5480769230769234,
+ "grad_norm": 4.114030361175537,
+ "learning_rate": 2.724358974358974e-05,
+ "loss": 0.137,
+ "step": 530
+ },
+ {
+ "epoch": 2.5961538461538463,
+ "grad_norm": 2.093318223953247,
+ "learning_rate": 2.670940170940171e-05,
+ "loss": 0.1394,
+ "step": 540
+ },
+ {
+ "epoch": 2.644230769230769,
+ "grad_norm": 2.208742618560791,
+ "learning_rate": 2.6175213675213678e-05,
+ "loss": 0.1536,
+ "step": 550
+ },
+ {
+ "epoch": 2.6923076923076925,
+ "grad_norm": 2.9571166038513184,
+ "learning_rate": 2.564102564102564e-05,
+ "loss": 0.1576,
+ "step": 560
+ },
+ {
+ "epoch": 2.7403846153846154,
+ "grad_norm": 2.523430824279785,
+ "learning_rate": 2.5106837606837608e-05,
+ "loss": 0.1486,
+ "step": 570
+ },
+ {
+ "epoch": 2.7884615384615383,
+ "grad_norm": 3.496368885040283,
+ "learning_rate": 2.4572649572649573e-05,
+ "loss": 0.1463,
+ "step": 580
+ },
+ {
+ "epoch": 2.8365384615384617,
+ "grad_norm": 2.219247579574585,
+ "learning_rate": 2.4038461538461542e-05,
+ "loss": 0.1464,
+ "step": 590
+ },
+ {
+ "epoch": 2.8846153846153846,
+ "grad_norm": 2.0809261798858643,
+ "learning_rate": 2.3504273504273504e-05,
+ "loss": 0.1467,
+ "step": 600
+ },
+ {
+ "epoch": 2.9326923076923075,
+ "grad_norm": 2.1210248470306396,
+ "learning_rate": 2.297008547008547e-05,
+ "loss": 0.1557,
+ "step": 610
+ },
+ {
+ "epoch": 2.980769230769231,
+ "grad_norm": 1.880336046218872,
+ "learning_rate": 2.2435897435897437e-05,
+ "loss": 0.134,
+ "step": 620
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.9431332537635236,
+ "eval_loss": 0.14658282697200775,
+ "eval_runtime": 85.5913,
+ "eval_samples_per_second": 303.454,
+ "eval_steps_per_second": 4.743,
+ "step": 624
+ },
+ {
+ "epoch": 3.0288461538461537,
+ "grad_norm": 1.8079543113708496,
+ "learning_rate": 2.1901709401709402e-05,
+ "loss": 0.1553,
+ "step": 630
+ },
+ {
+ "epoch": 3.076923076923077,
+ "grad_norm": 1.4903086423873901,
+ "learning_rate": 2.1367521367521368e-05,
+ "loss": 0.1485,
+ "step": 640
+ },
+ {
+ "epoch": 3.125,
+ "grad_norm": 2.690070629119873,
+ "learning_rate": 2.0833333333333336e-05,
+ "loss": 0.1423,
+ "step": 650
+ },
+ {
+ "epoch": 3.173076923076923,
+ "grad_norm": 5.739427089691162,
+ "learning_rate": 2.02991452991453e-05,
+ "loss": 0.1471,
+ "step": 660
+ },
+ {
+ "epoch": 3.2211538461538463,
+ "grad_norm": 2.2277045249938965,
+ "learning_rate": 1.9764957264957266e-05,
+ "loss": 0.1476,
+ "step": 670
+ },
+ {
+ "epoch": 3.269230769230769,
+ "grad_norm": 1.629521369934082,
+ "learning_rate": 1.923076923076923e-05,
+ "loss": 0.1465,
+ "step": 680
+ },
+ {
+ "epoch": 3.3173076923076925,
+ "grad_norm": 1.9700013399124146,
+ "learning_rate": 1.8696581196581197e-05,
+ "loss": 0.1317,
+ "step": 690
+ },
+ {
+ "epoch": 3.3653846153846154,
+ "grad_norm": 4.258269309997559,
+ "learning_rate": 1.8162393162393162e-05,
+ "loss": 0.1329,
+ "step": 700
+ },
+ {
+ "epoch": 3.4134615384615383,
+ "grad_norm": 6.2838664054870605,
+ "learning_rate": 1.762820512820513e-05,
+ "loss": 0.1686,
+ "step": 710
+ },
+ {
+ "epoch": 3.4615384615384617,
+ "grad_norm": 4.847503185272217,
+ "learning_rate": 1.7094017094017095e-05,
+ "loss": 0.1422,
+ "step": 720
+ },
+ {
+ "epoch": 3.5096153846153846,
+ "grad_norm": 2.352872371673584,
+ "learning_rate": 1.655982905982906e-05,
+ "loss": 0.1517,
+ "step": 730
+ },
+ {
+ "epoch": 3.5576923076923075,
+ "grad_norm": 2.504417896270752,
+ "learning_rate": 1.602564102564103e-05,
+ "loss": 0.1391,
+ "step": 740
+ },
+ {
+ "epoch": 3.605769230769231,
+ "grad_norm": 2.708151340484619,
+ "learning_rate": 1.5491452991452994e-05,
+ "loss": 0.118,
+ "step": 750
+ },
+ {
+ "epoch": 3.6538461538461537,
+ "grad_norm": 2.629040002822876,
+ "learning_rate": 1.4957264957264958e-05,
+ "loss": 0.146,
+ "step": 760
+ },
+ {
+ "epoch": 3.7019230769230766,
+ "grad_norm": 3.0288608074188232,
+ "learning_rate": 1.4423076923076923e-05,
+ "loss": 0.1261,
+ "step": 770
+ },
+ {
+ "epoch": 3.75,
+ "grad_norm": 2.5658912658691406,
+ "learning_rate": 1.388888888888889e-05,
+ "loss": 0.1533,
+ "step": 780
+ },
+ {
+ "epoch": 3.7980769230769234,
+ "grad_norm": 2.7179951667785645,
+ "learning_rate": 1.3354700854700855e-05,
+ "loss": 0.1413,
+ "step": 790
+ },
+ {
+ "epoch": 3.8461538461538463,
+ "grad_norm": 1.7115960121154785,
+ "learning_rate": 1.282051282051282e-05,
+ "loss": 0.1417,
+ "step": 800
+ },
+ {
+ "epoch": 3.894230769230769,
+ "grad_norm": 2.939439535140991,
+ "learning_rate": 1.2286324786324787e-05,
+ "loss": 0.1311,
+ "step": 810
+ },
+ {
+ "epoch": 3.9423076923076925,
+ "grad_norm": 3.047938585281372,
+ "learning_rate": 1.1752136752136752e-05,
+ "loss": 0.1247,
+ "step": 820
+ },
+ {
+ "epoch": 3.9903846153846154,
+ "grad_norm": 2.0042459964752197,
+ "learning_rate": 1.1217948717948719e-05,
+ "loss": 0.1488,
+ "step": 830
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.9566472875678589,
+ "eval_loss": 0.11553934961557388,
+ "eval_runtime": 85.0472,
+ "eval_samples_per_second": 305.395,
+ "eval_steps_per_second": 4.774,
+ "step": 832
+ },
+ {
+ "epoch": 4.038461538461538,
+ "grad_norm": 2.6015453338623047,
+ "learning_rate": 1.0683760683760684e-05,
+ "loss": 0.1375,
+ "step": 840
+ },
+ {
+ "epoch": 4.086538461538462,
+ "grad_norm": 2.7020106315612793,
+ "learning_rate": 1.014957264957265e-05,
+ "loss": 0.1389,
+ "step": 850
+ },
+ {
+ "epoch": 4.134615384615385,
+ "grad_norm": 2.7265429496765137,
+ "learning_rate": 9.615384615384616e-06,
+ "loss": 0.1327,
+ "step": 860
+ },
+ {
+ "epoch": 4.1826923076923075,
+ "grad_norm": 5.0143866539001465,
+ "learning_rate": 9.081196581196581e-06,
+ "loss": 0.1436,
+ "step": 870
+ },
+ {
+ "epoch": 4.230769230769231,
+ "grad_norm": 3.4509687423706055,
+ "learning_rate": 8.547008547008548e-06,
+ "loss": 0.1238,
+ "step": 880
+ },
+ {
+ "epoch": 4.278846153846154,
+ "grad_norm": 2.778099536895752,
+ "learning_rate": 8.012820512820515e-06,
+ "loss": 0.1219,
+ "step": 890
+ },
+ {
+ "epoch": 4.326923076923077,
+ "grad_norm": 2.5686230659484863,
+ "learning_rate": 7.478632478632479e-06,
+ "loss": 0.1291,
+ "step": 900
+ },
+ {
+ "epoch": 4.375,
+ "grad_norm": 2.001837730407715,
+ "learning_rate": 6.944444444444445e-06,
+ "loss": 0.1234,
+ "step": 910
+ },
+ {
+ "epoch": 4.423076923076923,
+ "grad_norm": 2.5673305988311768,
+ "learning_rate": 6.41025641025641e-06,
+ "loss": 0.1279,
+ "step": 920
+ },
+ {
+ "epoch": 4.471153846153846,
+ "grad_norm": 3.475268602371216,
+ "learning_rate": 5.876068376068376e-06,
+ "loss": 0.1272,
+ "step": 930
+ },
+ {
+ "epoch": 4.519230769230769,
+ "grad_norm": 2.018739700317383,
+ "learning_rate": 5.341880341880342e-06,
+ "loss": 0.135,
+ "step": 940
+ },
+ {
+ "epoch": 4.5673076923076925,
+ "grad_norm": 3.1651690006256104,
+ "learning_rate": 4.807692307692308e-06,
+ "loss": 0.1429,
+ "step": 950
+ },
+ {
+ "epoch": 4.615384615384615,
+ "grad_norm": 1.8769733905792236,
+ "learning_rate": 4.273504273504274e-06,
+ "loss": 0.1254,
+ "step": 960
+ },
+ {
+ "epoch": 4.663461538461538,
+ "grad_norm": 2.2759199142456055,
+ "learning_rate": 3.7393162393162394e-06,
+ "loss": 0.1255,
+ "step": 970
+ },
+ {
+ "epoch": 4.711538461538462,
+ "grad_norm": 2.8174450397491455,
+ "learning_rate": 3.205128205128205e-06,
+ "loss": 0.1325,
+ "step": 980
+ },
+ {
+ "epoch": 4.759615384615385,
+ "grad_norm": 3.362974166870117,
+ "learning_rate": 2.670940170940171e-06,
+ "loss": 0.1334,
+ "step": 990
+ },
+ {
+ "epoch": 4.8076923076923075,
+ "grad_norm": 2.34413743019104,
+ "learning_rate": 2.136752136752137e-06,
+ "loss": 0.125,
+ "step": 1000
+ },
+ {
+ "epoch": 4.855769230769231,
+ "grad_norm": 3.007368326187134,
+ "learning_rate": 1.6025641025641025e-06,
+ "loss": 0.1288,
+ "step": 1010
+ },
+ {
+ "epoch": 4.903846153846154,
+ "grad_norm": 3.8937089443206787,
+ "learning_rate": 1.0683760683760685e-06,
+ "loss": 0.1272,
+ "step": 1020
+ },
+ {
+ "epoch": 4.951923076923077,
+ "grad_norm": 2.394737958908081,
+ "learning_rate": 5.341880341880342e-07,
+ "loss": 0.1322,
+ "step": 1030
+ },
+ {
+ "epoch": 5.0,
+ "grad_norm": 1.8422698974609375,
+ "learning_rate": 0.0,
+ "loss": 0.1169,
+ "step": 1040
+ },
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.9585338620875524,
+ "eval_loss": 0.11395391821861267,
+ "eval_runtime": 84.4488,
+ "eval_samples_per_second": 307.559,
+ "eval_steps_per_second": 4.808,
+ "step": 1040
+ },
+ {
+ "epoch": 5.0,
+ "step": 1040,
+ "total_flos": 5.2079419237154e+18,
+ "train_loss": 0.16386900280530636,
+ "train_runtime": 2488.4676,
+ "train_samples_per_second": 106.949,
+ "train_steps_per_second": 0.418
+ }
+ ],
+ "logging_steps": 10,
+ "max_steps": 1040,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 5,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 5.2079419237154e+18,
+ "train_batch_size": 64,
+ "trial_name": null,
+ "trial_params": null
+ }
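
trainer_state.json carries the full per-step history, so it is the natural place to pull learning curves from. Below is a minimal sketch, assuming the file is read from the repository root, that separates the per-step training losses from the per-epoch evaluation entries shown above.

```python
import json

# Load the trainer state and split log_history into training and eval records.
with open("trainer_state.json") as f:
    state = json.load(f)

train_logs = [e for e in state["log_history"] if "loss" in e]           # logged every 10 steps
eval_logs = [e for e in state["log_history"] if "eval_accuracy" in e]   # logged once per epoch

print(f"best metric {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
for e in eval_logs:
    print(f"epoch {e['epoch']:.0f}: accuracy={e['eval_accuracy']:.4f}, loss={e['eval_loss']:.4f}")
```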