saurabhy27-outcomes committed on
Commit
d0036f3
·
verified ·
1 Parent(s): 3388588

End of training

Browse files
README.md CHANGED
@@ -3,23 +3,36 @@ library_name: transformers
3
  license: apache-2.0
4
  base_model: openai/whisper-large-v3
5
  tags:
 
6
  - generated_from_trainer
 
 
7
  metrics:
8
  - wer
9
  model-index:
10
- - name: whisper-large-v3-common-n-medical-50-50
11
- results: []
 
 
 
 
 
 
 
 
 
 
12
  ---
13
 
14
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
15
  should probably proofread and complete it, then remove this comment. -->
16
 
17
- # whisper-large-v3-common-n-medical-50-50
18
 
19
- This model is a fine-tuned version of [openai/whisper-large-v3](https://huggingface.co/openai/whisper-large-v3) on an unknown dataset.
20
  It achieves the following results on the evaluation set:
21
- - Loss: 0.3193
22
- - Wer: 5.2213
23
 
24
  ## Model description
25
 
 
3
  license: apache-2.0
4
  base_model: openai/whisper-large-v3
5
  tags:
6
+ - whisper-event
7
  - generated_from_trainer
8
+ datasets:
9
+ - OUTCOMESAI/medical_n_common_speech_corpus_50_50
10
  metrics:
11
  - wer
12
  model-index:
13
+ - name: Whisper Large V3 Common n Medical 50 50
14
+ results:
15
+ - task:
16
+ name: Automatic Speech Recognition
17
+ type: automatic-speech-recognition
18
+ dataset:
19
+ name: OUTCOMESAI/medical_n_common_speech_corpus_50_50 en
20
+ type: OUTCOMESAI/medical_n_common_speech_corpus_50_50
21
+ metrics:
22
+ - name: Wer
23
+ type: wer
24
+ value: 5.218643517767322
25
  ---
26
 
27
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
28
  should probably proofread and complete it, then remove this comment. -->
29
 
30
+ # Whisper Large V3 Common n Medical 50 50
31
 
32
+ This model is a fine-tuned version of [openai/whisper-large-v3](https://huggingface.co/openai/whisper-large-v3) on the OUTCOMESAI/medical_n_common_speech_corpus_50_50 en dataset.
33
  It achieves the following results on the evaluation set:
34
+ - Loss: 0.3196
35
+ - Wer: 5.2186
36
 
37
  ## Model description
38
 
all_results.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 1.937984496124031,
3
+ "eval_loss": 0.319580078125,
4
+ "eval_runtime": 2690.2896,
5
+ "eval_samples": 10000,
6
+ "eval_samples_per_second": 0.834,
7
+ "eval_steps_per_second": 0.026,
8
+ "eval_wer": 5.218643517767322,
9
+ "total_flos": 1.087083891827713e+21,
10
+ "train_loss": 3.86036796875,
11
+ "train_runtime": 85811.7084,
12
+ "train_samples_per_second": 3.729,
13
+ "train_steps_per_second": 0.058
14
+ }
eval_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 1.937984496124031,
3
+ "eval_loss": 0.319580078125,
4
+ "eval_runtime": 2690.2896,
5
+ "eval_samples": 10000,
6
+ "eval_samples_per_second": 0.834,
7
+ "eval_steps_per_second": 0.026,
8
+ "eval_wer": 5.218643517767322
9
+ }
runs/Dec19_08-38-49_b72483eab5b9/events.out.tfevents.1734693748.b72483eab5b9.6985.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ee9f2bb6aa719844f9fe09b3ecd4c383fc61bb4673c71d487d2586a0298248e
3
+ size 406
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 1.937984496124031,
3
+ "total_flos": 1.087083891827713e+21,
4
+ "train_loss": 3.86036796875,
5
+ "train_runtime": 85811.7084,
6
+ "train_samples_per_second": 3.729,
7
+ "train_steps_per_second": 0.058
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,572 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 5.218643517767322,
3
+ "best_model_checkpoint": "OUTCOMESAI/whisper-large-v3-common-n-medical-50-50/checkpoint-4750",
4
+ "epoch": 1.937984496124031,
5
+ "eval_steps": 250,
6
+ "global_step": 5000,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.03875968992248062,
13
+ "grad_norm": 4.327393531799316,
14
+ "learning_rate": 4.123797088618779e-07,
15
+ "loss": 6.6181,
16
+ "step": 100
17
+ },
18
+ {
19
+ "epoch": 0.07751937984496124,
20
+ "grad_norm": 7.569775104522705,
21
+ "learning_rate": 4.775003968157492e-07,
22
+ "loss": 5.126,
23
+ "step": 200
24
+ },
25
+ {
26
+ "epoch": 0.09689922480620156,
27
+ "eval_loss": 0.369384765625,
28
+ "eval_runtime": 2703.3784,
29
+ "eval_samples_per_second": 0.83,
30
+ "eval_steps_per_second": 0.026,
31
+ "eval_wer": 5.660122234842339,
32
+ "step": 250
33
+ },
34
+ {
35
+ "epoch": 0.11627906976744186,
36
+ "grad_norm": 3.0063843727111816,
37
+ "learning_rate": 4.953684210526315e-07,
38
+ "loss": 4.8144,
39
+ "step": 300
40
+ },
41
+ {
42
+ "epoch": 0.15503875968992248,
43
+ "grad_norm": 3.107106924057007,
44
+ "learning_rate": 4.848421052631578e-07,
45
+ "loss": 4.5831,
46
+ "step": 400
47
+ },
48
+ {
49
+ "epoch": 0.1937984496124031,
50
+ "grad_norm": 2.855757236480713,
51
+ "learning_rate": 4.7431578947368417e-07,
52
+ "loss": 4.367,
53
+ "step": 500
54
+ },
55
+ {
56
+ "epoch": 0.1937984496124031,
57
+ "eval_loss": 0.358642578125,
58
+ "eval_runtime": 2810.867,
59
+ "eval_samples_per_second": 0.798,
60
+ "eval_steps_per_second": 0.025,
61
+ "eval_wer": 5.815620098436908,
62
+ "step": 500
63
+ },
64
+ {
65
+ "epoch": 0.23255813953488372,
66
+ "grad_norm": 2.5796642303466797,
67
+ "learning_rate": 4.637894736842105e-07,
68
+ "loss": 4.2067,
69
+ "step": 600
70
+ },
71
+ {
72
+ "epoch": 0.2713178294573643,
73
+ "grad_norm": 2.537674903869629,
74
+ "learning_rate": 4.532631578947368e-07,
75
+ "loss": 4.1514,
76
+ "step": 700
77
+ },
78
+ {
79
+ "epoch": 0.29069767441860467,
80
+ "eval_loss": 0.35107421875,
81
+ "eval_runtime": 2945.7156,
82
+ "eval_samples_per_second": 0.761,
83
+ "eval_steps_per_second": 0.024,
84
+ "eval_wer": 5.883903942884959,
85
+ "step": 750
86
+ },
87
+ {
88
+ "epoch": 0.31007751937984496,
89
+ "grad_norm": 2.430060386657715,
90
+ "learning_rate": 4.4273684210526315e-07,
91
+ "loss": 4.0786,
92
+ "step": 800
93
+ },
94
+ {
95
+ "epoch": 0.3488372093023256,
96
+ "grad_norm": 2.526545286178589,
97
+ "learning_rate": 4.322105263157895e-07,
98
+ "loss": 4.0004,
99
+ "step": 900
100
+ },
101
+ {
102
+ "epoch": 0.3875968992248062,
103
+ "grad_norm": 2.5485153198242188,
104
+ "learning_rate": 4.2168421052631575e-07,
105
+ "loss": 3.962,
106
+ "step": 1000
107
+ },
108
+ {
109
+ "epoch": 0.3875968992248062,
110
+ "eval_loss": 0.344970703125,
111
+ "eval_runtime": 2929.618,
112
+ "eval_samples_per_second": 0.766,
113
+ "eval_steps_per_second": 0.024,
114
+ "eval_wer": 5.780464059711179,
115
+ "step": 1000
116
+ },
117
+ {
118
+ "epoch": 0.4263565891472868,
119
+ "grad_norm": 2.46012282371521,
120
+ "learning_rate": 4.111578947368421e-07,
121
+ "loss": 3.9314,
122
+ "step": 1100
123
+ },
124
+ {
125
+ "epoch": 0.46511627906976744,
126
+ "grad_norm": 2.6299571990966797,
127
+ "learning_rate": 4.0063157894736836e-07,
128
+ "loss": 3.9038,
129
+ "step": 1200
130
+ },
131
+ {
132
+ "epoch": 0.4844961240310077,
133
+ "eval_loss": 0.34033203125,
134
+ "eval_runtime": 3171.0392,
135
+ "eval_samples_per_second": 0.707,
136
+ "eval_steps_per_second": 0.022,
137
+ "eval_wer": 6.174617340040023,
138
+ "step": 1250
139
+ },
140
+ {
141
+ "epoch": 0.5038759689922481,
142
+ "grad_norm": 2.334170341491699,
143
+ "learning_rate": 3.9010526315789474e-07,
144
+ "loss": 3.8762,
145
+ "step": 1300
146
+ },
147
+ {
148
+ "epoch": 0.5426356589147286,
149
+ "grad_norm": 2.6218087673187256,
150
+ "learning_rate": 3.7957894736842106e-07,
151
+ "loss": 3.8491,
152
+ "step": 1400
153
+ },
154
+ {
155
+ "epoch": 0.5813953488372093,
156
+ "grad_norm": 2.390178680419922,
157
+ "learning_rate": 3.6905263157894734e-07,
158
+ "loss": 3.8313,
159
+ "step": 1500
160
+ },
161
+ {
162
+ "epoch": 0.5813953488372093,
163
+ "eval_loss": 0.3359375,
164
+ "eval_runtime": 3066.5551,
165
+ "eval_samples_per_second": 0.731,
166
+ "eval_steps_per_second": 0.023,
167
+ "eval_wer": 5.973822272702688,
168
+ "step": 1500
169
+ },
170
+ {
171
+ "epoch": 0.6201550387596899,
172
+ "grad_norm": 2.461087465286255,
173
+ "learning_rate": 3.5852631578947367e-07,
174
+ "loss": 3.804,
175
+ "step": 1600
176
+ },
177
+ {
178
+ "epoch": 0.6589147286821705,
179
+ "grad_norm": 2.4371871948242188,
180
+ "learning_rate": 3.4799999999999994e-07,
181
+ "loss": 3.7778,
182
+ "step": 1700
183
+ },
184
+ {
185
+ "epoch": 0.6782945736434108,
186
+ "eval_loss": 0.333251953125,
187
+ "eval_runtime": 3069.4158,
188
+ "eval_samples_per_second": 0.731,
189
+ "eval_steps_per_second": 0.023,
190
+ "eval_wer": 5.921764292281898,
191
+ "step": 1750
192
+ },
193
+ {
194
+ "epoch": 0.6976744186046512,
195
+ "grad_norm": 2.413174867630005,
196
+ "learning_rate": 3.374736842105263e-07,
197
+ "loss": 3.7737,
198
+ "step": 1800
199
+ },
200
+ {
201
+ "epoch": 0.7364341085271318,
202
+ "grad_norm": 2.4858949184417725,
203
+ "learning_rate": 3.2694736842105265e-07,
204
+ "loss": 3.7538,
205
+ "step": 1900
206
+ },
207
+ {
208
+ "epoch": 0.7751937984496124,
209
+ "grad_norm": 2.6870527267456055,
210
+ "learning_rate": 3.164210526315789e-07,
211
+ "loss": 3.7421,
212
+ "step": 2000
213
+ },
214
+ {
215
+ "epoch": 0.7751937984496124,
216
+ "eval_loss": 0.33056640625,
217
+ "eval_runtime": 3182.1093,
218
+ "eval_samples_per_second": 0.705,
219
+ "eval_steps_per_second": 0.022,
220
+ "eval_wer": 6.13270052463627,
221
+ "step": 2000
222
+ },
223
+ {
224
+ "epoch": 0.813953488372093,
225
+ "grad_norm": 2.504830837249756,
226
+ "learning_rate": 3.0589473684210525e-07,
227
+ "loss": 3.7371,
228
+ "step": 2100
229
+ },
230
+ {
231
+ "epoch": 0.8527131782945736,
232
+ "grad_norm": 2.3637797832489014,
233
+ "learning_rate": 2.953684210526315e-07,
234
+ "loss": 3.7367,
235
+ "step": 2200
236
+ },
237
+ {
238
+ "epoch": 0.872093023255814,
239
+ "eval_loss": 0.328125,
240
+ "eval_runtime": 2953.0944,
241
+ "eval_samples_per_second": 0.76,
242
+ "eval_steps_per_second": 0.024,
243
+ "eval_wer": 5.656065768835524,
244
+ "step": 2250
245
+ },
246
+ {
247
+ "epoch": 0.8914728682170543,
248
+ "grad_norm": 2.3057029247283936,
249
+ "learning_rate": 2.848421052631579e-07,
250
+ "loss": 3.7176,
251
+ "step": 2300
252
+ },
253
+ {
254
+ "epoch": 0.9302325581395349,
255
+ "grad_norm": 2.277510643005371,
256
+ "learning_rate": 2.7431578947368423e-07,
257
+ "loss": 3.7003,
258
+ "step": 2400
259
+ },
260
+ {
261
+ "epoch": 0.9689922480620154,
262
+ "grad_norm": 2.3119587898254395,
263
+ "learning_rate": 2.637894736842105e-07,
264
+ "loss": 3.6878,
265
+ "step": 2500
266
+ },
267
+ {
268
+ "epoch": 0.9689922480620154,
269
+ "eval_loss": 0.32568359375,
270
+ "eval_runtime": 2931.2759,
271
+ "eval_samples_per_second": 0.765,
272
+ "eval_steps_per_second": 0.024,
273
+ "eval_wer": 5.515441613932608,
274
+ "step": 2500
275
+ },
276
+ {
277
+ "epoch": 1.0077519379844961,
278
+ "grad_norm": 2.32456636428833,
279
+ "learning_rate": 2.5326315789473683e-07,
280
+ "loss": 3.6732,
281
+ "step": 2600
282
+ },
283
+ {
284
+ "epoch": 1.0465116279069768,
285
+ "grad_norm": 2.489800453186035,
286
+ "learning_rate": 2.4273684210526316e-07,
287
+ "loss": 3.6769,
288
+ "step": 2700
289
+ },
290
+ {
291
+ "epoch": 1.0658914728682172,
292
+ "eval_loss": 0.32421875,
293
+ "eval_runtime": 2928.9686,
294
+ "eval_samples_per_second": 0.766,
295
+ "eval_steps_per_second": 0.024,
296
+ "eval_wer": 5.48028557520688,
297
+ "step": 2750
298
+ },
299
+ {
300
+ "epoch": 1.0852713178294573,
301
+ "grad_norm": 2.307305097579956,
302
+ "learning_rate": 2.3221052631578946e-07,
303
+ "loss": 3.6646,
304
+ "step": 2800
305
+ },
306
+ {
307
+ "epoch": 1.124031007751938,
308
+ "grad_norm": 2.3552660942077637,
309
+ "learning_rate": 2.216842105263158e-07,
310
+ "loss": 3.6604,
311
+ "step": 2900
312
+ },
313
+ {
314
+ "epoch": 1.1627906976744187,
315
+ "grad_norm": 2.3546299934387207,
316
+ "learning_rate": 2.111578947368421e-07,
317
+ "loss": 3.6508,
318
+ "step": 3000
319
+ },
320
+ {
321
+ "epoch": 1.1627906976744187,
322
+ "eval_loss": 0.323486328125,
323
+ "eval_runtime": 2929.9225,
324
+ "eval_samples_per_second": 0.766,
325
+ "eval_steps_per_second": 0.024,
326
+ "eval_wer": 5.463383633511818,
327
+ "step": 3000
328
+ },
329
+ {
330
+ "epoch": 1.2015503875968991,
331
+ "grad_norm": 2.2800722122192383,
332
+ "learning_rate": 2.006315789473684e-07,
333
+ "loss": 3.6412,
334
+ "step": 3100
335
+ },
336
+ {
337
+ "epoch": 1.2403100775193798,
338
+ "grad_norm": 2.368227005004883,
339
+ "learning_rate": 1.9010526315789475e-07,
340
+ "loss": 3.6292,
341
+ "step": 3200
342
+ },
343
+ {
344
+ "epoch": 1.2596899224806202,
345
+ "eval_loss": 0.322021484375,
346
+ "eval_runtime": 2816.4937,
347
+ "eval_samples_per_second": 0.796,
348
+ "eval_steps_per_second": 0.025,
349
+ "eval_wer": 5.3511547406566065,
350
+ "step": 3250
351
+ },
352
+ {
353
+ "epoch": 1.2790697674418605,
354
+ "grad_norm": 2.3508598804473877,
355
+ "learning_rate": 1.7957894736842105e-07,
356
+ "loss": 3.6196,
357
+ "step": 3300
358
+ },
359
+ {
360
+ "epoch": 1.3178294573643412,
361
+ "grad_norm": 2.4686381816864014,
362
+ "learning_rate": 1.6905263157894737e-07,
363
+ "loss": 3.6282,
364
+ "step": 3400
365
+ },
366
+ {
367
+ "epoch": 1.3565891472868217,
368
+ "grad_norm": 2.3237240314483643,
369
+ "learning_rate": 1.5852631578947367e-07,
370
+ "loss": 3.6179,
371
+ "step": 3500
372
+ },
373
+ {
374
+ "epoch": 1.3565891472868217,
375
+ "eval_loss": 0.321044921875,
376
+ "eval_runtime": 2685.856,
377
+ "eval_samples_per_second": 0.835,
378
+ "eval_steps_per_second": 0.026,
379
+ "eval_wer": 5.225404294445346,
380
+ "step": 3500
381
+ },
382
+ {
383
+ "epoch": 1.3953488372093024,
384
+ "grad_norm": 2.2395377159118652,
385
+ "learning_rate": 1.4799999999999998e-07,
386
+ "loss": 3.6078,
387
+ "step": 3600
388
+ },
389
+ {
390
+ "epoch": 1.4341085271317828,
391
+ "grad_norm": 2.3762331008911133,
392
+ "learning_rate": 1.374736842105263e-07,
393
+ "loss": 3.6032,
394
+ "step": 3700
395
+ },
396
+ {
397
+ "epoch": 1.4534883720930232,
398
+ "eval_loss": 0.320556640625,
399
+ "eval_runtime": 2693.0163,
400
+ "eval_samples_per_second": 0.833,
401
+ "eval_steps_per_second": 0.026,
402
+ "eval_wer": 5.2206717507707285,
403
+ "step": 3750
404
+ },
405
+ {
406
+ "epoch": 1.4728682170542635,
407
+ "grad_norm": 2.3068864345550537,
408
+ "learning_rate": 1.2694736842105263e-07,
409
+ "loss": 3.5997,
410
+ "step": 3800
411
+ },
412
+ {
413
+ "epoch": 1.5116279069767442,
414
+ "grad_norm": 2.40097713470459,
415
+ "learning_rate": 1.1642105263157894e-07,
416
+ "loss": 3.588,
417
+ "step": 3900
418
+ },
419
+ {
420
+ "epoch": 1.550387596899225,
421
+ "grad_norm": 2.320568561553955,
422
+ "learning_rate": 1.0589473684210526e-07,
423
+ "loss": 3.5922,
424
+ "step": 4000
425
+ },
426
+ {
427
+ "epoch": 1.550387596899225,
428
+ "eval_loss": 0.320068359375,
429
+ "eval_runtime": 2803.374,
430
+ "eval_samples_per_second": 0.8,
431
+ "eval_steps_per_second": 0.025,
432
+ "eval_wer": 5.303829303910433,
433
+ "step": 4000
434
+ },
435
+ {
436
+ "epoch": 1.5891472868217056,
437
+ "grad_norm": 2.4094741344451904,
438
+ "learning_rate": 9.557894736842105e-08,
439
+ "loss": 3.5832,
440
+ "step": 4100
441
+ },
442
+ {
443
+ "epoch": 1.627906976744186,
444
+ "grad_norm": 2.448002815246582,
445
+ "learning_rate": 8.505263157894736e-08,
446
+ "loss": 3.5743,
447
+ "step": 4200
448
+ },
449
+ {
450
+ "epoch": 1.6472868217054264,
451
+ "eval_loss": 0.31982421875,
452
+ "eval_runtime": 2778.7179,
453
+ "eval_samples_per_second": 0.807,
454
+ "eval_steps_per_second": 0.026,
455
+ "eval_wer": 5.263264643842285,
456
+ "step": 4250
457
+ },
458
+ {
459
+ "epoch": 1.6666666666666665,
460
+ "grad_norm": 2.362182855606079,
461
+ "learning_rate": 7.452631578947369e-08,
462
+ "loss": 3.5831,
463
+ "step": 4300
464
+ },
465
+ {
466
+ "epoch": 1.7054263565891472,
467
+ "grad_norm": 2.3121325969696045,
468
+ "learning_rate": 6.4e-08,
469
+ "loss": 3.5738,
470
+ "step": 4400
471
+ },
472
+ {
473
+ "epoch": 1.744186046511628,
474
+ "grad_norm": 2.3000595569610596,
475
+ "learning_rate": 5.347368421052632e-08,
476
+ "loss": 3.5882,
477
+ "step": 4500
478
+ },
479
+ {
480
+ "epoch": 1.744186046511628,
481
+ "eval_loss": 0.31982421875,
482
+ "eval_runtime": 2718.6235,
483
+ "eval_samples_per_second": 0.825,
484
+ "eval_steps_per_second": 0.026,
485
+ "eval_wer": 5.225404294445346,
486
+ "step": 4500
487
+ },
488
+ {
489
+ "epoch": 1.7829457364341086,
490
+ "grad_norm": 2.3451480865478516,
491
+ "learning_rate": 4.2947368421052626e-08,
492
+ "loss": 3.5873,
493
+ "step": 4600
494
+ },
495
+ {
496
+ "epoch": 1.8217054263565893,
497
+ "grad_norm": 2.301790237426758,
498
+ "learning_rate": 3.2421052631578947e-08,
499
+ "loss": 3.6021,
500
+ "step": 4700
501
+ },
502
+ {
503
+ "epoch": 1.8410852713178296,
504
+ "eval_loss": 0.319580078125,
505
+ "eval_runtime": 2680.8066,
506
+ "eval_samples_per_second": 0.837,
507
+ "eval_steps_per_second": 0.026,
508
+ "eval_wer": 5.218643517767322,
509
+ "step": 4750
510
+ },
511
+ {
512
+ "epoch": 1.8604651162790697,
513
+ "grad_norm": 2.2558062076568604,
514
+ "learning_rate": 2.189473684210526e-08,
515
+ "loss": 3.5833,
516
+ "step": 4800
517
+ },
518
+ {
519
+ "epoch": 1.8992248062015504,
520
+ "grad_norm": 2.2542638778686523,
521
+ "learning_rate": 1.136842105263158e-08,
522
+ "loss": 3.5732,
523
+ "step": 4900
524
+ },
525
+ {
526
+ "epoch": 1.937984496124031,
527
+ "grad_norm": 2.378117322921753,
528
+ "learning_rate": 8.421052631578947e-10,
529
+ "loss": 3.5865,
530
+ "step": 5000
531
+ },
532
+ {
533
+ "epoch": 1.937984496124031,
534
+ "eval_loss": 0.3193359375,
535
+ "eval_runtime": 2696.1288,
536
+ "eval_samples_per_second": 0.832,
537
+ "eval_steps_per_second": 0.026,
538
+ "eval_wer": 5.221347828438531,
539
+ "step": 5000
540
+ },
541
+ {
542
+ "epoch": 1.937984496124031,
543
+ "step": 5000,
544
+ "total_flos": 1.087083891827713e+21,
545
+ "train_loss": 3.86036796875,
546
+ "train_runtime": 85811.7084,
547
+ "train_samples_per_second": 3.729,
548
+ "train_steps_per_second": 0.058
549
+ }
550
+ ],
551
+ "logging_steps": 100,
552
+ "max_steps": 5000,
553
+ "num_input_tokens_seen": 0,
554
+ "num_train_epochs": 2,
555
+ "save_steps": 250,
556
+ "stateful_callbacks": {
557
+ "TrainerControl": {
558
+ "args": {
559
+ "should_epoch_stop": false,
560
+ "should_evaluate": false,
561
+ "should_log": false,
562
+ "should_save": true,
563
+ "should_training_stop": true
564
+ },
565
+ "attributes": {}
566
+ }
567
+ },
568
+ "total_flos": 1.087083891827713e+21,
569
+ "train_batch_size": 64,
570
+ "trial_name": null,
571
+ "trial_params": null
572
+ }