csikasote committed on
Commit d5603be · verified · 1 Parent(s): 12c8c37

End of training

README.md CHANGED
@@ -4,11 +4,23 @@ license: apache-2.0
  base_model: openai/whisper-medium
  tags:
  - generated_from_trainer
+ datasets:
+ - toigen
  metrics:
  - wer
  model-index:
  - name: whisper-medium-toigen-male-model
-   results: []
+   results:
+   - task:
+       name: Automatic Speech Recognition
+       type: automatic-speech-recognition
+     dataset:
+       name: toigen
+       type: toigen
+     metrics:
+     - name: Wer
+       type: wer
+       value: 0.5695876288659794
  ---

  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -16,10 +28,10 @@ should probably proofread and complete it, then remove this comment. -->

  # whisper-medium-toigen-male-model

- This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on an unknown dataset.
+ This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the toigen dataset.
  It achieves the following results on the evaluation set:
- - Loss: 1.0236
- - Wer: 0.4448
+ - Loss: 0.9393
+ - Wer: 0.5696

  ## Model description

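The updated card stops at the auto-generated sections, so for orientation here is a minimal inference sketch that is not part of this commit. The Hub id below is only an assumption based on the author (csikasote) and the model name in the card, and the audio path is a placeholder; Whisper checkpoints expect 16 kHz mono audio.

```python
# Minimal ASR inference sketch; the repo id is an assumption inferred from the
# author and model name in this card, and the audio file name is a placeholder.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="csikasote/whisper-medium-toigen-male-model",  # assumed Hub id
)

# Whisper pipelines resample input to 16 kHz mono before transcribing.
print(asr("example_toigen_clip.wav")["text"])
```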
all_results.json ADDED
@@ -0,0 +1,15 @@
+ {
+     "epoch": 9.437054631828978,
+     "eval_loss": 0.9392894506454468,
+     "eval_runtime": 129.543,
+     "eval_samples": 211,
+     "eval_samples_per_second": 1.629,
+     "eval_steps_per_second": 0.818,
+     "eval_wer": 0.5695876288659794,
+     "total_flos": 8.10972659515392e+18,
+     "train_loss": 2.490273738861084,
+     "train_runtime": 2581.6643,
+     "train_samples": 842,
+     "train_samples_per_second": 15.494,
+     "train_steps_per_second": 1.937
+ }
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 9.437054631828978,
+     "eval_loss": 0.9392894506454468,
+     "eval_runtime": 129.543,
+     "eval_samples": 211,
+     "eval_samples_per_second": 1.629,
+     "eval_steps_per_second": 0.818,
+     "eval_wer": 0.5695876288659794
+ }
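The eval_wer of 0.5695876288659794 recorded here is the same value reported as 0.5696 in the card, i.e. word errors divided by reference words. A minimal sketch of how a score in that 0-1 fraction form can be computed with the `evaluate` library (assumed tooling; the sentences are invented placeholders):

```python
# Hedged sketch: computing a word error rate in the same fractional form as
# "eval_wer" above, using the `evaluate` library; the sentences are placeholders.
import evaluate

wer_metric = evaluate.load("wer")

predictions = ["the quick brown fox jumps"]        # hypothetical model transcript
references = ["the quick brown fox jumps over"]    # hypothetical reference transcript

wer = wer_metric.compute(predictions=predictions, references=references)
print(f"WER: {wer:.4f}")  # 1 deletion / 6 reference words ≈ 0.1667
```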
runs/Jan05_19-19-34_srvrocgpu011.uct.ac.za/events.out.tfevents.1736100544.srvrocgpu011.uct.ac.za ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d50277442a589f6597fa38388a9f790f456219adda6c18fe8a23471bc5514bd4
+ size 40
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+     "epoch": 9.437054631828978,
+     "total_flos": 8.10972659515392e+18,
+     "train_loss": 2.490273738861084,
+     "train_runtime": 2581.6643,
+     "train_samples": 842,
+     "train_samples_per_second": 15.494,
+     "train_steps_per_second": 1.937
+ }
trainer_state.json ADDED
@@ -0,0 +1,376 @@
+ {
+   "best_metric": 0.9392894506454468,
+   "best_model_checkpoint": "/scratch/skscla001/speech/results/whisper-medium-toigen-male-model/checkpoint-400",
+   "epoch": 9.437054631828978,
+   "eval_steps": 200,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.2375296912114014,
+       "grad_norm": 150.66383361816406,
+       "learning_rate": 4.0000000000000003e-07,
+       "loss": 14.4659,
+       "step": 25
+     },
+     {
+       "epoch": 0.4750593824228028,
+       "grad_norm": 96.092529296875,
+       "learning_rate": 9.000000000000001e-07,
+       "loss": 11.6879,
+       "step": 50
+     },
+     {
+       "epoch": 0.7125890736342043,
+       "grad_norm": 94.01177215576172,
+       "learning_rate": 1.3800000000000001e-06,
+       "loss": 9.6203,
+       "step": 75
+     },
+     {
+       "epoch": 0.9501187648456056,
+       "grad_norm": 88.87047576904297,
+       "learning_rate": 1.8800000000000002e-06,
+       "loss": 7.6311,
+       "step": 100
+     },
+     {
+       "epoch": 1.180522565320665,
+       "grad_norm": 77.90989685058594,
+       "learning_rate": 2.38e-06,
+       "loss": 6.1323,
+       "step": 125
+     },
+     {
+       "epoch": 1.4180522565320666,
+       "grad_norm": 63.12504959106445,
+       "learning_rate": 2.88e-06,
+       "loss": 5.5927,
+       "step": 150
+     },
+     {
+       "epoch": 1.655581947743468,
+       "grad_norm": 63.86695861816406,
+       "learning_rate": 3.3800000000000007e-06,
+       "loss": 5.0682,
+       "step": 175
+     },
+     {
+       "epoch": 1.8931116389548692,
+       "grad_norm": 75.34732055664062,
+       "learning_rate": 3.88e-06,
+       "loss": 4.343,
+       "step": 200
+     },
+     {
+       "epoch": 1.8931116389548692,
+       "eval_loss": 1.066441535949707,
+       "eval_runtime": 125.6362,
+       "eval_samples_per_second": 1.679,
+       "eval_steps_per_second": 0.844,
+       "eval_wer": 0.606701030927835,
+       "step": 200
+     },
+     {
+       "epoch": 2.1235154394299287,
+       "grad_norm": 74.82162475585938,
+       "learning_rate": 4.38e-06,
+       "loss": 3.9738,
+       "step": 225
+     },
+     {
+       "epoch": 2.36104513064133,
+       "grad_norm": 60.270687103271484,
+       "learning_rate": 4.880000000000001e-06,
+       "loss": 3.4967,
+       "step": 250
+     },
+     {
+       "epoch": 2.598574821852732,
+       "grad_norm": 72.7863998413086,
+       "learning_rate": 5.380000000000001e-06,
+       "loss": 3.4653,
+       "step": 275
+     },
+     {
+       "epoch": 2.836104513064133,
+       "grad_norm": 42.029544830322266,
+       "learning_rate": 5.8800000000000005e-06,
+       "loss": 3.0671,
+       "step": 300
+     },
+     {
+       "epoch": 3.0665083135391926,
+       "grad_norm": 58.11145782470703,
+       "learning_rate": 6.380000000000001e-06,
+       "loss": 2.7671,
+       "step": 325
+     },
+     {
+       "epoch": 3.304038004750594,
+       "grad_norm": 72.86565399169922,
+       "learning_rate": 6.88e-06,
+       "loss": 2.1218,
+       "step": 350
+     },
+     {
+       "epoch": 3.5415676959619953,
+       "grad_norm": 29.798490524291992,
+       "learning_rate": 7.3800000000000005e-06,
+       "loss": 1.8895,
+       "step": 375
+     },
+     {
+       "epoch": 3.7790973871733966,
+       "grad_norm": 62.0257453918457,
+       "learning_rate": 7.88e-06,
+       "loss": 1.9496,
+       "step": 400
+     },
+     {
+       "epoch": 3.7790973871733966,
+       "eval_loss": 0.9392894506454468,
+       "eval_runtime": 129.8196,
+       "eval_samples_per_second": 1.625,
+       "eval_steps_per_second": 0.817,
+       "eval_wer": 0.5695876288659794,
+       "step": 400
+     },
+     {
+       "epoch": 4.009501187648456,
+       "grad_norm": 36.066246032714844,
+       "learning_rate": 8.380000000000001e-06,
+       "loss": 1.9781,
+       "step": 425
+     },
+     {
+       "epoch": 4.247030878859857,
+       "grad_norm": 42.63113021850586,
+       "learning_rate": 8.880000000000001e-06,
+       "loss": 1.0441,
+       "step": 450
+     },
+     {
+       "epoch": 4.484560570071259,
+       "grad_norm": 33.34630584716797,
+       "learning_rate": 9.38e-06,
+       "loss": 1.1374,
+       "step": 475
+     },
+     {
+       "epoch": 4.72209026128266,
+       "grad_norm": 61.29568862915039,
+       "learning_rate": 9.88e-06,
+       "loss": 1.0169,
+       "step": 500
+     },
+     {
+       "epoch": 4.959619952494061,
+       "grad_norm": 49.31779861450195,
+       "learning_rate": 9.957777777777779e-06,
+       "loss": 1.3693,
+       "step": 525
+     },
+     {
+       "epoch": 5.190023752969121,
+       "grad_norm": 35.42589569091797,
+       "learning_rate": 9.902222222222223e-06,
+       "loss": 0.7001,
+       "step": 550
+     },
+     {
+       "epoch": 5.427553444180522,
+       "grad_norm": 22.584501266479492,
+       "learning_rate": 9.846666666666668e-06,
+       "loss": 0.5772,
+       "step": 575
+     },
+     {
+       "epoch": 5.665083135391924,
+       "grad_norm": 21.677404403686523,
+       "learning_rate": 9.791111111111112e-06,
+       "loss": 0.702,
+       "step": 600
+     },
+     {
+       "epoch": 5.665083135391924,
+       "eval_loss": 0.9410276412963867,
+       "eval_runtime": 124.8464,
+       "eval_samples_per_second": 1.69,
+       "eval_steps_per_second": 0.849,
+       "eval_wer": 0.4814432989690722,
+       "step": 600
+     },
+     {
+       "epoch": 5.902612826603326,
+       "grad_norm": 27.595264434814453,
+       "learning_rate": 9.735555555555556e-06,
+       "loss": 0.6341,
+       "step": 625
+     },
+     {
+       "epoch": 6.133016627078385,
+       "grad_norm": 15.151907920837402,
+       "learning_rate": 9.68e-06,
+       "loss": 0.39,
+       "step": 650
+     },
+     {
+       "epoch": 6.370546318289787,
+       "grad_norm": 63.603759765625,
+       "learning_rate": 9.624444444444445e-06,
+       "loss": 0.3234,
+       "step": 675
+     },
+     {
+       "epoch": 6.608076009501188,
+       "grad_norm": 33.50586700439453,
+       "learning_rate": 9.56888888888889e-06,
+       "loss": 0.2813,
+       "step": 700
+     },
+     {
+       "epoch": 6.845605700712589,
+       "grad_norm": 29.78474235534668,
+       "learning_rate": 9.513333333333334e-06,
+       "loss": 0.3366,
+       "step": 725
+     },
+     {
+       "epoch": 7.076009501187649,
+       "grad_norm": 13.300681114196777,
+       "learning_rate": 9.457777777777778e-06,
+       "loss": 0.2659,
+       "step": 750
+     },
+     {
+       "epoch": 7.31353919239905,
+       "grad_norm": 26.387012481689453,
+       "learning_rate": 9.402222222222222e-06,
+       "loss": 0.1854,
+       "step": 775
+     },
+     {
+       "epoch": 7.551068883610451,
+       "grad_norm": 20.7415771484375,
+       "learning_rate": 9.346666666666666e-06,
+       "loss": 0.2108,
+       "step": 800
+     },
+     {
+       "epoch": 7.551068883610451,
+       "eval_loss": 0.9733582735061646,
+       "eval_runtime": 125.0244,
+       "eval_samples_per_second": 1.688,
+       "eval_steps_per_second": 0.848,
+       "eval_wer": 0.4551546391752577,
+       "step": 800
+     },
+     {
+       "epoch": 7.788598574821853,
+       "grad_norm": 16.81197166442871,
+       "learning_rate": 9.291111111111112e-06,
+       "loss": 0.211,
+       "step": 825
+     },
+     {
+       "epoch": 8.019002375296912,
+       "grad_norm": 8.114081382751465,
+       "learning_rate": 9.235555555555556e-06,
+       "loss": 0.2144,
+       "step": 850
+     },
+     {
+       "epoch": 8.256532066508314,
+       "grad_norm": 7.230250835418701,
+       "learning_rate": 9.180000000000002e-06,
+       "loss": 0.1472,
+       "step": 875
+     },
+     {
+       "epoch": 8.494061757719715,
+       "grad_norm": 34.53820037841797,
+       "learning_rate": 9.124444444444444e-06,
+       "loss": 0.1506,
+       "step": 900
+     },
+     {
+       "epoch": 8.731591448931116,
+       "grad_norm": 20.564693450927734,
+       "learning_rate": 9.06888888888889e-06,
+       "loss": 0.1414,
+       "step": 925
+     },
+     {
+       "epoch": 8.969121140142517,
+       "grad_norm": 24.549924850463867,
+       "learning_rate": 9.013333333333334e-06,
+       "loss": 0.1398,
+       "step": 950
+     },
+     {
+       "epoch": 9.199524940617577,
+       "grad_norm": 9.88586139678955,
+       "learning_rate": 8.957777777777778e-06,
+       "loss": 0.0744,
+       "step": 975
+     },
+     {
+       "epoch": 9.437054631828978,
+       "grad_norm": 7.651462554931641,
+       "learning_rate": 8.902222222222224e-06,
+       "loss": 0.1073,
+       "step": 1000
+     },
+     {
+       "epoch": 9.437054631828978,
+       "eval_loss": 1.0236197710037231,
+       "eval_runtime": 125.7559,
+       "eval_samples_per_second": 1.678,
+       "eval_steps_per_second": 0.843,
+       "eval_wer": 0.44484536082474224,
+       "step": 1000
+     },
+     {
+       "epoch": 9.437054631828978,
+       "step": 1000,
+       "total_flos": 8.10972659515392e+18,
+       "train_loss": 2.490273738861084,
+       "train_runtime": 2581.6643,
+       "train_samples_per_second": 15.494,
+       "train_steps_per_second": 1.937
+     }
+   ],
+   "logging_steps": 25,
+   "max_steps": 5000,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 48,
+   "save_steps": 200,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 3,
+         "early_stopping_threshold": 0.0
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 3
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 8.10972659515392e+18,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
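The trainer state records an evaluation every 200 steps and shows EarlyStoppingCallback (patience 3, evidently keyed on eval_loss) halting the run at step 1000 of a possible 5000. The card reports checkpoint-400 because it has the best eval_loss (0.9393), even though later checkpoints reached lower WER (0.4448 at step 1000). A minimal sketch for summarising that history, assuming the file above is saved locally as trainer_state.json:

```python
# Hedged sketch: read the trainer_state.json shown above and print the metrics
# logged at each evaluation checkpoint.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(f"best checkpoint: {state['best_model_checkpoint']}")
print(f"best eval_loss:  {state['best_metric']}")

# Evaluation entries are the log_history items carrying an "eval_wer" key;
# they occur every eval_steps (200) training steps.
for entry in state["log_history"]:
    if "eval_wer" in entry:
        print(f"step {entry['step']:>4}: eval_loss={entry['eval_loss']:.4f}, eval_wer={entry['eval_wer']:.4f}")
```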