Silemo committed
Commit 7c043ae · 1 Parent(s): 359c0c2

Training in progress, step 1000

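For context, checkpoint commits with the message "Training in progress, step N" are what the Transformers Trainer pushes automatically when push_to_hub is enabled. The values visible in trainer_state.json below (output_dir ./whisper-it, num_train_epochs 8, eval_steps 100, save_steps 100, logging_steps 25, a WER-like best metric) are consistent with a setup along the following lines; this is a hedged reconstruction, not the author's actual training script, and anything not visible in the diff is omitted or marked as an assumption.

```python
from transformers import Seq2SeqTrainingArguments

# Reconstructed from the trainer_state.json values visible in this commit;
# arguments not visible there (batch size, learning rate, fp16, ...) are omitted.
training_args = Seq2SeqTrainingArguments(
    output_dir="./whisper-it",     # matches "best_model_checkpoint": "./whisper-it/checkpoint-300"
    num_train_epochs=8,            # "num_train_epochs": 8
    evaluation_strategy="steps",
    eval_steps=100,                # "eval_steps": 100
    save_steps=100,                # "save_steps": 100
    logging_steps=25,              # "logging_steps": 25
    metric_for_best_model="wer",   # assumption: best_metric tracks eval_wer
    greater_is_better=False,       # assumption: lower WER is better
    push_to_hub=True,              # produces "Training in progress, step N" commits like this one
)
```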
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7dd37d31702ebefc1c3b2f7fbe40dde15e2e75fed45e34501991f88484438652
+oid sha256:363a18dff03938cbab3ea0517e5fd8baacab97d4647e9d18d67491af3678daa6
 size 966995080
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:609a70c81c9e6b33ff51be43f209abfd2dc1b4e00c510020614edcb3823c6c0f
+oid sha256:ec51eb4084dc7356ccd5e4f6e60d6eb446a9f6b453cb22d3a3f27e5a78818d65
 size 1925064044
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:20fa2912fae4d301064dd04015af8bf84c4ac5e665fa945460a03435089afc55
+oid sha256:140766ee026fe5edfef0f038f98f0479f8a9d10cfc1f7fe43d81468743578807
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c59ded4a6c03f3718ee2404fab623888997ec521e28e6156a5fecb6d237f2d56
+oid sha256:a5923d5ad322acc38eebeb7826b39a3fb9c19734502e2060fc78e9bf4e91b73b
 size 1064
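The model, optimizer, RNG-state and scheduler files above are tracked with Git LFS, so each diff only replaces the three-line pointer (version, oid, size); the oid is the SHA-256 digest of the binary blob. A minimal sketch for checking a downloaded file against its pointer, assuming the repository has been cloned with the LFS objects present (the path and digest are copied from the last-checkpoint/model.safetensors pointer above):

```python
import hashlib

def lfs_oid(path: str, chunk_size: int = 1 << 20) -> str:
    """SHA-256 of the file contents, i.e. the value Git LFS records as the pointer oid."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "363a18dff03938cbab3ea0517e5fd8baacab97d4647e9d18d67491af3678daa6"
assert lfs_oid("last-checkpoint/model.safetensors") == expected
```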
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 70.1955074875208,
   "best_model_checkpoint": "./whisper-it/checkpoint-300",
-  "epoch": 1.717557251908397,
+  "epoch": 1.33587786259542,
   "eval_steps": 100,
-  "global_step": 900,
+  "global_step": 700,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -238,72 +238,6 @@
       "eval_steps_per_second": 0.112,
       "eval_wer": 90.3563505268996,
       "step": 700
-    },
-    {
-      "epoch": 1.38,
-      "learning_rate": 9.374285714285715e-06,
-      "loss": 0.1626,
-      "step": 725
-    },
-    {
-      "epoch": 1.43,
-      "learning_rate": 9.302857142857144e-06,
-      "loss": 0.1674,
-      "step": 750
-    },
-    {
-      "epoch": 1.48,
-      "learning_rate": 9.231428571428573e-06,
-      "loss": 0.1717,
-      "step": 775
-    },
-    {
-      "epoch": 1.53,
-      "learning_rate": 9.16e-06,
-      "loss": 0.1607,
-      "step": 800
-    },
-    {
-      "epoch": 1.53,
-      "eval_loss": 0.3789268732070923,
-      "eval_runtime": 1834.2673,
-      "eval_samples_per_second": 0.818,
-      "eval_steps_per_second": 0.102,
-      "eval_wer": 134.98336106489182,
-      "step": 800
-    },
-    {
-      "epoch": 1.57,
-      "learning_rate": 9.08857142857143e-06,
-      "loss": 0.1636,
-      "step": 825
-    },
-    {
-      "epoch": 1.62,
-      "learning_rate": 9.017142857142858e-06,
-      "loss": 0.1625,
-      "step": 850
-    },
-    {
-      "epoch": 1.67,
-      "learning_rate": 8.945714285714286e-06,
-      "loss": 0.1762,
-      "step": 875
-    },
-    {
-      "epoch": 1.72,
-      "learning_rate": 8.874285714285715e-06,
-      "loss": 0.154,
-      "step": 900
-    },
-    {
-      "epoch": 1.72,
-      "eval_loss": 0.3783314824104309,
-      "eval_runtime": 1787.7259,
-      "eval_samples_per_second": 0.839,
-      "eval_steps_per_second": 0.105,
-      "eval_wer": 99.0501941209096,
-      "step": 900
     }
   ],
   "logging_steps": 25,
@@ -311,7 +245,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 8,
   "save_steps": 100,
-  "total_flos": 8.30923943141376e+18,
+  "total_flos": 6.46229286936576e+18,
   "trial_name": null,
   "trial_params": null
 }
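The trainer_state.json change above drops the log entries for steps 725 through 900 because the checkpoint folder now reflects global step 700 and the state file is rewritten from that point. A minimal sketch for inspecting what remains in log_history, assuming the repository (including this checkpoint directory) has been cloned locally:

```python
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["best_metric"], state["best_model_checkpoint"])

# Training entries carry "loss"; evaluation entries carry "eval_loss" and "eval_wer".
for entry in state["log_history"]:
    if "eval_wer" in entry:
        print(f'step {entry["step"]}: eval_loss={entry["eval_loss"]:.4f}, WER={entry["eval_wer"]:.2f}')
```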
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:26fb5b39845f83ef97933929d4301eb10fbbba57913dc6054b50bd05a1ce5a9f
+oid sha256:8639ff321d8732686fcd47480f025401063d95f80a6017764993068cc24ca794
 size 4792
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7dd37d31702ebefc1c3b2f7fbe40dde15e2e75fed45e34501991f88484438652
+oid sha256:e8be1c95dfd279bbe6f6a75bce4f3ce18633cb0a7088267acd5b4658adc3b6d4
 size 966995080
runs/Dec03_15-54-33_d98905377cf4/events.out.tfevents.1701618890.d98905377cf4.6716.0 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8c65958e82ebea50cbb6a5260620ab10228e91db1b5237777f03fc81587b5830
-size 7023
+oid sha256:547ba941e8cb2065d06e8d8bda6a4c23ace75038cf394dc2ff2bbc8c1655368f
+size 7969
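The runs/Dec03_15-54-33_d98905377cf4 file above is the TensorBoard event log for this run, updated alongside the checkpoint. A minimal sketch for listing the scalars it contains, assuming tensorboard is installed; the exact tag names depend on what the Trainer logged and are not guaranteed:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Dec03_15-54-33_d98905377cf4")
acc.Reload()

for tag in acc.Tags()["scalars"]:   # e.g. "train/loss", "eval/wer" (names may differ)
    for event in acc.Scalars(tag):
        print(tag, event.step, event.value)
```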