neuralwonderland committed (verified)
Commit bf44bd2 · 1 Parent(s): dba1f8e

Training in progress, step 600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9fb570867b39da8ca5f9dcb5a985b279fec72307f9c34b27d25f5a270d849bb9
+oid sha256:7217176b2cddaa72e46eb30c9cc84a5be813fd6776facbaf277dcc8d7979eabd
 size 524363632
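
The 524 MB adapter_model.safetensors follows the PEFT adapter naming convention, so the updated checkpoint can be loaded on top of its base model for a quick sanity check or inference. A minimal sketch, assuming transformers and peft are installed; the base model id is a hypothetical placeholder, since the base model is not named in this commit:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_MODEL_ID = "org/base-model"   # hypothetical placeholder, not part of this commit
CHECKPOINT_DIR = "last-checkpoint"  # directory updated by this commit

# Load the frozen base model, then attach the trained adapter weights on top of it.
base = AutoModelForCausalLM.from_pretrained(BASE_MODEL_ID, torch_dtype=torch.float16)
model = PeftModel.from_pretrained(base, CHECKPOINT_DIR)
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL_ID)

model.eval()  # base + adapter are now ready for generation or evaluation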
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7c2a8d621b78697c67005e055c00235f3d9085c1c8d486bb9edd4ae03d403f83
+oid sha256:92f7dc1fcda761e3cebe060c99e1b3c732d2e254c2fdc7fe7c567151a7818c1f
 size 1049049442
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b3dd9835961741b97ed6bc0c71057c6d15eaf84418093825034d870f676dd7ea
+oid sha256:a2759f9dc525a7c6daf5d5bb9e1a98fb5fce6ddd8bd5344b2dc8d63886281857
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6d349b525fc7c7eff553c27846b0059879b2d31c63b4e246efacc449795c47e3
+oid sha256:36ecce3e1177343d53ff5033f14f9aeebc8d978f05d309eda4e2cfafa92535e9
 size 1256
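
Each of the four files above is stored as a Git LFS pointer (version / oid sha256 / size), so the diff only swaps the content hash while the byte size stays the same. A small stdlib-only sketch (a hypothetical helper, not part of this repo) for checking that a downloaded blob matches its pointer:

import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: Path, blob_path: Path, chunk_size: int = 1 << 20) -> bool:
    """Compare a downloaded blob against the oid/size recorded in its Git LFS pointer."""
    fields = dict(line.split(" ", 1) for line in pointer_path.read_text().strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    size = 0
    with blob_path.open("rb") as f:
        while chunk := f.read(chunk_size):  # stream in chunks: optimizer.pt is ~1 GB
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# e.g. verify_lfs_pointer(Path("adapter_model.pointer"), Path("last-checkpoint/adapter_model.safetensors"))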
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.2499778270721436,
-  "best_model_checkpoint": "./output/checkpoint-450",
-  "epoch": 0.020156774916013438,
+  "best_metric": 1.2407419681549072,
+  "best_model_checkpoint": "./output/checkpoint-600",
+  "epoch": 0.026875699888017916,
   "eval_steps": 150,
-  "global_step": 450,
+  "global_step": 600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -346,6 +346,119 @@
       "eval_samples_per_second": 9.69,
       "eval_steps_per_second": 9.69,
       "step": 450
+    },
+    {
+      "epoch": 0.020604703247480403,
+      "grad_norm": 3.672325372695923,
+      "learning_rate": 7.400554539316894e-06,
+      "loss": 1.1627,
+      "step": 460
+    },
+    {
+      "epoch": 0.021052631578947368,
+      "grad_norm": 4.949635982513428,
+      "learning_rate": 7.394979347953081e-06,
+      "loss": 1.3115,
+      "step": 470
+    },
+    {
+      "epoch": 0.021500559910414333,
+      "grad_norm": 4.03855037689209,
+      "learning_rate": 7.389254325764681e-06,
+      "loss": 1.1176,
+      "step": 480
+    },
+    {
+      "epoch": 0.0219484882418813,
+      "grad_norm": 4.981250762939453,
+      "learning_rate": 7.383379708084934e-06,
+      "loss": 1.0668,
+      "step": 490
+    },
+    {
+      "epoch": 0.022396416573348264,
+      "grad_norm": 4.68571138381958,
+      "learning_rate": 7.377355736396362e-06,
+      "loss": 1.1235,
+      "step": 500
+    },
+    {
+      "epoch": 0.02284434490481523,
+      "grad_norm": 5.7003326416015625,
+      "learning_rate": 7.371182658320847e-06,
+      "loss": 1.0535,
+      "step": 510
+    },
+    {
+      "epoch": 0.023292273236282194,
+      "grad_norm": 2.357079029083252,
+      "learning_rate": 7.36486072760945e-06,
+      "loss": 0.9768,
+      "step": 520
+    },
+    {
+      "epoch": 0.02374020156774916,
+      "grad_norm": 4.828664779663086,
+      "learning_rate": 7.358390204131984e-06,
+      "loss": 1.0385,
+      "step": 530
+    },
+    {
+      "epoch": 0.024188129899216124,
+      "grad_norm": 3.4303321838378906,
+      "learning_rate": 7.3517713538663235e-06,
+      "loss": 0.9826,
+      "step": 540
+    },
+    {
+      "epoch": 0.02463605823068309,
+      "grad_norm": 8.705097198486328,
+      "learning_rate": 7.345004448887478e-06,
+      "loss": 1.0988,
+      "step": 550
+    },
+    {
+      "epoch": 0.025083986562150055,
+      "grad_norm": 4.806099891662598,
+      "learning_rate": 7.3380897673564085e-06,
+      "loss": 1.2765,
+      "step": 560
+    },
+    {
+      "epoch": 0.02553191489361702,
+      "grad_norm": 3.948829174041748,
+      "learning_rate": 7.33102759350859e-06,
+      "loss": 1.2548,
+      "step": 570
+    },
+    {
+      "epoch": 0.025979843225083985,
+      "grad_norm": 8.706982612609863,
+      "learning_rate": 7.323818217642328e-06,
+      "loss": 1.1907,
+      "step": 580
+    },
+    {
+      "epoch": 0.02642777155655095,
+      "grad_norm": 4.196287155151367,
+      "learning_rate": 7.316461936106827e-06,
+      "loss": 1.1541,
+      "step": 590
+    },
+    {
+      "epoch": 0.026875699888017916,
+      "grad_norm": 4.2185187339782715,
+      "learning_rate": 7.3089590512900084e-06,
+      "loss": 1.0761,
+      "step": 600
+    },
+    {
+      "epoch": 0.026875699888017916,
+      "eval_loss": 1.2407419681549072,
+      "eval_runtime": 51.6589,
+      "eval_samples_per_second": 9.679,
+      "eval_steps_per_second": 9.679,
+      "step": 600
     }
   ],
   "logging_steps": 10,
@@ -365,7 +478,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 5.76911261749248e+16,
+  "total_flos": 7.7474464828416e+16,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null