leixa committed
Commit 251b656 · verified · 1 Parent(s): 7f67e10

Training in progress, step 600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4d32384f86741ea4337d9afc0d7a078fe3aa36fed3c3d9e70edfae22310ee876
+ oid sha256:e3f427ba4b1e818d24db7b9b4d4d27306f060b68299e48b15443c18382ae678d
  size 264285472
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a8ac9024c1f37492f3113611c2c865be1a76f1e7941939b9cbbfc0e34ff8415f
+ oid sha256:b666b3a8b61d7b820782b1d1b55d63f24c1c1bd8678d41e2c48f18f7e2bf3755
  size 134580692
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:92a6ee507ed5c3db946f7f697487dc8cd80b4be1732337c640133afa68554a78
+ oid sha256:ab20b00e42e594012f95a32f1dc278947bdfd0ca07fcada88f4fc2c8e3945752
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:70f0f789b56065211b8c0b1a5e2a97dd0b5b08a816bbbe288fb6f9c677282af9
+ oid sha256:5ad54995b081fae25638228c5d9c8f38ca277e5c5ad00bc3e49897b543f84405
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.09971451759338379,
- "best_model_checkpoint": "miner_id_24/checkpoint-450",
- "epoch": 1.0198075834748161,
+ "best_metric": 0.09790434688329697,
+ "best_model_checkpoint": "miner_id_24/checkpoint-600",
+ "epoch": 1.3593661573288058,
  "eval_steps": 50,
- "global_step": 450,
+ "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -402,6 +402,135 @@
  "eval_samples_per_second": 11.199,
  "eval_steps_per_second": 2.8,
  "step": 450
+ },
+ {
+ "epoch": 1.0424448217317488,
+ "grad_norm": 0.25138339400291443,
+ "learning_rate": 2.6522584913693294e-05,
+ "loss": 0.2796,
+ "step": 460
+ },
+ {
+ "epoch": 1.0650820599886814,
+ "grad_norm": 0.1467200219631195,
+ "learning_rate": 2.301660165700936e-05,
+ "loss": 0.168,
+ "step": 470
+ },
+ {
+ "epoch": 1.087719298245614,
+ "grad_norm": 0.18437053263187408,
+ "learning_rate": 1.9728836206903656e-05,
+ "loss": 0.1526,
+ "step": 480
+ },
+ {
+ "epoch": 1.1103565365025467,
+ "grad_norm": 0.00036731516593135893,
+ "learning_rate": 1.6668608091748495e-05,
+ "loss": 0.0535,
+ "step": 490
+ },
+ {
+ "epoch": 1.1329937747594794,
+ "grad_norm": 0.2067088931798935,
+ "learning_rate": 1.3844591860619383e-05,
+ "loss": 1.1511,
+ "step": 500
+ },
+ {
+ "epoch": 1.1329937747594794,
+ "eval_loss": 0.09841449558734894,
+ "eval_runtime": 66.7899,
+ "eval_samples_per_second": 11.139,
+ "eval_steps_per_second": 2.785,
+ "step": 500
+ },
+ {
+ "epoch": 1.155631013016412,
+ "grad_norm": 0.13695839047431946,
+ "learning_rate": 1.1264792494342857e-05,
+ "loss": 0.2589,
+ "step": 510
+ },
+ {
+ "epoch": 1.1782682512733447,
+ "grad_norm": 0.1652461439371109,
+ "learning_rate": 8.936522714508678e-06,
+ "loss": 0.1598,
+ "step": 520
+ },
+ {
+ "epoch": 1.2009054895302773,
+ "grad_norm": 0.12390118092298508,
+ "learning_rate": 6.866382254766157e-06,
+ "loss": 0.1742,
+ "step": 530
+ },
+ {
+ "epoch": 1.22354272778721,
+ "grad_norm": 0.00036215633735992014,
+ "learning_rate": 5.060239153161872e-06,
+ "loss": 0.0305,
+ "step": 540
+ },
+ {
+ "epoch": 1.2461799660441426,
+ "grad_norm": 0.20766952633857727,
+ "learning_rate": 3.5232131185484076e-06,
+ "loss": 1.2,
+ "step": 550
+ },
+ {
+ "epoch": 1.2461799660441426,
+ "eval_loss": 0.09686872363090515,
+ "eval_runtime": 67.296,
+ "eval_samples_per_second": 11.056,
+ "eval_steps_per_second": 2.764,
+ "step": 550
+ },
+ {
+ "epoch": 1.2688172043010753,
+ "grad_norm": 0.07413782179355621,
+ "learning_rate": 2.259661018213333e-06,
+ "loss": 0.2373,
+ "step": 560
+ },
+ {
+ "epoch": 1.291454442558008,
+ "grad_norm": 0.20543290674686432,
+ "learning_rate": 1.2731645278655445e-06,
+ "loss": 0.1418,
+ "step": 570
+ },
+ {
+ "epoch": 1.3140916808149405,
+ "grad_norm": 0.13837286829948425,
+ "learning_rate": 5.665199789862907e-07,
+ "loss": 0.1563,
+ "step": 580
+ },
+ {
+ "epoch": 1.3367289190718732,
+ "grad_norm": 0.0003274640184827149,
+ "learning_rate": 1.4173043232380557e-07,
+ "loss": 0.0364,
+ "step": 590
+ },
+ {
+ "epoch": 1.3593661573288058,
+ "grad_norm": 0.17645882070064545,
+ "learning_rate": 0.0,
+ "loss": 1.1926,
+ "step": 600
+ },
+ {
+ "epoch": 1.3593661573288058,
+ "eval_loss": 0.09790434688329697,
+ "eval_runtime": 67.234,
+ "eval_samples_per_second": 11.066,
+ "eval_steps_per_second": 2.766,
+ "step": 600
  }
  ],
  "logging_steps": 10,
@@ -425,12 +554,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 5.258291218951373e+17,
+ "total_flos": 7.010086580845117e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null