error577 committed
Commit f00de32 · verified · 1 Parent(s): 857135f

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:261f8727997945eb87e0bb82facc9b388349955b33cdb62b6807af15ec1fdceb
+oid sha256:afedcef01aff06d1a1629af9649652465294544d1766b2b66e5256f4566d41c0
 size 6804608
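
adapter_model.safetensors is tracked with Git LFS, so the commit only rewrites the pointer file: the oid becomes the sha256 of the new adapter weights, while the size stays 6804608 bytes because the adapter's serialized shape does not change between steps. A minimal sketch (the local path is an assumption; it presumes the LFS object has already been fetched, e.g. with git lfs pull) for checking that a local copy matches the new pointer:

    import hashlib

    # Hypothetical local path; adjust to wherever the checkpoint was pulled.
    path = "last-checkpoint/adapter_model.safetensors"

    sha = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)

    # The pointer's oid is the sha256 of the file contents, so these should match.
    print(sha.hexdigest() == "afedcef01aff06d1a1629af9649652465294544d1766b2b66e5256f4566d41c0")
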
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a6c1e6436794826f46c90dbc8a3888e0168b34c222f452abc2c1e417b5fed11c
+oid sha256:842bcbe5659887f2e97d02481adc2aece3d74b7321b3d04cb710caf350a1400c
 size 3633530
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:682fa06fe8ef6e09ffdeef985646187c38172ce94e6a1d0b34ec5f6df8451d9d
+oid sha256:9e1e3cb587636d18b6ef8df94bb236a247027fd147ec9de2e9e9f59d9d4f903a
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f23e2214bcafb439ebc7528dcc283ef6218d509a276c0baff0743503ecbe3d92
+oid sha256:49d60a69e2379be2053e816cbaff31e6c931b5922dd86c71c9eaf473299cbf62
 size 1064
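
optimizer.pt, scheduler.pt and rng_state.pth carry the non-model training state, which is what lets the run resume exactly from step 100 (typically by passing resume_from_checkpoint="last-checkpoint" to Trainer.train in transformers). A small sketch, assuming these are the usual torch-pickled state dicts written by Trainer, for inspecting them locally:

    import torch

    # Load on CPU so the checkpoint can be inspected without a GPU.
    optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu")
    scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu")

    # A standard optimizer state_dict keeps per-parameter buffers under "state"
    # and hyperparameters (lr, betas, weight decay, ...) under "param_groups".
    print(list(optimizer_state.keys()))
    print(scheduler_state)
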
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.2577873254564984,
+  "epoch": 0.34371643394199786,
   "eval_steps": 50,
-  "global_step": 75,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -548,6 +548,189 @@
       "learning_rate": 1.7860619515673033e-05,
       "loss": 2.2528,
       "step": 75
+    },
+    {
+      "epoch": 0.2612244897959184,
+      "grad_norm": 0.31380581855773926,
+      "learning_rate": 1.6543469682057106e-05,
+      "loss": 2.2103,
+      "step": 76
+    },
+    {
+      "epoch": 0.2646616541353383,
+      "grad_norm": 0.35320228338241577,
+      "learning_rate": 1.526708147705013e-05,
+      "loss": 2.2328,
+      "step": 77
+    },
+    {
+      "epoch": 0.2680988184747583,
+      "grad_norm": 0.3124861419200897,
+      "learning_rate": 1.4033009983067452e-05,
+      "loss": 2.2054,
+      "step": 78
+    },
+    {
+      "epoch": 0.2715359828141783,
+      "grad_norm": 0.4113110899925232,
+      "learning_rate": 1.2842758726130283e-05,
+      "loss": 2.0905,
+      "step": 79
+    },
+    {
+      "epoch": 0.2749731471535983,
+      "grad_norm": 0.47795024514198303,
+      "learning_rate": 1.1697777844051105e-05,
+      "loss": 2.3201,
+      "step": 80
+    },
+    {
+      "epoch": 0.27841031149301826,
+      "grad_norm": 0.29258468747138977,
+      "learning_rate": 1.0599462319663905e-05,
+      "loss": 2.2022,
+      "step": 81
+    },
+    {
+      "epoch": 0.28184747583243824,
+      "grad_norm": 0.28537437319755554,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 2.2611,
+      "step": 82
+    },
+    {
+      "epoch": 0.28528464017185823,
+      "grad_norm": 0.3414091467857361,
+      "learning_rate": 8.548121372247918e-06,
+      "loss": 2.2069,
+      "step": 83
+    },
+    {
+      "epoch": 0.2887218045112782,
+      "grad_norm": 0.39266514778137207,
+      "learning_rate": 7.597595192178702e-06,
+      "loss": 2.2485,
+      "step": 84
+    },
+    {
+      "epoch": 0.2921589688506982,
+      "grad_norm": 0.5153230428695679,
+      "learning_rate": 6.698729810778065e-06,
+      "loss": 2.1257,
+      "step": 85
+    },
+    {
+      "epoch": 0.2955961331901181,
+      "grad_norm": 0.31598207354545593,
+      "learning_rate": 5.852620357053651e-06,
+      "loss": 2.1899,
+      "step": 86
+    },
+    {
+      "epoch": 0.2990332975295381,
+      "grad_norm": 0.3173138201236725,
+      "learning_rate": 5.060297685041659e-06,
+      "loss": 2.209,
+      "step": 87
+    },
+    {
+      "epoch": 0.3024704618689581,
+      "grad_norm": 0.28669506311416626,
+      "learning_rate": 4.322727117869951e-06,
+      "loss": 2.1869,
+      "step": 88
+    },
+    {
+      "epoch": 0.3059076262083781,
+      "grad_norm": 0.31095296144485474,
+      "learning_rate": 3.6408072716606346e-06,
+      "loss": 2.164,
+      "step": 89
+    },
+    {
+      "epoch": 0.30934479054779807,
+      "grad_norm": 0.27381399273872375,
+      "learning_rate": 3.0153689607045845e-06,
+      "loss": 2.202,
+      "step": 90
+    },
+    {
+      "epoch": 0.31278195488721805,
+      "grad_norm": 0.4227629601955414,
+      "learning_rate": 2.4471741852423237e-06,
+      "loss": 2.3388,
+      "step": 91
+    },
+    {
+      "epoch": 0.31621911922663803,
+      "grad_norm": 0.4945060908794403,
+      "learning_rate": 1.9369152030840556e-06,
+      "loss": 2.1518,
+      "step": 92
+    },
+    {
+      "epoch": 0.319656283566058,
+      "grad_norm": 0.3266584873199463,
+      "learning_rate": 1.4852136862001764e-06,
+      "loss": 2.3398,
+      "step": 93
+    },
+    {
+      "epoch": 0.323093447905478,
+      "grad_norm": 0.2841348648071289,
+      "learning_rate": 1.0926199633097157e-06,
+      "loss": 2.1788,
+      "step": 94
+    },
+    {
+      "epoch": 0.32653061224489793,
+      "grad_norm": 0.2800915539264679,
+      "learning_rate": 7.596123493895991e-07,
+      "loss": 2.121,
+      "step": 95
+    },
+    {
+      "epoch": 0.3299677765843179,
+      "grad_norm": 0.3364481031894684,
+      "learning_rate": 4.865965629214819e-07,
+      "loss": 2.2739,
+      "step": 96
+    },
+    {
+      "epoch": 0.3334049409237379,
+      "grad_norm": 0.5338711738586426,
+      "learning_rate": 2.7390523158633554e-07,
+      "loss": 2.2574,
+      "step": 97
+    },
+    {
+      "epoch": 0.3368421052631579,
+      "grad_norm": 0.29511675238609314,
+      "learning_rate": 1.2179748700879012e-07,
+      "loss": 2.1717,
+      "step": 98
+    },
+    {
+      "epoch": 0.34027926960257787,
+      "grad_norm": 0.31296125054359436,
+      "learning_rate": 3.04586490452119e-08,
+      "loss": 2.1663,
+      "step": 99
+    },
+    {
+      "epoch": 0.34371643394199786,
+      "grad_norm": 0.2975694537162781,
+      "learning_rate": 0.0,
+      "loss": 2.1862,
+      "step": 100
+    },
+    {
+      "epoch": 0.34371643394199786,
+      "eval_loss": 2.2586705684661865,
+      "eval_runtime": 16.6976,
+      "eval_samples_per_second": 29.345,
+      "eval_steps_per_second": 29.345,
+      "step": 100
     }
   ],
   "logging_steps": 1,
@@ -562,12 +745,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1619041760575488.0,
+  "total_flos": 2162580270612480.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null