farmery committed (verified)
Commit ac82103 · Parent: 848e514

Training in progress, step 119, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2161b68ccac46e6adcd39ba08b77a3f91e0f2d1d756864d956937a8c5a8b4da8
+oid sha256:e18428ad05cb4e6c1b584c058e2fe14da0c9e36dd875f9be9df4c20094727584
 size 319876032
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1b8ce07d2a26cfd741e33d4f6cba3520b2e9931ec74e73202768eb8eab525c03
+oid sha256:801b16c9421594dade7766d149e9314adb312c94db62033937687ed3a8bf4de2
 size 640009682
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b05eabad5d38bd773f86e444a7e0ad02897d6a6f894ea79a1e09dc6681d7675a
+oid sha256:d4b765b05d3c53e16a9d2d02ec93a05005c1486765b1f6eebc4e14ba587d1870
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4ab04f831dc42a73b689ad827233bb992b90cde844689cfc48c5f5c480364d1a
+oid sha256:c60ebc4332412fbe392efb0482c16dbb3e31176bd7b2c64af371d9fe15a93e19
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9bd9fc477f33ded6549ad4af59b4ff8a5b9d1b72a63718aa33c37c0bd747f03a
+oid sha256:9875d5ce88db2ca80c244cdf4f526dc596d572f6e9ae1e7d390c8b845fef5468
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9b005e28f81720d7af39998e1c738c455b03a1a40d3b8af1f0f1f7b251d2cd50
+oid sha256:14edcf96bcbc38162e4db8b93a1c2620863f043c99df62f734efd4663e4b759e
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bb65275071074e7f8c415d4025dd63db767eccf1b1b13eb40847f779864aedd2
+oid sha256:ef75e921f866e994eb07f566eae5b88002af348a8ed338e0962d48b1c7d30dc1
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.8136073350906372,
   "best_model_checkpoint": "miner_id_24/checkpoint-100",
-  "epoch": 1.5775203352230713,
+  "epoch": 1.8772491989154547,
   "eval_steps": 25,
-  "global_step": 100,
+  "global_step": 119,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -747,6 +747,139 @@
       "eval_samples_per_second": 50.43,
       "eval_steps_per_second": 13.112,
       "step": 100
+    },
+    {
+      "epoch": 1.593295538575302,
+      "grad_norm": 0.23437979817390442,
+      "learning_rate": 1.5423681195707997e-05,
+      "loss": 0.8343,
+      "step": 101
+    },
+    {
+      "epoch": 1.6090707419275327,
+      "grad_norm": 0.23647752404212952,
+      "learning_rate": 1.484855926217227e-05,
+      "loss": 0.9165,
+      "step": 102
+    },
+    {
+      "epoch": 1.6248459452797634,
+      "grad_norm": 0.18379107117652893,
+      "learning_rate": 1.4303927768609015e-05,
+      "loss": 0.8544,
+      "step": 103
+    },
+    {
+      "epoch": 1.6406211486319942,
+      "grad_norm": 0.18732428550720215,
+      "learning_rate": 1.3790200300522413e-05,
+      "loss": 0.875,
+      "step": 104
+    },
+    {
+      "epoch": 1.6563963519842249,
+      "grad_norm": 0.16876737773418427,
+      "learning_rate": 1.330776697533392e-05,
+      "loss": 0.8525,
+      "step": 105
+    },
+    {
+      "epoch": 1.6721715553364556,
+      "grad_norm": 0.1615445613861084,
+      "learning_rate": 1.2856994146132542e-05,
+      "loss": 0.8207,
+      "step": 106
+    },
+    {
+      "epoch": 1.6879467586886863,
+      "grad_norm": 0.17801810801029205,
+      "learning_rate": 1.2438224123471442e-05,
+      "loss": 0.8531,
+      "step": 107
+    },
+    {
+      "epoch": 1.703721962040917,
+      "grad_norm": 0.18149054050445557,
+      "learning_rate": 1.2051774915422163e-05,
+      "loss": 0.9099,
+      "step": 108
+    },
+    {
+      "epoch": 1.7194971653931477,
+      "grad_norm": 0.1865503042936325,
+      "learning_rate": 1.1697939986083733e-05,
+      "loss": 0.8604,
+      "step": 109
+    },
+    {
+      "epoch": 1.7352723687453784,
+      "grad_norm": 0.18773400783538818,
+      "learning_rate": 1.1376988032730134e-05,
+      "loss": 0.8526,
+      "step": 110
+    },
+    {
+      "epoch": 1.751047572097609,
+      "grad_norm": 0.19192053377628326,
+      "learning_rate": 1.1089162781765398e-05,
+      "loss": 0.833,
+      "step": 111
+    },
+    {
+      "epoch": 1.7668227754498398,
+      "grad_norm": 0.23631104826927185,
+      "learning_rate": 1.0834682803641197e-05,
+      "loss": 0.8052,
+      "step": 112
+    },
+    {
+      "epoch": 1.7825979788020705,
+      "grad_norm": 0.22724592685699463,
+      "learning_rate": 1.0613741346877497e-05,
+      "loss": 0.804,
+      "step": 113
+    },
+    {
+      "epoch": 1.7983731821543012,
+      "grad_norm": 0.1648131012916565,
+      "learning_rate": 1.0426506191312355e-05,
+      "loss": 0.8892,
+      "step": 114
+    },
+    {
+      "epoch": 1.814148385506532,
+      "grad_norm": 0.19587408006191254,
+      "learning_rate": 1.0273119520692275e-05,
+      "loss": 0.8797,
+      "step": 115
+    },
+    {
+      "epoch": 1.8299235888587626,
+      "grad_norm": 0.20268595218658447,
+      "learning_rate": 1.0153697814699859e-05,
+      "loss": 0.8665,
+      "step": 116
+    },
+    {
+      "epoch": 1.8456987922109933,
+      "grad_norm": 0.2090834677219391,
+      "learning_rate": 1.0068331760500774e-05,
+      "loss": 0.8841,
+      "step": 117
+    },
+    {
+      "epoch": 1.861473995563224,
+      "grad_norm": 0.18287914991378784,
+      "learning_rate": 1.0017086183877188e-05,
+      "loss": 0.8637,
+      "step": 118
+    },
+    {
+      "epoch": 1.8772491989154547,
+      "grad_norm": 0.18256083130836487,
+      "learning_rate": 1e-05,
+      "loss": 0.8543,
+      "step": 119
     }
   ],
   "logging_steps": 1,
@@ -770,12 +903,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.1175706767654912e+18,
+  "total_flos": 1.3299091053509345e+18,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null