farmery committed on
Commit b54a161 · verified · 1 Parent(s): da75442

Training in progress, step 150, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4b271712254e01f3a756f80c9d91649a68f589f8083c68e28efa6bf81649af5f
+ oid sha256:0b0d11a0c9563fc9b309894a3ea72052d18d47c5d2c7383d7f135a6f7e75bbf6
  size 13587864
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4d70e0c506c43b6dd7bfc52aa23ed7049ddf97443e93d45fefb733a105c2b0c8
+ oid sha256:ccc19d3760e42bf952fe3f4416369525889e94276e27e85715f534215e6d1fbc
  size 27273018
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c4fea74c51f4405caaaae971a4ccfc94a699f62e8257c2d08259a26bff3fc4eb
+ oid sha256:80ad0144eca33b04b3d4f2b29d4af62fc22d673739ee2272250b1b21f6f349c2
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8287c8e13a42f467f666595aef17b0751f60576eb55d01ba59d28223687d2973
+ oid sha256:f5f01b3d4386ea634ed6767aa1edfa6779ba81e1725f3e3e0c0b9a9f92776b83
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b840cecfb7dec70c76a70327d64f5f23b2d66b0b6a1f665ecb2134abed6ccdbb
+ oid sha256:3a5bb99b77e13ee757e01e8e860bc41eebc1421459e9c89d0a23b81def884f78
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:841574a58757d90b95116e1fd5d2951f131533acbad72a77bcd1a82810ab44b9
+ oid sha256:2e625baf0c17e4ad56e8b11a83cbda177c07e59a3d18f926b7eb718549b22f65
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1b421b30093a741efda571bdeb6770d480a0573a9ee33dd469ec29cc944e02e2
+ oid sha256:3a56f9e526fbefceff44eaa8fd41d4c73340d4c2a80ffbd5f860cb7089f42cc1
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 1.9786142110824585,
- "best_model_checkpoint": "miner_id_24/checkpoint-100",
- "epoch": 1.3034623217922607,
+ "best_metric": 1.9268525838851929,
+ "best_model_checkpoint": "miner_id_24/checkpoint-150",
+ "epoch": 1.955193482688391,
  "eval_steps": 25,
- "global_step": 100,
+ "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -747,6 +747,372 @@
  "eval_samples_per_second": 179.386,
  "eval_steps_per_second": 46.64,
  "step": 100
+ },
+ {
+ "epoch": 1.3164969450101833,
+ "grad_norm": 0.7522782683372498,
+ "learning_rate": 0.00010497425081003482,
+ "loss": 2.1272,
+ "step": 101
+ },
+ {
+ "epoch": 1.329531568228106,
+ "grad_norm": 0.7502726316452026,
+ "learning_rate": 0.0001024550352588187,
+ "loss": 2.2586,
+ "step": 102
+ },
+ {
+ "epoch": 1.3425661914460285,
+ "grad_norm": 0.4233168661594391,
+ "learning_rate": 9.996325399626841e-05,
+ "loss": 1.871,
+ "step": 103
+ },
+ {
+ "epoch": 1.355600814663951,
+ "grad_norm": 0.5849415063858032,
+ "learning_rate": 9.750000000000003e-05,
+ "loss": 1.9998,
+ "step": 104
+ },
+ {
+ "epoch": 1.3686354378818737,
+ "grad_norm": 0.5961419343948364,
+ "learning_rate": 9.506635373462745e-05,
+ "loss": 2.0286,
+ "step": 105
+ },
+ {
+ "epoch": 1.3816700610997963,
+ "grad_norm": 0.5850256085395813,
+ "learning_rate": 9.266338267783541e-05,
+ "loss": 1.9748,
+ "step": 106
+ },
+ {
+ "epoch": 1.3947046843177189,
+ "grad_norm": 0.5355044007301331,
+ "learning_rate": 9.029214085214857e-05,
+ "loss": 1.9777,
+ "step": 107
+ },
+ {
+ "epoch": 1.4077393075356415,
+ "grad_norm": 0.40355584025382996,
+ "learning_rate": 8.795366836260173e-05,
+ "loss": 1.9819,
+ "step": 108
+ },
+ {
+ "epoch": 1.420773930753564,
+ "grad_norm": 0.30112820863723755,
+ "learning_rate": 8.564899094051614e-05,
+ "loss": 1.9478,
+ "step": 109
+ },
+ {
+ "epoch": 1.4338085539714869,
+ "grad_norm": 0.286545068025589,
+ "learning_rate": 8.33791194935794e-05,
+ "loss": 1.9474,
+ "step": 110
+ },
+ {
+ "epoch": 1.4468431771894092,
+ "grad_norm": 0.3926275372505188,
+ "learning_rate": 8.11450496624281e-05,
+ "loss": 1.9474,
+ "step": 111
+ },
+ {
+ "epoch": 1.459877800407332,
+ "grad_norm": 0.5009521245956421,
+ "learning_rate": 7.894776138392688e-05,
+ "loss": 2.0219,
+ "step": 112
+ },
+ {
+ "epoch": 1.4729124236252547,
+ "grad_norm": 0.6338520646095276,
+ "learning_rate": 7.678821846133576e-05,
+ "loss": 2.0353,
+ "step": 113
+ },
+ {
+ "epoch": 1.4859470468431772,
+ "grad_norm": 0.7013182044029236,
+ "learning_rate": 7.466736814155418e-05,
+ "loss": 1.62,
+ "step": 114
+ },
+ {
+ "epoch": 1.4989816700610998,
+ "grad_norm": 0.40693536400794983,
+ "learning_rate": 7.258614069962701e-05,
+ "loss": 2.5179,
+ "step": 115
+ },
+ {
+ "epoch": 1.5120162932790224,
+ "grad_norm": 0.31738847494125366,
+ "learning_rate": 7.054544903069565e-05,
+ "loss": 1.9209,
+ "step": 116
+ },
+ {
+ "epoch": 1.525050916496945,
+ "grad_norm": 0.3408113718032837,
+ "learning_rate": 6.854618824957157e-05,
+ "loss": 1.9408,
+ "step": 117
+ },
+ {
+ "epoch": 1.5380855397148676,
+ "grad_norm": 0.30903321504592896,
+ "learning_rate": 6.658923529810946e-05,
+ "loss": 1.9548,
+ "step": 118
+ },
+ {
+ "epoch": 1.5511201629327902,
+ "grad_norm": 0.30865398049354553,
+ "learning_rate": 6.467544856055175e-05,
+ "loss": 1.9355,
+ "step": 119
+ },
+ {
+ "epoch": 1.5641547861507128,
+ "grad_norm": 0.2949885427951813,
+ "learning_rate": 6.28056674870129e-05,
+ "loss": 1.9534,
+ "step": 120
+ },
+ {
+ "epoch": 1.5771894093686354,
+ "grad_norm": 0.28894004225730896,
+ "learning_rate": 6.098071222526847e-05,
+ "loss": 1.9511,
+ "step": 121
+ },
+ {
+ "epoch": 1.590224032586558,
+ "grad_norm": 0.2710161507129669,
+ "learning_rate": 5.9201383261011636e-05,
+ "loss": 1.9424,
+ "step": 122
+ },
+ {
+ "epoch": 1.6032586558044808,
+ "grad_norm": 0.30530881881713867,
+ "learning_rate": 5.7468461066733505e-05,
+ "loss": 1.9794,
+ "step": 123
+ },
+ {
+ "epoch": 1.6162932790224032,
+ "grad_norm": 0.3637678027153015,
+ "learning_rate": 5.578270575938211e-05,
+ "loss": 1.9562,
+ "step": 124
+ },
+ {
+ "epoch": 1.629327902240326,
+ "grad_norm": 0.4117651581764221,
+ "learning_rate": 5.4144856766949957e-05,
+ "loss": 1.9505,
+ "step": 125
+ },
+ {
+ "epoch": 1.629327902240326,
+ "eval_loss": 1.9424113035202026,
+ "eval_runtime": 0.2795,
+ "eval_samples_per_second": 178.903,
+ "eval_steps_per_second": 46.515,
+ "step": 125
+ },
+ {
+ "epoch": 1.6423625254582483,
+ "grad_norm": 0.5891804695129395,
+ "learning_rate": 5.255563250413657e-05,
+ "loss": 2.0759,
+ "step": 126
+ },
+ {
+ "epoch": 1.6553971486761712,
+ "grad_norm": 0.6447423696517944,
+ "learning_rate": 5.1015730057227994e-05,
+ "loss": 2.2958,
+ "step": 127
+ },
+ {
+ "epoch": 1.6684317718940935,
+ "grad_norm": 0.3930685818195343,
+ "learning_rate": 4.952582487833161e-05,
+ "loss": 1.817,
+ "step": 128
+ },
+ {
+ "epoch": 1.6814663951120163,
+ "grad_norm": 0.43411514163017273,
+ "learning_rate": 4.808657048910077e-05,
+ "loss": 1.9874,
+ "step": 129
+ },
+ {
+ "epoch": 1.694501018329939,
+ "grad_norm": 0.4237060546875,
+ "learning_rate": 4.669859819407844e-05,
+ "loss": 1.9388,
+ "step": 130
+ },
+ {
+ "epoch": 1.7075356415478615,
+ "grad_norm": 0.3899717926979065,
+ "learning_rate": 4.536251680378601e-05,
+ "loss": 1.9506,
+ "step": 131
+ },
+ {
+ "epoch": 1.7205702647657841,
+ "grad_norm": 0.3600156009197235,
+ "learning_rate": 4.407891236767926e-05,
+ "loss": 1.9307,
+ "step": 132
+ },
+ {
+ "epoch": 1.7336048879837067,
+ "grad_norm": 0.29930025339126587,
+ "learning_rate": 4.2848347917087386e-05,
+ "loss": 1.9395,
+ "step": 133
+ },
+ {
+ "epoch": 1.7466395112016293,
+ "grad_norm": 0.28485575318336487,
+ "learning_rate": 4.167136321824887e-05,
+ "loss": 1.9331,
+ "step": 134
+ },
+ {
+ "epoch": 1.759674134419552,
+ "grad_norm": 0.28593364357948303,
+ "learning_rate": 4.054847453555244e-05,
+ "loss": 1.9109,
+ "step": 135
+ },
+ {
+ "epoch": 1.7727087576374747,
+ "grad_norm": 0.33563509583473206,
+ "learning_rate": 3.948017440508607e-05,
+ "loss": 1.94,
+ "step": 136
+ },
+ {
+ "epoch": 1.785743380855397,
+ "grad_norm": 0.41171127557754517,
+ "learning_rate": 3.846693141859465e-05,
+ "loss": 1.9329,
+ "step": 137
+ },
+ {
+ "epoch": 1.79877800407332,
+ "grad_norm": 0.5771033763885498,
+ "learning_rate": 3.7509190017940066e-05,
+ "loss": 2.0034,
+ "step": 138
+ },
+ {
+ "epoch": 1.8118126272912423,
+ "grad_norm": 0.566257655620575,
+ "learning_rate": 3.660737030015427e-05,
+ "loss": 1.5604,
+ "step": 139
+ },
+ {
+ "epoch": 1.824847250509165,
+ "grad_norm": 0.44189655780792236,
+ "learning_rate": 3.576186783317092e-05,
+ "loss": 2.4998,
+ "step": 140
+ },
+ {
+ "epoch": 1.8378818737270874,
+ "grad_norm": 0.3839716911315918,
+ "learning_rate": 3.4973053482316156e-05,
+ "loss": 1.9946,
+ "step": 141
+ },
+ {
+ "epoch": 1.8509164969450103,
+ "grad_norm": 0.3798718750476837,
+ "learning_rate": 3.4241273247634805e-05,
+ "loss": 1.9378,
+ "step": 142
+ },
+ {
+ "epoch": 1.8639511201629326,
+ "grad_norm": 0.33077380061149597,
+ "learning_rate": 3.356684811212336e-05,
+ "loss": 1.9503,
+ "step": 143
+ },
+ {
+ "epoch": 1.8769857433808554,
+ "grad_norm": 0.3309776186943054,
+ "learning_rate": 3.2950073900936234e-05,
+ "loss": 1.9187,
+ "step": 144
+ },
+ {
+ "epoch": 1.890020366598778,
+ "grad_norm": 0.2614622116088867,
+ "learning_rate": 3.2391221151627036e-05,
+ "loss": 1.9367,
+ "step": 145
+ },
+ {
+ "epoch": 1.9030549898167006,
+ "grad_norm": 0.27304932475090027,
+ "learning_rate": 3.1890534995481836e-05,
+ "loss": 1.9302,
+ "step": 146
+ },
+ {
+ "epoch": 1.9160896130346232,
+ "grad_norm": 0.26790907979011536,
+ "learning_rate": 3.144823504999658e-05,
+ "loss": 1.9228,
+ "step": 147
+ },
+ {
+ "epoch": 1.9291242362525458,
+ "grad_norm": 0.3259439468383789,
+ "learning_rate": 3.10645153225455e-05,
+ "loss": 1.9133,
+ "step": 148
+ },
+ {
+ "epoch": 1.9421588594704684,
+ "grad_norm": 0.40042850375175476,
+ "learning_rate": 3.0739544125283105e-05,
+ "loss": 1.9946,
+ "step": 149
+ },
+ {
+ "epoch": 1.955193482688391,
+ "grad_norm": 0.49017587304115295,
+ "learning_rate": 3.047346400131691e-05,
+ "loss": 1.9776,
+ "step": 150
+ },
+ {
+ "epoch": 1.955193482688391,
+ "eval_loss": 1.9268525838851929,
+ "eval_runtime": 0.2825,
+ "eval_samples_per_second": 176.994,
+ "eval_steps_per_second": 46.019,
+ "step": 150
  }
  ],
  "logging_steps": 1,
@@ -775,7 +1141,7 @@
  "attributes": {}
  }
  },
- "total_flos": 4.44280275468288e+16,
+ "total_flos": 6.66420413202432e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null