besimray committed
Commit d9fedb7 · verified · 1 Parent(s): a879c16

Training in progress, step 110, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:94ac5a06868d4549d8ca809aceb34d2f9203c57f47f62226617dba3c46e929b9
+ oid sha256:eeaa88bb46e23c39f7084bab91c911ab334500ac140d6c97c6230601d4b19146
  size 125048
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a1b147b7a755f582d5d04042105d6e705c48a46753db7af1c5632e3edd95f8c0
+ oid sha256:8bf0396d42d68a643bb80d3f2def95c628f4ac8bdbd074e6fbcc89467a4da1b3
  size 162868
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:67ade7c6f7575905253c20fecd77ea3dd7d528c69921646b6c58b5e79233ee72
+ oid sha256:f3e5a931b951401918bd8850c2797fac01ec2c7ed74710d75a14113c07f23dbe
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d5c84ec0ff3c8c6aa13b25568668096db118f67ce80a9fa015a625446606f15d
+ oid sha256:398198b060b9edcfe93ff59de4a929b40cbc42323ec0afb0426f8d7b821a61c1
  size 1064
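
All four files above are stored through Git LFS, so the commit only touches their pointer files: a three-line stub recording the spec version, the sha256 oid of the actual blob, and its size in bytes, which is why each checkpoint update appears as a single oid change per file. Below is a minimal sketch of checking a locally pulled file against such a pointer; the pointer text is the new adapter_model.safetensors pointer from this commit, and the local path is only an assumption for illustration, not part of the repo.

import hashlib
from pathlib import Path


def parse_lfs_pointer(pointer_text: str) -> dict:
    """Split the 'key value' lines of a Git LFS pointer file into a dict."""
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


def matches_pointer(path: Path, pointer_text: str) -> bool:
    """True if the file at `path` has the size and sha256 oid the pointer records."""
    fields = parse_lfs_pointer(pointer_text)
    expected_oid = fields["oid"].split(":", 1)[1]  # strip the "sha256:" prefix
    expected_size = int(fields["size"])
    data = path.read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid


# Example: the updated adapter weights pointer from this commit (path assumed to exist locally).
pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:eeaa88bb46e23c39f7084bab91c911ab334500ac140d6c97c6230601d4b19146\n"
    "size 125048\n"
)
print(matches_pointer(Path("last-checkpoint/adapter_model.safetensors"), pointer))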
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 11.032732963562012,
- "best_model_checkpoint": "miner_id_24/checkpoint-100",
- "epoch": 0.004519671871822106,
+ "best_metric": 11.030839920043945,
+ "best_model_checkpoint": "miner_id_24/checkpoint-110",
+ "epoch": 0.004971639059004316,
  "eval_steps": 5,
- "global_step": 100,
+ "global_step": 110,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -875,6 +875,92 @@
  "eval_samples_per_second": 52.882,
  "eval_steps_per_second": 26.444,
  "step": 100
+ },
+ {
+ "epoch": 0.004564868590540326,
+ "grad_norm": 0.46195968985557556,
+ "learning_rate": 0.00018345732537213027,
+ "loss": 44.1243,
+ "step": 101
+ },
+ {
+ "epoch": 0.004610065309258548,
+ "grad_norm": 0.4918234348297119,
+ "learning_rate": 0.00018310240965131041,
+ "loss": 44.0833,
+ "step": 102
+ },
+ {
+ "epoch": 0.004655262027976769,
+ "grad_norm": 0.39288461208343506,
+ "learning_rate": 0.00018274407791591966,
+ "loss": 44.0844,
+ "step": 103
+ },
+ {
+ "epoch": 0.00470045874669499,
+ "grad_norm": 0.7819874882698059,
+ "learning_rate": 0.00018238234489557215,
+ "loss": 44.0727,
+ "step": 104
+ },
+ {
+ "epoch": 0.004745655465413211,
+ "grad_norm": 0.4996788203716278,
+ "learning_rate": 0.0001820172254596956,
+ "loss": 44.0926,
+ "step": 105
+ },
+ {
+ "epoch": 0.004745655465413211,
+ "eval_loss": 11.03187370300293,
+ "eval_runtime": 176.1674,
+ "eval_samples_per_second": 52.887,
+ "eval_steps_per_second": 26.446,
+ "step": 105
+ },
+ {
+ "epoch": 0.004790852184131432,
+ "grad_norm": 0.4443046748638153,
+ "learning_rate": 0.00018164873461691986,
+ "loss": 44.1211,
+ "step": 106
+ },
+ {
+ "epoch": 0.0048360489028496535,
+ "grad_norm": 0.6192988753318787,
+ "learning_rate": 0.00018127688751446027,
+ "loss": 44.2023,
+ "step": 107
+ },
+ {
+ "epoch": 0.004881245621567874,
+ "grad_norm": 0.49968671798706055,
+ "learning_rate": 0.00018090169943749476,
+ "loss": 44.1175,
+ "step": 108
+ },
+ {
+ "epoch": 0.004926442340286095,
+ "grad_norm": 0.5411902070045471,
+ "learning_rate": 0.0001805231858085356,
+ "loss": 44.1106,
+ "step": 109
+ },
+ {
+ "epoch": 0.004971639059004316,
+ "grad_norm": 0.7971486449241638,
+ "learning_rate": 0.00018014136218679567,
+ "loss": 44.1488,
+ "step": 110
+ },
+ {
+ "epoch": 0.004971639059004316,
+ "eval_loss": 11.030839920043945,
+ "eval_runtime": 176.4251,
+ "eval_samples_per_second": 52.81,
+ "eval_steps_per_second": 26.408,
+ "step": 110
  }
  ],
  "logging_steps": 1,
@@ -903,7 +989,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1050673152000.0,
+ "total_flos": 1155740467200.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null