mtzig committed
Commit 1ee29ea · verified · 1 Parent(s): e4ead29

Training in progress, step 813, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a652c5fae11ea2adca853738ad01096d957c3c9179219d103154b7cc54bf3ad3
+ oid sha256:82f42dafde8f43d8daef05a4b4d895dfe5a001795fdca9d06910d9afd94e8d12
  size 13648688
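The entries in this commit are Git LFS pointer files rather than the binary payloads themselves: each pointer records the LFS spec version, the sha256 object id of the blob, and its size in bytes. A minimal, illustrative sketch of parsing such a pointer (the helper name and the example path are assumptions, not part of this repository's tooling):

# Minimal sketch: parse a Git LFS pointer file ("version / oid / size" lines)
# into a dict. Assumes the three-line layout shown in this diff.
def read_lfs_pointer(path):
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

# Hypothetical usage against a local clone before `git lfs pull`:
# ptr = read_lfs_pointer("last-checkpoint/adapter_model.safetensors")
# print(ptr.get("oid"), ptr.get("size"))  # e.g. sha256:82f4... 13648688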
last-checkpoint/global_step813/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bad71d1ebc275d7bb3182ad2c097070e419821346aa2bb8bb1e7246ee424872
+ size 20450800
last-checkpoint/global_step813/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00a774541938625f33c41bbeacf87ab4f887899f2f6a4eb4168367163b287002
+ size 20450800
last-checkpoint/global_step813/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e36f27eee3a87894e7dae0012f2d579a1718c83fdfb269d8a1363c039ce9559
+ size 20450800
last-checkpoint/global_step813/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76d12ae5970e0e9ce7b3fb316f04c6ca531d00f5d8adac41f6ae85bb1e7e818b
+ size 20450800
last-checkpoint/global_step813/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bbab6ee33dad0697672121182b18248e538e76ad2257d4886e8a8c975d1b4bf3
+ size 152238
last-checkpoint/global_step813/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8bd4459ed95d081ddede2eb7e8ea970ecf7c6fe0ae8e806ef35f343969d0b393
+ size 152238
last-checkpoint/global_step813/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a83ddf22a5d52c4c48fbcddb99a7578eee212f0e265959f0eb8e814f70d197ba
+ size 152238
last-checkpoint/global_step813/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22651ffccbfea86821a3c658c3091101f4071a2b3a731393b19db211cedf6049
+ size 152238
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step800
+ global_step813
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:87cb02e8cd64657a53dc5b5e254ac5f48ae8d194a60165370a55a19b22db6f41
+ oid sha256:3d52343dd98f7e84cc6844ba76b296b31c694fd088ebee51a2f941bc914e318c
  size 14960
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e24d98488b8ee4c9ae33c4ceab244d33a0b8840dd5bba055edd1d451f13ad848
+ oid sha256:948e8d7f2d8d0352f1c9bc8f2562829625ff1fe1528c837b5de4d5af696571ae
  size 14960
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1d2cd121bedf7b6b76721153b7cef441f373f23cfeb4d690fa2511614d4a9fde
+ oid sha256:700dfabfd03da7587a28bec96ee413548ed6d788958615735d670351ebea332d
  size 14960
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e241a5faad1af0a2c7b86616994b8a556b97dd87a2d34edfc821f52d2592e9ec
+ oid sha256:edcbb08a6204513b16df25179848ad245c36614faceacb1e278073cf0cd7da20
  size 14960
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6a7ed681055e4d6c45b04422cd95ffb42b03b2ad7470799d0056fdac3772ffaa
+ oid sha256:eb808850397266aba5d69abcc6e0dbdcd4eae38bfc8a304204140ecb9e3ba047
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.9837073470642483,
+ "epoch": 0.9996925914540424,
  "eval_steps": 40,
- "global_step": 800,
+ "global_step": 813,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -5859,6 +5859,97 @@
  "eval_samples_per_second": 2.165,
  "eval_steps_per_second": 0.173,
  "step": 800
+ },
+ {
+ "epoch": 0.9849369812480787,
+ "grad_norm": 0.4737970220793768,
+ "learning_rate": 1.329539452907036e-08,
+ "loss": 0.2434,
+ "step": 801
+ },
+ {
+ "epoch": 0.986166615431909,
+ "grad_norm": 0.27628624700617177,
+ "learning_rate": 1.117222008286456e-08,
+ "loss": 0.1413,
+ "step": 802
+ },
+ {
+ "epoch": 0.9873962496157394,
+ "grad_norm": 0.4484471355218967,
+ "learning_rate": 9.233538192963132e-09,
+ "loss": 0.2062,
+ "step": 803
+ },
+ {
+ "epoch": 0.9886258837995696,
+ "grad_norm": 0.3948729656766095,
+ "learning_rate": 7.479384666608802e-09,
+ "loss": 0.1296,
+ "step": 804
+ },
+ {
+ "epoch": 0.9898555179834,
+ "grad_norm": 0.32852852414095784,
+ "learning_rate": 5.909791902823925e-09,
+ "loss": 0.219,
+ "step": 805
+ },
+ {
+ "epoch": 0.9910851521672303,
+ "grad_norm": 0.42167582713648133,
+ "learning_rate": 4.524788891816512e-09,
+ "loss": 0.1242,
+ "step": 806
+ },
+ {
+ "epoch": 0.9923147863510605,
+ "grad_norm": 0.4186302604931252,
+ "learning_rate": 3.3244012144395545e-09,
+ "loss": 0.2346,
+ "step": 807
+ },
+ {
+ "epoch": 0.9935444205348909,
+ "grad_norm": 0.5241641959513683,
+ "learning_rate": 2.3086510417225093e-09,
+ "loss": 0.22,
+ "step": 808
+ },
+ {
+ "epoch": 0.9947740547187212,
+ "grad_norm": 0.5432971120883086,
+ "learning_rate": 1.4775571344605167e-09,
+ "loss": 0.1391,
+ "step": 809
+ },
+ {
+ "epoch": 0.9960036889025515,
+ "grad_norm": 0.36894298305824874,
+ "learning_rate": 8.311348428657884e-10,
+ "loss": 0.1366,
+ "step": 810
+ },
+ {
+ "epoch": 0.9972333230863818,
+ "grad_norm": 0.49102427432843837,
+ "learning_rate": 3.6939610628894396e-10,
+ "loss": 0.3079,
+ "step": 811
+ },
+ {
+ "epoch": 0.9984629572702121,
+ "grad_norm": 0.6049830604783123,
+ "learning_rate": 9.234945299363418e-11,
+ "loss": 0.2577,
+ "step": 812
+ },
+ {
+ "epoch": 0.9996925914540424,
+ "grad_norm": 0.4658029933791719,
+ "learning_rate": 0.0,
+ "loss": 0.2186,
+ "step": 813
  }
  ],
  "logging_steps": 1,
@@ -5873,12 +5964,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 785744118972416.0,
+ "total_flos": 798189487620096.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null