besimray committed · verified
Commit ea07abc · 1 Parent(s): 86f5b1d

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:34c32e60c57250c51934f542ff2e75446d1c4cf392e777594b077e17ea6ad239
+ oid sha256:94ac5a06868d4549d8ca809aceb34d2f9203c57f47f62226617dba3c46e929b9
  size 125048
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:78cd83f855bd80bf0c0a895685a76b346681eb6199b8673ce02a1764809414f1
+ oid sha256:a1b147b7a755f582d5d04042105d6e705c48a46753db7af1c5632e3edd95f8c0
  size 162868
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:820fcfa3250b03ccb2011d1c28382f78231a7cd53b56f3e8494ea4c7f9ea8506
+ oid sha256:67ade7c6f7575905253c20fecd77ea3dd7d528c69921646b6c58b5e79233ee72
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:23b27ab0ae2b9af6f3d4c84cdaf8b0fc887acf71f8f726b270a3bce2845000a9
+ oid sha256:d5c84ec0ff3c8c6aa13b25568668096db118f67ce80a9fa015a625446606f15d
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 11.034589767456055,
- "best_model_checkpoint": "miner_id_24/checkpoint-90",
- "epoch": 0.004067704684639895,
+ "best_metric": 11.032732963562012,
+ "best_model_checkpoint": "miner_id_24/checkpoint-100",
+ "epoch": 0.004519671871822106,
  "eval_steps": 5,
- "global_step": 90,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -789,6 +789,92 @@
  "eval_samples_per_second": 52.835,
  "eval_steps_per_second": 26.421,
  "step": 90
+ },
+ {
+ "epoch": 0.004112901403358116,
+ "grad_norm": 0.5208268761634827,
+ "learning_rate": 0.00018681546242521786,
+ "loss": 44.1346,
+ "step": 91
+ },
+ {
+ "epoch": 0.004158098122076338,
+ "grad_norm": 0.6029201149940491,
+ "learning_rate": 0.00018649548579446936,
+ "loss": 44.152,
+ "step": 92
+ },
+ {
+ "epoch": 0.004203294840794558,
+ "grad_norm": 0.468414843082428,
+ "learning_rate": 0.0001861719536730795,
+ "loss": 44.117,
+ "step": 93
+ },
+ {
+ "epoch": 0.004248491559512779,
+ "grad_norm": 0.3942670226097107,
+ "learning_rate": 0.00018584487936018661,
+ "loss": 44.137,
+ "step": 94
+ },
+ {
+ "epoch": 0.004293688278231,
+ "grad_norm": 0.49822431802749634,
+ "learning_rate": 0.00018551427630053463,
+ "loss": 44.119,
+ "step": 95
+ },
+ {
+ "epoch": 0.004293688278231,
+ "eval_loss": 11.03354549407959,
+ "eval_runtime": 176.0642,
+ "eval_samples_per_second": 52.918,
+ "eval_steps_per_second": 26.462,
+ "step": 95
+ },
+ {
+ "epoch": 0.004338884996949221,
+ "grad_norm": 0.5527846813201904,
+ "learning_rate": 0.00018518015808392045,
+ "loss": 44.0893,
+ "step": 96
+ },
+ {
+ "epoch": 0.004384081715667443,
+ "grad_norm": 0.5725367665290833,
+ "learning_rate": 0.00018484253844463526,
+ "loss": 44.1162,
+ "step": 97
+ },
+ {
+ "epoch": 0.004429278434385664,
+ "grad_norm": 0.49278348684310913,
+ "learning_rate": 0.00018450143126090015,
+ "loss": 44.1031,
+ "step": 98
+ },
+ {
+ "epoch": 0.004474475153103885,
+ "grad_norm": 0.4361265301704407,
+ "learning_rate": 0.00018415685055429533,
+ "loss": 44.1386,
+ "step": 99
+ },
+ {
+ "epoch": 0.004519671871822106,
+ "grad_norm": 0.397714763879776,
+ "learning_rate": 0.00018380881048918405,
+ "loss": 44.1072,
+ "step": 100
+ },
+ {
+ "epoch": 0.004519671871822106,
+ "eval_loss": 11.032732963562012,
+ "eval_runtime": 176.1844,
+ "eval_samples_per_second": 52.882,
+ "eval_steps_per_second": 26.444,
+ "step": 100
  }
  ],
  "logging_steps": 1,
@@ -817,7 +903,7 @@
  "attributes": {}
  }
  },
- "total_flos": 945605836800.0,
+ "total_flos": 1050673152000.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null