Nexspear committed on
Commit
cab8b08
·
verified ·
1 Parent(s): 168c7ad

Training in progress, step 340, checkpoint

Browse files
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:d6d1c4fde2f266b83ee75a31f2c1cac5711ee98e37af42b245790580334c90c2
3
  size 161533192
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7926dd99c73d35a13f1101207b9754cfc1916a65b0abe3790ea09597e34ef6fb
3
  size 161533192
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:3166a88f4138ad1a95a566b53036461b3f0a9a711d159eb1e425081fb035c0c3
3
  size 82461044
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28165d9a0a9493a5d11b701db688ae529827d3d964848bb01ffd43a109f6204d
3
  size 82461044
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:54cea1660f239fc9f7c87fe7973e1ccf8c48c0413ce43e9a2a732dfb23d322f6
3
  size 14244
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0c10ef2dae6ff9f7e69def029d8126d94a9148c8620584ca8bb5b004d68fc930
3
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:542eeb761eff9bd2c88163850a5018d7ed947bdab57ea917e6e376b6cb0c0259
3
  size 1064
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6999f9aad8d44fbf7db1d80d56ad86630abb8e28a7187e80ed24f8546462146
3
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 0.10316925151719487,
5
  "eval_steps": 34,
6
- "global_step": 306,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
@@ -801,6 +801,91 @@
801
  "eval_samples_per_second": 14.618,
802
  "eval_steps_per_second": 1.829,
803
  "step": 306
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
804
  }
805
  ],
806
  "logging_steps": 3,
@@ -820,7 +905,7 @@
820
  "attributes": {}
821
  }
822
  },
823
- "total_flos": 4.27811680316031e+17,
824
  "train_batch_size": 8,
825
  "trial_name": null,
826
  "trial_params": null
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 0.11463250168577209,
5
  "eval_steps": 34,
6
+ "global_step": 340,
7
  "is_hyper_param_search": false,
8
  "is_local_process_zero": true,
9
  "is_world_process_zero": true,
 
801
  "eval_samples_per_second": 14.618,
802
  "eval_steps_per_second": 1.829,
803
  "step": 306
804
+ },
805
+ {
806
+ "epoch": 0.10418071476736346,
807
+ "grad_norm": 1.1861456632614136,
808
+ "learning_rate": 6.421379363065142e-06,
809
+ "loss": 0.3585,
810
+ "step": 309
811
+ },
812
+ {
813
+ "epoch": 0.10519217801753203,
814
+ "grad_norm": 1.4933161735534668,
815
+ "learning_rate": 6.022586521156715e-06,
816
+ "loss": 0.3932,
817
+ "step": 312
818
+ },
819
+ {
820
+ "epoch": 0.10620364126770061,
821
+ "grad_norm": 1.3899950981140137,
822
+ "learning_rate": 5.634875954308638e-06,
823
+ "loss": 0.5755,
824
+ "step": 315
825
+ },
826
+ {
827
+ "epoch": 0.10721510451786918,
828
+ "grad_norm": 1.18565833568573,
829
+ "learning_rate": 5.258474074573877e-06,
830
+ "loss": 0.3489,
831
+ "step": 318
832
+ },
833
+ {
834
+ "epoch": 0.10822656776803777,
835
+ "grad_norm": 1.3438397645950317,
836
+ "learning_rate": 4.893600690050579e-06,
837
+ "loss": 0.3942,
838
+ "step": 321
839
+ },
840
+ {
841
+ "epoch": 0.10923803101820634,
842
+ "grad_norm": 1.1274303197860718,
843
+ "learning_rate": 4.540468876520323e-06,
844
+ "loss": 0.3829,
845
+ "step": 324
846
+ },
847
+ {
848
+ "epoch": 0.11024949426837491,
849
+ "grad_norm": 1.4088205099105835,
850
+ "learning_rate": 4.199284853017896e-06,
851
+ "loss": 0.4908,
852
+ "step": 327
853
+ },
854
+ {
855
+ "epoch": 0.1112609575185435,
856
+ "grad_norm": 1.07282292842865,
857
+ "learning_rate": 3.8702478614051355e-06,
858
+ "loss": 0.4099,
859
+ "step": 330
860
+ },
861
+ {
862
+ "epoch": 0.11227242076871206,
863
+ "grad_norm": 1.2825994491577148,
864
+ "learning_rate": 3.5535500500193357e-06,
865
+ "loss": 0.4394,
866
+ "step": 333
867
+ },
868
+ {
869
+ "epoch": 0.11328388401888065,
870
+ "grad_norm": 1.3308430910110474,
871
+ "learning_rate": 3.249376361464021e-06,
872
+ "loss": 0.4331,
873
+ "step": 336
874
+ },
875
+ {
876
+ "epoch": 0.11429534726904922,
877
+ "grad_norm": 1.5105128288269043,
878
+ "learning_rate": 2.957904424607652e-06,
879
+ "loss": 0.4308,
880
+ "step": 339
881
+ },
882
+ {
883
+ "epoch": 0.11463250168577209,
884
+ "eval_loss": 0.4016662836074829,
885
+ "eval_runtime": 341.6809,
886
+ "eval_samples_per_second": 14.619,
887
+ "eval_steps_per_second": 1.829,
888
+ "step": 340
889
  }
890
  ],
891
  "logging_steps": 3,
 
905
  "attributes": {}
906
  }
907
  },
908
+ "total_flos": 4.7534631146225664e+17,
909
  "train_batch_size": 8,
910
  "trial_name": null,
911
  "trial_params": null