besimray committed
Commit f95acd1 · verified · 1 Parent(s): 426e5dc

Training in progress, step 330, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e47c5e37aeb9ba7c9d351cd6d3f2d34e7a16fbc5e8e214fa50df6016a912f81e
+oid sha256:4bd871ffaaf005b4ae82e3fe0574e188d290bd56611e88b2ee15dcc1170f10b7
 size 125048
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2c8676f7ad327b1a667c04cbff92a64401964795cdbde06f9afb0439e5da7b66
+oid sha256:b3f145bed0430746d11b64162710df80efd4565776390d51f03bf26c95c602c2
 size 162868
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f6d096910032d49eddb6029582f1caa936c833fef6de52cfa0342e08b8dcfcd2
+oid sha256:26b546f9e36af2b502a1f657b47e744aea84f04f078ad1d8590bd9010e8547d4
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f2c4b5b0b5a3cd85dde69bc18b7d6ada534f4917a21cce1323408aed5ed9b4ef
+oid sha256:319ff6bc537233144a9a1321d603147241bbe6a5f63a69d9ed8b2711e764b26e
 size 1064
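
The four binary files above are tracked with Git LFS, so each commit only rewrites the oid sha256: line of the pointer while the size stays the same. As a rough illustration of what those pointer fields mean, here is a minimal Python sketch that parses a pointer file and checks a locally downloaded blob against it; the function names and local paths are illustrative, not part of any LFS tooling.

import hashlib
from pathlib import Path

def read_lfs_pointer(path):
    # A pointer file is just "key value" lines: version, oid, size.
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def blob_matches_pointer(pointer, blob_path):
    # Recompute the sha256 of the downloaded blob and compare it with the
    # oid and size recorded in the pointer.
    data = Path(blob_path).read_bytes()
    digest = "sha256:" + hashlib.sha256(data).hexdigest()
    return digest == pointer["oid"] and len(data) == int(pointer["size"])

# Example (assuming the pointer text and the fetched blob both exist locally):
# ptr = read_lfs_pointer("last-checkpoint/scheduler.pt")
# ok = blob_matches_pointer(ptr, "downloads/scheduler.pt")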
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 11.018913269042969,
-  "best_model_checkpoint": "miner_id_24/checkpoint-320",
-  "epoch": 0.014462949989830737,
+  "best_metric": 11.018866539001465,
+  "best_model_checkpoint": "miner_id_24/checkpoint-330",
+  "epoch": 0.01491491717701295,
   "eval_steps": 5,
-  "global_step": 320,
+  "global_step": 330,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2767,6 +2767,92 @@
       "eval_samples_per_second": 52.829,
       "eval_steps_per_second": 26.417,
       "step": 320
+    },
+    {
+      "epoch": 0.014508146708548959,
+      "grad_norm": 0.47462332248687744,
+      "learning_rate": 5.893623390640621e-05,
+      "loss": 44.0712,
+      "step": 321
+    },
+    {
+      "epoch": 0.01455334342726718,
+      "grad_norm": 0.3999902307987213,
+      "learning_rate": 5.835249009564012e-05,
+      "loss": 44.0985,
+      "step": 322
+    },
+    {
+      "epoch": 0.014598540145985401,
+      "grad_norm": 0.5390244126319885,
+      "learning_rate": 5.777045825088404e-05,
+      "loss": 44.0947,
+      "step": 323
+    },
+    {
+      "epoch": 0.014643736864703622,
+      "grad_norm": 0.5316472053527832,
+      "learning_rate": 5.7190162297187475e-05,
+      "loss": 44.0887,
+      "step": 324
+    },
+    {
+      "epoch": 0.014688933583421843,
+      "grad_norm": 0.43537721037864685,
+      "learning_rate": 5.6611626088244194e-05,
+      "loss": 44.1142,
+      "step": 325
+    },
+    {
+      "epoch": 0.014688933583421843,
+      "eval_loss": 11.018884658813477,
+      "eval_runtime": 176.0785,
+      "eval_samples_per_second": 52.914,
+      "eval_steps_per_second": 26.46,
+      "step": 325
+    },
+    {
+      "epoch": 0.014734130302140065,
+      "grad_norm": 0.42780250310897827,
+      "learning_rate": 5.60348734054118e-05,
+      "loss": 44.0567,
+      "step": 326
+    },
+    {
+      "epoch": 0.014779327020858286,
+      "grad_norm": 0.418026864528656,
+      "learning_rate": 5.545992795673408e-05,
+      "loss": 44.0578,
+      "step": 327
+    },
+    {
+      "epoch": 0.014824523739576507,
+      "grad_norm": 0.507036030292511,
+      "learning_rate": 5.488681337596653e-05,
+      "loss": 44.0708,
+      "step": 328
+    },
+    {
+      "epoch": 0.014869720458294728,
+      "grad_norm": 0.4779205322265625,
+      "learning_rate": 5.431555322160483e-05,
+      "loss": 44.0879,
+      "step": 329
+    },
+    {
+      "epoch": 0.01491491717701295,
+      "grad_norm": 0.48253196477890015,
+      "learning_rate": 5.37461709759165e-05,
+      "loss": 44.005,
+      "step": 330
+    },
+    {
+      "epoch": 0.01491491717701295,
+      "eval_loss": 11.018866539001465,
+      "eval_runtime": 176.4141,
+      "eval_samples_per_second": 52.813,
+      "eval_steps_per_second": 26.409,
+      "step": 330
     }
   ],
   "logging_steps": 1,
@@ -2795,7 +2881,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3362154086400.0,
+  "total_flos": 3467221401600.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null