ToastyPigeon committed
Commit 46f00c8 · verified · 1 Parent(s): 34acba9

Training in progress, step 117, checkpoint

Files changed (28)
  1. last-checkpoint/adapter_model.safetensors +1 -1
  2. last-checkpoint/global_step117/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  3. last-checkpoint/global_step117/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
  4. last-checkpoint/global_step117/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
  5. last-checkpoint/global_step117/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
  6. last-checkpoint/global_step117/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt +3 -0
  7. last-checkpoint/global_step117/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt +3 -0
  8. last-checkpoint/global_step117/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt +3 -0
  9. last-checkpoint/global_step117/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt +3 -0
  10. last-checkpoint/global_step117/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
  11. last-checkpoint/global_step117/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
  12. last-checkpoint/global_step117/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
  13. last-checkpoint/global_step117/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
  14. last-checkpoint/global_step117/zero_pp_rank_4_mp_rank_00_model_states.pt +3 -0
  15. last-checkpoint/global_step117/zero_pp_rank_5_mp_rank_00_model_states.pt +3 -0
  16. last-checkpoint/global_step117/zero_pp_rank_6_mp_rank_00_model_states.pt +3 -0
  17. last-checkpoint/global_step117/zero_pp_rank_7_mp_rank_00_model_states.pt +3 -0
  18. last-checkpoint/latest +1 -1
  19. last-checkpoint/rng_state_0.pth +1 -1
  20. last-checkpoint/rng_state_1.pth +1 -1
  21. last-checkpoint/rng_state_2.pth +1 -1
  22. last-checkpoint/rng_state_3.pth +1 -1
  23. last-checkpoint/rng_state_4.pth +1 -1
  24. last-checkpoint/rng_state_5.pth +1 -1
  25. last-checkpoint/rng_state_6.pth +1 -1
  26. last-checkpoint/rng_state_7.pth +1 -1
  27. last-checkpoint/scheduler.pt +1 -1
  28. last-checkpoint/trainer_state.json +284 -3
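The diff adds a full DeepSpeed ZeRO shard set for step 117 (one bf16 optimizer-state file and one model-state file per rank, eight ranks in total) alongside the updated PEFT adapter weights. As a minimal sketch of consuming the adapter alone, assuming last-checkpoint/ also holds the usual adapter_config.json (standard for PEFT saves, not part of this diff) and that the base model is a causal LM:

# Hypothetical usage sketch, not part of this repository's training code.
from peft import PeftConfig, PeftModel
from transformers import AutoModelForCausalLM

ckpt = "last-checkpoint"
cfg = PeftConfig.from_pretrained(ckpt)                    # reads adapter_config.json (assumed present)
base = AutoModelForCausalLM.from_pretrained(cfg.base_model_name_or_path)
model = PeftModel.from_pretrained(base, ckpt)             # loads adapter_model.safetensors

Resuming the run itself would instead go through the trainer that wrote the ZeRO shards, e.g. trainer.train(resume_from_checkpoint="last-checkpoint") with the original DeepSpeed configuration, since the optimizer state lives in the per-rank *_optim_states.pt files.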
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:acba92f70feb375a18e9e2df852cbcd5da182fcb0197d06d0d58f8ae3ddefa59
+oid sha256:8f881f7af17e8e692fe8e56b0bb3efdf3d1b82897cd46e96e09ffe05b97b0277
 size 550593856
last-checkpoint/global_step117/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b022a2efa834e66981a88b542ee77c9a22d1cc9d3836eab0ad8b25b22bedb412
+size 243590464
last-checkpoint/global_step117/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ecefa1c476a3924adc0f5af8e5cd57ae04d9ec8b6aa7fa424220296ed6de783a
+size 243590464
last-checkpoint/global_step117/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed3b0a80362e19b37f9382e8259885df8f3431851e62c1817798aa18507ecd96
+size 243590464
last-checkpoint/global_step117/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1fabbb4467f60e2d2f838e0d7904d5c62e0bebc24587b77b004961b78091a03
+size 243590464
last-checkpoint/global_step117/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cdcd5e95d483c18f1fa2ec0ba3084247f5324a1f3f5e1be3efa33c4b0cd6a152
+size 243590464
last-checkpoint/global_step117/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d56412bbd0b12661c93f11ce1ac2c81bc4bf38436dd305b9d3708fdb58e92836
+size 243590464
last-checkpoint/global_step117/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15a80a16ca24cea74922ea27bb63f4c4ef2b3122a81ea5d6b90114913aed44a3
+size 243590464
last-checkpoint/global_step117/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c575316af53d3fae6b90c2e2c15e9fb6a2e444df010ff98b51357cbb4bca039
+size 243590464
last-checkpoint/global_step117/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3e65a59c2801adf6a4cf0311b2a04c92f2fb5afe09639152226c1a505442df4
+size 211435686
last-checkpoint/global_step117/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30c6b92167c09ff2301dac0aefd7a9b2b2e1aa0e79f037c85910a2638e0fabd1
+size 211435686
last-checkpoint/global_step117/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0649d8e17c76b6fba7134c3130b44ba1f2a4432f77dd6cb4a6f543f76c3c1f59
+size 211435686
last-checkpoint/global_step117/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:265ea6d998aa9864e5031343514b3b5e1183aea55e6cfaa61e36002e616bddd7
+size 211435686
last-checkpoint/global_step117/zero_pp_rank_4_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:500d032f2e3ed875432838e8efefe71533914c8532b820c6ecc8bf364e037eff
+size 211435686
last-checkpoint/global_step117/zero_pp_rank_5_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f116d708be0a4523d9ad04955e59ad571408245ce19a62e8e992a1001535f56d
+size 211435686
last-checkpoint/global_step117/zero_pp_rank_6_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a6e0edd58f6ec4f4a55e8a0c5284c950ce0157996ba5ac7ee075f875400d983
+size 211435686
last-checkpoint/global_step117/zero_pp_rank_7_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef48f47b30be1fef2e9d62aa10c338fb421585678c988fb05ffc1d97d9629bb7
+size 211435686
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step78
+global_step117
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7be6a7458fb39bc70e00e530259e7abd91632050963220c5e5345ba476614f0f
+oid sha256:aac43c4e4aa943b1080df7e7983872659b6a36d1bde144f8865b626eeee7f434
 size 15920
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:66026e8691785fbd170c2d9ae99d3724845775d9ad2d10b4043deaa40f54d9d3
+oid sha256:f7c947232f2bdf46ca9ace1c9ebaf1e33912f79599e506f7f7c8ff48a11c7b15
 size 15920
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:30d6830c96020be339f5ec273931b81b6fdcd4cef823d11d94c4fd943a776041
+oid sha256:8cfa2ab32c5e9f8e4c49775769b133f4238f016c4e48e8b12b51160d4afbf783
 size 15920
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:05af7d968baba27b3aae094ad8d163a6fe1341c5eb4755a791de6267a3ac4f30
+oid sha256:c73c8b459dde07f85f2cffa5de5fb0bf136e4a80afc50d2475bc570d14a65b2e
 size 15920
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6baae28919653bdf03858f680a4661508c0687d95f095cac532ad969406a1502
+oid sha256:d9533672e6284f7cd4c7c4acb5ad621a319e75dbe8a3e90685943824a71eea36
 size 15920
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c83b8d7bb4cbda572628869e88e850db71c0d86c862de24379b95fd3657c08b7
+oid sha256:7a9be2d2ec0fbf037c14fb0f5f0467f7c960e90414356feec416f8eeb1684671
 size 15920
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7dd53716af0f7a80bcb94ac73844bb0733bac101ccb526edad0aec4835119044
+oid sha256:c6a4539a42d09819ea8a238c4dc3e947e079d2a1390a7babff9377b3718e2a09
 size 15920
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:00703c12bd26cde475b80092b041d86d78da55e5a8dff2fa4a8eae5ee025f617
+oid sha256:e097a64585a03993feb0a88a81b38258f3325c6b6c17bdedb7335173ed053ac4
 size 15920
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:263ec1efc1043d4f3446415e08ff68d883fbbe8fdcc5976db1d7ca97cdc780b3
+oid sha256:924db454eec3725afade5f0aae6c3ada154b140ee15da5b50e9bc51987b43065
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.203125,
+  "epoch": 0.3046875,
   "eval_steps": 39,
-  "global_step": 78,
+  "global_step": 117,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -577,6 +577,287 @@
       "eval_samples_per_second": 1.223,
       "eval_steps_per_second": 0.153,
       "step": 78
+    },
+    {
+      "epoch": 0.20572916666666666,
+      "grad_norm": 0.14467889640535875,
+      "learning_rate": 9.42907532874657e-05,
+      "loss": 2.4769,
+      "step": 79
+    },
+    {
+      "epoch": 0.20833333333333334,
+      "grad_norm": 0.13503613492125413,
+      "learning_rate": 9.40999585926253e-05,
+      "loss": 2.4346,
+      "step": 80
+    },
+    {
+      "epoch": 0.2109375,
+      "grad_norm": 0.1517728973900508,
+      "learning_rate": 9.390625136662972e-05,
+      "loss": 2.261,
+      "step": 81
+    },
+    {
+      "epoch": 0.21354166666666666,
+      "grad_norm": 0.1466563383653839,
+      "learning_rate": 9.370964603860753e-05,
+      "loss": 2.5261,
+      "step": 82
+    },
+    {
+      "epoch": 0.21614583333333334,
+      "grad_norm": 0.14618631155344838,
+      "learning_rate": 9.351015725356514e-05,
+      "loss": 2.4269,
+      "step": 83
+    },
+    {
+      "epoch": 0.21875,
+      "grad_norm": 0.15601390292939937,
+      "learning_rate": 9.33077998712958e-05,
+      "loss": 2.3773,
+      "step": 84
+    },
+    {
+      "epoch": 0.22135416666666666,
+      "grad_norm": 0.14415725413526403,
+      "learning_rate": 9.310258896527278e-05,
+      "loss": 2.3473,
+      "step": 85
+    },
+    {
+      "epoch": 0.22395833333333334,
+      "grad_norm": 0.13470412653239092,
+      "learning_rate": 9.289453982152653e-05,
+      "loss": 2.3472,
+      "step": 86
+    },
+    {
+      "epoch": 0.2265625,
+      "grad_norm": 0.15570776289510233,
+      "learning_rate": 9.2683667937506e-05,
+      "loss": 2.4414,
+      "step": 87
+    },
+    {
+      "epoch": 0.22916666666666666,
+      "grad_norm": 0.14612626399487072,
+      "learning_rate": 9.246998902092428e-05,
+      "loss": 2.4655,
+      "step": 88
+    },
+    {
+      "epoch": 0.23177083333333334,
+      "grad_norm": 0.18267625807633459,
+      "learning_rate": 9.22535189885886e-05,
+      "loss": 2.2771,
+      "step": 89
+    },
+    {
+      "epoch": 0.234375,
+      "grad_norm": 0.16065010725957032,
+      "learning_rate": 9.203427396521454e-05,
+      "loss": 2.4194,
+      "step": 90
+    },
+    {
+      "epoch": 0.23697916666666666,
+      "grad_norm": 0.18321820032984756,
+      "learning_rate": 9.181227028222508e-05,
+      "loss": 2.4456,
+      "step": 91
+    },
+    {
+      "epoch": 0.23958333333333334,
+      "grad_norm": 0.13929464732035163,
+      "learning_rate": 9.158752447653397e-05,
+      "loss": 2.3989,
+      "step": 92
+    },
+    {
+      "epoch": 0.2421875,
+      "grad_norm": 0.15585898969003706,
+      "learning_rate": 9.136005328931395e-05,
+      "loss": 2.4777,
+      "step": 93
+    },
+    {
+      "epoch": 0.24479166666666666,
+      "grad_norm": 0.14793073723102618,
+      "learning_rate": 9.112987366474972e-05,
+      "loss": 2.379,
+      "step": 94
+    },
+    {
+      "epoch": 0.24739583333333334,
+      "grad_norm": 0.14075868568691305,
+      "learning_rate": 9.089700274877574e-05,
+      "loss": 2.3281,
+      "step": 95
+    },
+    {
+      "epoch": 0.25,
+      "grad_norm": 0.15327339815586916,
+      "learning_rate": 9.066145788779908e-05,
+      "loss": 2.4517,
+      "step": 96
+    },
+    {
+      "epoch": 0.2526041666666667,
+      "grad_norm": 0.15505433166839452,
+      "learning_rate": 9.042325662740726e-05,
+      "loss": 2.2919,
+      "step": 97
+    },
+    {
+      "epoch": 0.2552083333333333,
+      "grad_norm": 0.1468856057426848,
+      "learning_rate": 9.018241671106134e-05,
+      "loss": 2.3889,
+      "step": 98
+    },
+    {
+      "epoch": 0.2578125,
+      "grad_norm": 0.1478488710337695,
+      "learning_rate": 8.993895607877418e-05,
+      "loss": 2.3595,
+      "step": 99
+    },
+    {
+      "epoch": 0.2604166666666667,
+      "grad_norm": 0.175359520279898,
+      "learning_rate": 8.969289286577408e-05,
+      "loss": 2.553,
+      "step": 100
+    },
+    {
+      "epoch": 0.2630208333333333,
+      "grad_norm": 0.14503182442700818,
+      "learning_rate": 8.9444245401154e-05,
+      "loss": 2.3573,
+      "step": 101
+    },
+    {
+      "epoch": 0.265625,
+      "grad_norm": 0.14177870868430487,
+      "learning_rate": 8.919303220650606e-05,
+      "loss": 2.3274,
+      "step": 102
+    },
+    {
+      "epoch": 0.2682291666666667,
+      "grad_norm": 0.1779007031093982,
+      "learning_rate": 8.893927199454207e-05,
+      "loss": 2.2008,
+      "step": 103
+    },
+    {
+      "epoch": 0.2708333333333333,
+      "grad_norm": 0.14578869126284813,
+      "learning_rate": 8.868298366769954e-05,
+      "loss": 2.444,
+      "step": 104
+    },
+    {
+      "epoch": 0.2734375,
+      "grad_norm": 0.14046914929392473,
+      "learning_rate": 8.842418631673365e-05,
+      "loss": 2.2844,
+      "step": 105
+    },
+    {
+      "epoch": 0.2760416666666667,
+      "grad_norm": 0.13720708755401478,
+      "learning_rate": 8.816289921929516e-05,
+      "loss": 2.5047,
+      "step": 106
+    },
+    {
+      "epoch": 0.2786458333333333,
+      "grad_norm": 0.13901967393067377,
+      "learning_rate": 8.789914183849449e-05,
+      "loss": 2.4602,
+      "step": 107
+    },
+    {
+      "epoch": 0.28125,
+      "grad_norm": 0.14165507438668587,
+      "learning_rate": 8.763293382145195e-05,
+      "loss": 2.3135,
+      "step": 108
+    },
+    {
+      "epoch": 0.2838541666666667,
+      "grad_norm": 0.1588574639328753,
+      "learning_rate": 8.73642949978341e-05,
+      "loss": 2.3731,
+      "step": 109
+    },
+    {
+      "epoch": 0.2864583333333333,
+      "grad_norm": 0.1905241886636867,
+      "learning_rate": 8.709324537837684e-05,
+      "loss": 2.4152,
+      "step": 110
+    },
+    {
+      "epoch": 0.2890625,
+      "grad_norm": 0.16919809232424363,
+      "learning_rate": 8.681980515339464e-05,
+      "loss": 2.2948,
+      "step": 111
+    },
+    {
+      "epoch": 0.2916666666666667,
+      "grad_norm": 0.15114015395920236,
+      "learning_rate": 8.654399469127673e-05,
+      "loss": 2.4309,
+      "step": 112
+    },
+    {
+      "epoch": 0.2942708333333333,
+      "grad_norm": 0.13890547261939354,
+      "learning_rate": 8.626583453696976e-05,
+      "loss": 2.4866,
+      "step": 113
+    },
+    {
+      "epoch": 0.296875,
+      "grad_norm": 0.1490334985258922,
+      "learning_rate": 8.598534541044747e-05,
+      "loss": 2.4748,
+      "step": 114
+    },
+    {
+      "epoch": 0.2994791666666667,
+      "grad_norm": 0.15113023404371403,
+      "learning_rate": 8.570254820516728e-05,
+      "loss": 2.3583,
+      "step": 115
+    },
+    {
+      "epoch": 0.3020833333333333,
+      "grad_norm": 0.14358757342098682,
+      "learning_rate": 8.541746398651395e-05,
+      "loss": 2.4005,
+      "step": 116
+    },
+    {
+      "epoch": 0.3046875,
+      "grad_norm": 0.1494340804949735,
+      "learning_rate": 8.513011399023036e-05,
+      "loss": 2.3249,
+      "step": 117
+    },
+    {
+      "epoch": 0.3046875,
+      "eval_loss": 2.4119415283203125,
+      "eval_runtime": 65.4329,
+      "eval_samples_per_second": 1.223,
+      "eval_steps_per_second": 0.153,
+      "step": 117
     }
   ],
   "logging_steps": 1,
@@ -596,7 +877,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 25785104596992.0,
+  "total_flos": 38677656895488.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null