alicegoesdown committed (verified)
Commit 5fedc71 · 1 Parent(s): e182cac

Training in progress, step 4800, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:40f3328ee4a1abb22c7c87ff66605a87fc16d9c72dcaf84e33c9f372cf143309
+oid sha256:7603902b76dfb153996cd58830ff2244b1c85eb06a0dbd52e2c83aa4db919240
 size 144748392
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8420873b95a7015ac283b565db0edfa7bf6da6bb0b60d7d1c58f8831b13179c
+oid sha256:5a7049d10ee3c694bd2470145a49ede9bec92ae46b8a0cfd24b342730ad05e3c
 size 289690562
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b015303c34afce45237ba1bac3daf679d9cae234ba16bc9876bc1e5e6c2369d3
+oid sha256:84e8894b47240b5e938409bdde12ef850338a078ac443b39608ef1f61c6d50e9
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9612206721dab747b72325faca201c5b62b462fd9a44bcf14483ab477a2645b1
+oid sha256:741e5a74b4b8a7e18ee94356eb03e28c998d3e32cf81a6bc98c8f2bfb8fb01d7
 size 1256
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.8963962197303772,
-  "best_model_checkpoint": "./output/checkpoint-4650",
-  "epoch": 0.29636711281070743,
+  "best_metric": 0.8962866067886353,
+  "best_model_checkpoint": "./output/checkpoint-4800",
+  "epoch": 0.30592734225621415,
   "eval_steps": 150,
-  "global_step": 4650,
+  "global_step": 4800,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3510,6 +3510,119 @@
       "eval_samples_per_second": 12.182,
       "eval_steps_per_second": 12.182,
       "step": 4650
+    },
+    {
+      "epoch": 0.29700446144040793,
+      "grad_norm": 0.6811459064483643,
+      "learning_rate": 1.4790913492997438e-06,
+      "loss": 0.8875,
+      "step": 4660
+    },
+    {
+      "epoch": 0.29764181007010837,
+      "grad_norm": 0.6501142382621765,
+      "learning_rate": 1.3936855405155408e-06,
+      "loss": 0.8959,
+      "step": 4670
+    },
+    {
+      "epoch": 0.2982791586998088,
+      "grad_norm": 0.6603941321372986,
+      "learning_rate": 1.3107915729816954e-06,
+      "loss": 0.887,
+      "step": 4680
+    },
+    {
+      "epoch": 0.29891650732950925,
+      "grad_norm": 0.6620382070541382,
+      "learning_rate": 1.230412854144547e-06,
+      "loss": 0.8847,
+      "step": 4690
+    },
+    {
+      "epoch": 0.2995538559592097,
+      "grad_norm": 0.6567186117172241,
+      "learning_rate": 1.15255268805841e-06,
+      "loss": 0.8955,
+      "step": 4700
+    },
+    {
+      "epoch": 0.30019120458891013,
+      "grad_norm": 0.6121493577957153,
+      "learning_rate": 1.0772142752497604e-06,
+      "loss": 0.8988,
+      "step": 4710
+    },
+    {
+      "epoch": 0.3008285532186106,
+      "grad_norm": 0.6948884129524231,
+      "learning_rate": 1.004400712585646e-06,
+      "loss": 0.9086,
+      "step": 4720
+    },
+    {
+      "epoch": 0.301465901848311,
+      "grad_norm": 0.6520769000053406,
+      "learning_rate": 9.341149931464537e-07,
+      "loss": 0.8802,
+      "step": 4730
+    },
+    {
+      "epoch": 0.30210325047801145,
+      "grad_norm": 0.6592210531234741,
+      "learning_rate": 8.663600061028162e-07,
+      "loss": 0.9139,
+      "step": 4740
+    },
+    {
+      "epoch": 0.3027405991077119,
+      "grad_norm": 0.6573534607887268,
+      "learning_rate": 8.011385365968641e-07,
+      "loss": 0.9039,
+      "step": 4750
+    },
+    {
+      "epoch": 0.3033779477374124,
+      "grad_norm": 0.6443983316421509,
+      "learning_rate": 7.384532656277698e-07,
+      "loss": 0.8939,
+      "step": 4760
+    },
+    {
+      "epoch": 0.30401529636711283,
+      "grad_norm": 0.6723068952560425,
+      "learning_rate": 6.783067699414891e-07,
+      "loss": 0.8871,
+      "step": 4770
+    },
+    {
+      "epoch": 0.30465264499681327,
+      "grad_norm": 0.6483265161514282,
+      "learning_rate": 6.207015219248866e-07,
+      "loss": 0.8888,
+      "step": 4780
+    },
+    {
+      "epoch": 0.3052899936265137,
+      "grad_norm": 0.6345191597938538,
+      "learning_rate": 5.656398895040813e-07,
+      "loss": 0.8899,
+      "step": 4790
+    },
+    {
+      "epoch": 0.30592734225621415,
+      "grad_norm": 0.6864520907402039,
+      "learning_rate": 5.131241360471217e-07,
+      "loss": 0.8845,
+      "step": 4800
+    },
+    {
+      "epoch": 0.30592734225621415,
+      "eval_loss": 0.8962866067886353,
+      "eval_runtime": 44.5297,
+      "eval_samples_per_second": 11.228,
+      "eval_steps_per_second": 11.228,
+      "step": 4800
     }
   ],
   "logging_steps": 10,
@@ -3529,7 +3642,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.29777485070336e+17,
+  "total_flos": 1.3399278300168192e+17,
   "train_batch_size": 32,
   "trial_name": null,
   "trial_params": null