timmmyo committed
Commit 5bdcb14 · verified · 1 Parent(s): 528c6b5

Training in progress, step 4800, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3e488b62025d41986709534a2dc201f971d6ddeec47a4c48635d7dddcb5f028b
+ oid sha256:2c4c8e6d007d3d8bb8f8f9bbd9ef836f6fb24275cfd50f0180fe1758da6364b4
  size 933293280
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fd03821db8049af2b171cdd8a983a9b8ab8db734bb230e14cce66182b0badc1d
+ oid sha256:d3b81f8d9a6373fbc49d9c16ddea636467baebe74e82efe9496675b018e5ca15
  size 1866844821
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c6d2f85bebee57b047e39efab752a9eab24d277780521dfed0170268348b7113
+ oid sha256:fd74d8f0f7764961530a74e879e048f30ba690128c8b40b38488d20c197a86ef
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2cdcf164ce6993e9af16adc4343d6062859956f040c1fbcd7e250c216a5a5066
+ oid sha256:5232a4a5f739042715cc636afe138ca1778707ce4414342447394498c78343ab
  size 1256
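
The four binary files above are tracked with Git LFS, so what the repository actually versions is a small pointer file per artifact: the LFS spec version, a sha256 object ID, and the byte size. Each checkpoint push therefore only swaps the oid (and, if it changed, the size) in these pointers. As a rough illustration, a downloaded blob can be checked against its pointer like this; the file paths are hypothetical, and only the pointer format shown above is assumed:

```python
import hashlib
from pathlib import Path


def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded blob against the oid/size recorded in its LFS pointer."""
    # A pointer file has three "key value" lines: version, oid, size.
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if " " in line
    )
    expected_oid = fields["oid"].split(":", 1)[1]  # strip the "sha256:" prefix
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    if blob.stat().st_size != expected_size:
        return False

    digest = hashlib.sha256()
    with blob.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid


# Hypothetical paths -- adjust to wherever the pointer and the real file live.
# verify_lfs_pointer("adapter_model.safetensors.pointer", "adapter_model.safetensors")
```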
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.14619897305965424,
- "best_model_checkpoint": "./output/checkpoint-4650",
- "epoch": 0.17283675289919714,
+ "best_metric": 0.14507263898849487,
+ "best_model_checkpoint": "./output/checkpoint-4800",
+ "epoch": 0.1784121320249777,
  "eval_steps": 150,
- "global_step": 4650,
+ "global_step": 4800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -3510,6 +3510,119 @@
  "eval_samples_per_second": 13.297,
  "eval_steps_per_second": 13.297,
  "step": 4650
+ },
+ {
+ "epoch": 0.17320844484091585,
+ "grad_norm": 2.0867106914520264,
+ "learning_rate": 6.817354714980488e-07,
+ "loss": 0.1318,
+ "step": 4660
+ },
+ {
+ "epoch": 0.17358013678263456,
+ "grad_norm": 17.901288986206055,
+ "learning_rate": 6.758453247522666e-07,
+ "loss": 0.102,
+ "step": 4670
+ },
+ {
+ "epoch": 0.17395182872435325,
+ "grad_norm": 9.688858032226562,
+ "learning_rate": 6.699669035115747e-07,
+ "loss": 0.0933,
+ "step": 4680
+ },
+ {
+ "epoch": 0.17432352066607196,
+ "grad_norm": 80.86397552490234,
+ "learning_rate": 6.641004494148456e-07,
+ "loss": 0.0704,
+ "step": 4690
+ },
+ {
+ "epoch": 0.17469521260779067,
+ "grad_norm": 8.16674518585205,
+ "learning_rate": 6.5824620360903e-07,
+ "loss": 0.1367,
+ "step": 4700
+ },
+ {
+ "epoch": 0.17506690454950938,
+ "grad_norm": 17.89749526977539,
+ "learning_rate": 6.524044067392435e-07,
+ "loss": 0.0761,
+ "step": 4710
+ },
+ {
+ "epoch": 0.17543859649122806,
+ "grad_norm": 17.662899017333984,
+ "learning_rate": 6.465752989388749e-07,
+ "loss": 0.074,
+ "step": 4720
+ },
+ {
+ "epoch": 0.17581028843294677,
+ "grad_norm": 4.190911293029785,
+ "learning_rate": 6.407591198197149e-07,
+ "loss": 0.0888,
+ "step": 4730
+ },
+ {
+ "epoch": 0.17618198037466548,
+ "grad_norm": 24.806283950805664,
+ "learning_rate": 6.349561084621062e-07,
+ "loss": 0.1334,
+ "step": 4740
+ },
+ {
+ "epoch": 0.1765536723163842,
+ "grad_norm": 11.839107513427734,
+ "learning_rate": 6.29166503405117e-07,
+ "loss": 0.0903,
+ "step": 4750
+ },
+ {
+ "epoch": 0.17692536425810287,
+ "grad_norm": 6.1233978271484375,
+ "learning_rate": 6.233905426367347e-07,
+ "loss": 0.0758,
+ "step": 4760
+ },
+ {
+ "epoch": 0.17729705619982158,
+ "grad_norm": 6.106809616088867,
+ "learning_rate": 6.176284635840844e-07,
+ "loss": 0.1035,
+ "step": 4770
+ },
+ {
+ "epoch": 0.1776687481415403,
+ "grad_norm": 1.4445923566818237,
+ "learning_rate": 6.118805031036666e-07,
+ "loss": 0.069,
+ "step": 4780
+ },
+ {
+ "epoch": 0.178040440083259,
+ "grad_norm": 19.246662139892578,
+ "learning_rate": 6.061468974716237e-07,
+ "loss": 0.0968,
+ "step": 4790
+ },
+ {
+ "epoch": 0.1784121320249777,
+ "grad_norm": 10.475799560546875,
+ "learning_rate": 6.004278823740262e-07,
+ "loss": 0.0921,
+ "step": 4800
+ },
+ {
+ "epoch": 0.1784121320249777,
+ "eval_loss": 0.14507263898849487,
+ "eval_runtime": 39.6799,
+ "eval_samples_per_second": 12.601,
+ "eval_steps_per_second": 12.601,
+ "step": 4800
  }
  ],
  "logging_steps": 10,
@@ -3529,7 +3642,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1.30014571180032e+17,
+ "total_flos": 1.342970275358638e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null