alicegoesdown committed
Commit 99d3b6f · verified · 1 Parent(s): 9ccc08d

Training in progress, step 4650, checkpoint

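Since this commit is a mid-run checkpoint ("Training in progress, step 4650"), the usual way to continue the run is to point the trainer back at the `last-checkpoint/` directory. A minimal sketch, assuming a Hugging Face `transformers` `Trainer` instance named `trainer`, rebuilt with the original model, data, and `TrainingArguments` (none of which are part of this commit):

```python
# Hypothetical resume call; `trainer` is assumed to be reconstructed from the
# original (unshown) training script before resuming at step 4650.
trainer.train(resume_from_checkpoint="last-checkpoint")
```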
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0c9ee81f1a5f2aca4877f804ddc266ce1515f0b8ab7e7e66f32085a4e0efdc70
+oid sha256:40f3328ee4a1abb22c7c87ff66605a87fc16d9c72dcaf84e33c9f372cf143309
 size 144748392
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:37d58fabdcf76f95f479f5ab2cb8357256bcedcf60f4ed21b9e7a84d5c76fa7e
+oid sha256:d8420873b95a7015ac283b565db0edfa7bf6da6bb0b60d7d1c58f8831b13179c
 size 289690562
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:93b6d57158a43317d364194a2d857459227019cc82d49b1f5a0e5ad95a7a48ef
+oid sha256:b015303c34afce45237ba1bac3daf679d9cae234ba16bc9876bc1e5e6c2369d3
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16bf4acbbdf4243a5b30121f973d426853acaed3d0caa7d9deb065ee076e1e1d
+oid sha256:9612206721dab747b72325faca201c5b62b462fd9a44bcf14483ab477a2645b1
 size 1256
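
The four small files above are Git LFS pointers rather than the binary artifacts themselves: each records the spec version, the SHA-256 of the real file (`oid`), and its size in bytes. A minimal sketch of verifying a downloaded artifact against its pointer, assuming the file has already been fetched locally (e.g. via `git lfs pull` or `huggingface_hub`):

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256 so large checkpoints need not fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid taken from the new adapter_model.safetensors pointer in this commit.
expected = "40f3328ee4a1abb22c7c87ff66605a87fc16d9c72dcaf84e33c9f372cf143309"
local = Path("last-checkpoint/adapter_model.safetensors")  # assumed local download path
print("ok" if sha256_of(local) == expected else "oid mismatch")
```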
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.8971238732337952,
-  "best_model_checkpoint": "./output/checkpoint-4500",
-  "epoch": 0.28680688336520077,
+  "best_metric": 0.8963962197303772,
+  "best_model_checkpoint": "./output/checkpoint-4650",
+  "epoch": 0.29636711281070743,
   "eval_steps": 150,
-  "global_step": 4500,
+  "global_step": 4650,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3397,6 +3397,119 @@
       "eval_samples_per_second": 12.141,
       "eval_steps_per_second": 12.141,
       "step": 4500
+    },
+    {
+      "epoch": 0.2874442319949012,
+      "grad_norm": 0.6785925626754761,
+      "learning_rate": 3.0589677315529044e-06,
+      "loss": 0.8918,
+      "step": 4510
+    },
+    {
+      "epoch": 0.28808158062460165,
+      "grad_norm": 0.640612781047821,
+      "learning_rate": 2.9363630392945513e-06,
+      "loss": 0.8881,
+      "step": 4520
+    },
+    {
+      "epoch": 0.2887189292543021,
+      "grad_norm": 0.6500893831253052,
+      "learning_rate": 2.816206774856854e-06,
+      "loss": 0.9024,
+      "step": 4530
+    },
+    {
+      "epoch": 0.28935627788400253,
+      "grad_norm": 0.6150489449501038,
+      "learning_rate": 2.6985038773932046e-06,
+      "loss": 0.8976,
+      "step": 4540
+    },
+    {
+      "epoch": 0.289993626513703,
+      "grad_norm": 0.6642642021179199,
+      "learning_rate": 2.583259185208714e-06,
+      "loss": 0.9023,
+      "step": 4550
+    },
+    {
+      "epoch": 0.29063097514340347,
+      "grad_norm": 0.6468821167945862,
+      "learning_rate": 2.4704774355612943e-06,
+      "loss": 0.8929,
+      "step": 4560
+    },
+    {
+      "epoch": 0.2912683237731039,
+      "grad_norm": 0.6480686068534851,
+      "learning_rate": 2.3601632644669536e-06,
+      "loss": 0.8982,
+      "step": 4570
+    },
+    {
+      "epoch": 0.29190567240280435,
+      "grad_norm": 0.6992853879928589,
+      "learning_rate": 2.2523212065091723e-06,
+      "loss": 0.8626,
+      "step": 4580
+    },
+    {
+      "epoch": 0.2925430210325048,
+      "grad_norm": 0.7066597938537598,
+      "learning_rate": 2.1469556946525706e-06,
+      "loss": 0.8962,
+      "step": 4590
+    },
+    {
+      "epoch": 0.29318036966220523,
+      "grad_norm": 0.6244568228721619,
+      "learning_rate": 2.0440710600606595e-06,
+      "loss": 0.8887,
+      "step": 4600
+    },
+    {
+      "epoch": 0.29381771829190567,
+      "grad_norm": 0.6500007510185242,
+      "learning_rate": 1.9436715319177956e-06,
+      "loss": 0.8853,
+      "step": 4610
+    },
+    {
+      "epoch": 0.2944550669216061,
+      "grad_norm": 0.6446324586868286,
+      "learning_rate": 1.8457612372553348e-06,
+      "loss": 0.8889,
+      "step": 4620
+    },
+    {
+      "epoch": 0.29509241555130655,
+      "grad_norm": 0.6383276581764221,
+      "learning_rate": 1.75034420078201e-06,
+      "loss": 0.9005,
+      "step": 4630
+    },
+    {
+      "epoch": 0.295729764181007,
+      "grad_norm": 0.6221078038215637,
+      "learning_rate": 1.6574243447184597e-06,
+      "loss": 0.8846,
+      "step": 4640
+    },
+    {
+      "epoch": 0.29636711281070743,
+      "grad_norm": 0.6676984429359436,
+      "learning_rate": 1.567005488636024e-06,
+      "loss": 0.8919,
+      "step": 4650
+    },
+    {
+      "epoch": 0.29636711281070743,
+      "eval_loss": 0.8963962197303772,
+      "eval_runtime": 41.0446,
+      "eval_samples_per_second": 12.182,
+      "eval_steps_per_second": 12.182,
+      "step": 4650
     }
   ],
   "logging_steps": 10,
@@ -3416,7 +3529,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.2559349120434176e+17,
+  "total_flos": 1.29777485070336e+17,
   "train_batch_size": 32,
   "trial_name": null,
   "trial_params": null