brixeus committed (verified)
Commit fc2ae75 · Parent: a1a1171

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:29823f4e08b18743fec2278d0a24fba91846a1bbe1838f5c71ac01c15fea6da1
+oid sha256:0c455923ab770e48cc7fe8b3335bf5b07469208adc770e09d20b199364122c4e
 size 1163996488
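
The changed pointer above tracks the adapter weights themselves (about 1.16 GB once the LFS object is fetched). A minimal sketch for inspecting the downloaded file, assuming the checkpoint directory has been pulled locally with git-lfs; only the path and size come from the diff, the rest is illustrative:

```python
from safetensors.torch import load_file

# Assumes `git lfs pull` has replaced the pointer with the real payload.
weights = load_file("last-checkpoint/adapter_model.safetensors", device="cpu")

# Quick summary of what the adapter checkpoint contains.
total = sum(t.numel() for t in weights.values())
print(f"{len(weights)} tensors, {total / 1e6:.1f}M parameters")
```
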
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2febce13ec52a548e34b9db0f50c7fb2dfb945379b97f53fb52a73c2635c2f2c
+oid sha256:69011464ee6bf6a2e340fa6f5ccb7dac9287a6b7576359b1b0f5fadc01fa0606
 size 325339796
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4787c2c04c07c1ae6e2c20c42952d1e91343f26f99f548d3534a1670bc4a5f0c
+oid sha256:cecbe0d896bc69bdfaae08801620d68150df363e8dc6b04330ef464a0d971be6
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8ce05761f46e7cf72fb17a02e3a0ca15c9d25ce3babf590eeb40568923b8bac
+oid sha256:d2d754412c61116546142914503e7369d0cc35d3c380a07e5218f595d76b6d96
 size 1064
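
All four checkpoint binaries above are stored through Git LFS, so the commit only rewrites their pointer files: the `oid sha256` changes while `version` and `size` stay the same for each file. A hedged sketch of how a local copy could be checked against a pointer, using only the oid and size values shown in the diffs (the local path is an assumption):

```python
import hashlib
from pathlib import Path

def matches_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the local file has the size and sha256 recorded in the LFS pointer."""
    p = Path(path)
    if p.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# oid and size taken from the adapter_model.safetensors pointer above.
print(matches_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "0c455923ab770e48cc7fe8b3335bf5b07469208adc770e09d20b199364122c4e",
    1163996488,
))
```
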
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.613823652267456,
-  "best_model_checkpoint": "miner_id_24/checkpoint-150",
-  "epoch": 0.338409475465313,
+  "best_metric": 1.5217487812042236,
+  "best_model_checkpoint": "miner_id_24/checkpoint-200",
+  "epoch": 0.4512126339537507,
   "eval_steps": 50,
-  "global_step": 150,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -389,6 +389,126 @@
       "eval_samples_per_second": 13.426,
       "eval_steps_per_second": 3.361,
       "step": 150
+    },
+    {
+      "epoch": 0.34517766497461927,
+      "grad_norm": 1.9749424457550049,
+      "learning_rate": 1.435357758543015e-05,
+      "loss": 2.1616,
+      "step": 153
+    },
+    {
+      "epoch": 0.35194585448392557,
+      "grad_norm": 2.1206893920898438,
+      "learning_rate": 1.2658926150792322e-05,
+      "loss": 2.1542,
+      "step": 156
+    },
+    {
+      "epoch": 0.3587140439932318,
+      "grad_norm": 2.122969150543213,
+      "learning_rate": 1.1056136061894384e-05,
+      "loss": 1.9048,
+      "step": 159
+    },
+    {
+      "epoch": 0.36548223350253806,
+      "grad_norm": 2.1160035133361816,
+      "learning_rate": 9.549150281252633e-06,
+      "loss": 1.8025,
+      "step": 162
+    },
+    {
+      "epoch": 0.37225042301184436,
+      "grad_norm": 2.0058743953704834,
+      "learning_rate": 8.141676086873572e-06,
+      "loss": 1.7525,
+      "step": 165
+    },
+    {
+      "epoch": 0.3790186125211506,
+      "grad_norm": 1.7620965242385864,
+      "learning_rate": 6.837175952121306e-06,
+      "loss": 1.4851,
+      "step": 168
+    },
+    {
+      "epoch": 0.38578680203045684,
+      "grad_norm": 2.089890718460083,
+      "learning_rate": 5.6388590278194096e-06,
+      "loss": 1.5668,
+      "step": 171
+    },
+    {
+      "epoch": 0.3925549915397631,
+      "grad_norm": 1.8286525011062622,
+      "learning_rate": 4.549673247541875e-06,
+      "loss": 1.3543,
+      "step": 174
+    },
+    {
+      "epoch": 0.3993231810490694,
+      "grad_norm": 2.1308631896972656,
+      "learning_rate": 3.5722980755146517e-06,
+      "loss": 1.4603,
+      "step": 177
+    },
+    {
+      "epoch": 0.40609137055837563,
+      "grad_norm": 2.0532541275024414,
+      "learning_rate": 2.7091379149682685e-06,
+      "loss": 1.4869,
+      "step": 180
+    },
+    {
+      "epoch": 0.4128595600676819,
+      "grad_norm": 2.347409963607788,
+      "learning_rate": 1.962316193157593e-06,
+      "loss": 1.2985,
+      "step": 183
+    },
+    {
+      "epoch": 0.4196277495769882,
+      "grad_norm": 1.9288790225982666,
+      "learning_rate": 1.333670137599713e-06,
+      "loss": 1.4297,
+      "step": 186
+    },
+    {
+      "epoch": 0.4263959390862944,
+      "grad_norm": 2.8015472888946533,
+      "learning_rate": 8.247462563808817e-07,
+      "loss": 1.543,
+      "step": 189
+    },
+    {
+      "epoch": 0.43316412859560066,
+      "grad_norm": 2.930189371109009,
+      "learning_rate": 4.367965336512403e-07,
+      "loss": 1.3146,
+      "step": 192
+    },
+    {
+      "epoch": 0.43993231810490696,
+      "grad_norm": 2.7898826599121094,
+      "learning_rate": 1.7077534966650766e-07,
+      "loss": 1.4502,
+      "step": 195
+    },
+    {
+      "epoch": 0.4467005076142132,
+      "grad_norm": 3.787993907928467,
+      "learning_rate": 2.7337132953697554e-08,
+      "loss": 1.4229,
+      "step": 198
+    },
+    {
+      "epoch": 0.4512126339537507,
+      "eval_loss": 1.5217487812042236,
+      "eval_runtime": 55.7179,
+      "eval_samples_per_second": 13.407,
+      "eval_steps_per_second": 3.356,
+      "step": 200
     }
   ],
   "logging_steps": 3,
@@ -412,12 +532,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
      "attributes": {}
     }
   },
-  "total_flos": 1.995744869351424e+17,
+  "total_flos": 2.660993159135232e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null