cwaud committed
Commit 3c800d2
Parent(s): 3759ddd

Training in progress, step 83, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:71c2d294e85b0ae32deab70c3aa5243cb11c08e1d82d08535ae2e035f16391e0
+oid sha256:efca5ac27a0c8656909a7422ee16335311a7f780e8c61b6d317d60ac53edc071
 size 335604696
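
Only the LFS pointer (oid and size) changes in these diffs; the checkpoint payload itself lives in LFS storage. As a minimal sketch, the updated adapter could be fetched at this commit and checked against the new pointer's sha256. The repo id below is a placeholder, not taken from this page, and the full 40-character commit hash may be required for the revision.

    # sketch: download the adapter at this commit and verify the LFS oid
    import hashlib
    from huggingface_hub import hf_hub_download

    path = hf_hub_download(
        repo_id="cwaud/<repo-name>",   # placeholder repo id (assumption)
        filename="last-checkpoint/adapter_model.safetensors",
        revision="3c800d2",            # this commit; full hash may be required
    )

    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)

    # expected: efca5ac27a0c8656909a7422ee16335311a7f780e8c61b6d317d60ac53edc071
    print(h.hexdigest())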
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2ae4363572d84b5ed37ece23cab768d11baae80a27e64eb7a0233c8dbedb1084
+oid sha256:e0aa6cc1d46f55508da995acdcb8eb065722938b81e5160de4ea1e1e3d6afec8
 size 671466706
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4dd60aa48ff4b0c5681667e4bac1eda701a82305811fa88b41f02f39447f3e79
+oid sha256:65f047353f5ee09e16f21ce9336a8410cb538b6a0d98e0dfff2ce2ff5af26c48
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:43588fda80d961cb4666ec04fcbe5427d597b97b08c9c6ace0b5419fa71de599
+oid sha256:f5e9702c50a0713ada5f38b5e1e87fe9f8fe4cabcfa98fc9a65c9c491f51226a
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0185dc93a4892f7deb203c00b7d00a4b84b4e35d61b0005c10e564ac482dd90c
+oid sha256:e09f8f070e6ba322b6856908fabd4eadaeb3e8040782a2129aaba24a0fc1b3bb
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3b75b6145f802aceb9618ccad7b159de053a94ee8d0cde6c3217554c60dafc8d
+oid sha256:4e3c37b6be47040b31f073b29367472651092aec12d5294822a8aad973c54d5a
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0925e9b4a1c852b6b439be21a6791f6589a1c819669446d27fb3f562a9d12d34
+oid sha256:de625b9c46b02e26c7087167b584f523763df8c1700128401bca00e401a1eac6
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.6395394802093506,
   "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.008535017576801822,
+  "epoch": 0.014168129177491025,
   "eval_steps": 25,
-  "global_step": 50,
+  "global_step": 83,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -381,6 +381,245 @@
       "eval_samples_per_second": 13.999,
       "eval_steps_per_second": 3.64,
       "step": 50
+    },
+    {
+      "epoch": 0.008705717928337858,
+      "grad_norm": 0.1751696616411209,
+      "learning_rate": 4.109423525312738e-05,
+      "loss": 0.5759,
+      "step": 51
+    },
+    {
+      "epoch": 0.008876418279873895,
+      "grad_norm": 0.20063109695911407,
+      "learning_rate": 3.942473243151281e-05,
+      "loss": 0.5943,
+      "step": 52
+    },
+    {
+      "epoch": 0.009047118631409931,
+      "grad_norm": 0.21948301792144775,
+      "learning_rate": 3.777924554357096e-05,
+      "loss": 0.657,
+      "step": 53
+    },
+    {
+      "epoch": 0.009217818982945969,
+      "grad_norm": 0.21699479222297668,
+      "learning_rate": 3.616031181081575e-05,
+      "loss": 0.6304,
+      "step": 54
+    },
+    {
+      "epoch": 0.009388519334482004,
+      "grad_norm": 0.24316714704036713,
+      "learning_rate": 3.45704275117204e-05,
+      "loss": 0.6608,
+      "step": 55
+    },
+    {
+      "epoch": 0.009559219686018042,
+      "grad_norm": 0.22694571316242218,
+      "learning_rate": 3.301204413263704e-05,
+      "loss": 0.733,
+      "step": 56
+    },
+    {
+      "epoch": 0.009729920037554077,
+      "grad_norm": 0.2339209020137787,
+      "learning_rate": 3.1487564587782306e-05,
+      "loss": 0.7035,
+      "step": 57
+    },
+    {
+      "epoch": 0.009900620389090113,
+      "grad_norm": 0.2564551532268524,
+      "learning_rate": 2.9999339514117912e-05,
+      "loss": 0.7208,
+      "step": 58
+    },
+    {
+      "epoch": 0.01007132074062615,
+      "grad_norm": 0.23558856546878815,
+      "learning_rate": 2.854966364683872e-05,
+      "loss": 0.6608,
+      "step": 59
+    },
+    {
+      "epoch": 0.010242021092162186,
+      "grad_norm": 0.27021926641464233,
+      "learning_rate": 2.7140772281057468e-05,
+      "loss": 0.7057,
+      "step": 60
+    },
+    {
+      "epoch": 0.010412721443698224,
+      "grad_norm": 0.3034929633140564,
+      "learning_rate": 2.577483782514174e-05,
+      "loss": 0.7375,
+      "step": 61
+    },
+    {
+      "epoch": 0.01058342179523426,
+      "grad_norm": 0.35034218430519104,
+      "learning_rate": 2.445396645101762e-05,
+      "loss": 0.6164,
+      "step": 62
+    },
+    {
+      "epoch": 0.010754122146770295,
+      "grad_norm": 0.34496256709098816,
+      "learning_rate": 2.3180194846605367e-05,
+      "loss": 0.5483,
+      "step": 63
+    },
+    {
+      "epoch": 0.010924822498306333,
+      "grad_norm": 0.222571462392807,
+      "learning_rate": 2.195548707539416e-05,
+      "loss": 0.6643,
+      "step": 64
+    },
+    {
+      "epoch": 0.011095522849842368,
+      "grad_norm": 0.2034004181623459,
+      "learning_rate": 2.0781731547998614e-05,
+      "loss": 0.5942,
+      "step": 65
+    },
+    {
+      "epoch": 0.011266223201378406,
+      "grad_norm": 0.24817276000976562,
+      "learning_rate": 1.966073811036649e-05,
+      "loss": 0.6528,
+      "step": 66
+    },
+    {
+      "epoch": 0.011436923552914442,
+      "grad_norm": 0.18400932848453522,
+      "learning_rate": 1.8594235253127375e-05,
+      "loss": 0.6657,
+      "step": 67
+    },
+    {
+      "epoch": 0.011607623904450479,
+      "grad_norm": 0.2996188998222351,
+      "learning_rate": 1.758386744638546e-05,
+      "loss": 0.6882,
+      "step": 68
+    },
+    {
+      "epoch": 0.011778324255986515,
+      "grad_norm": 0.23361016809940338,
+      "learning_rate": 1.6631192604065855e-05,
+      "loss": 0.6577,
+      "step": 69
+    },
+    {
+      "epoch": 0.01194902460752255,
+      "grad_norm": 0.26406583189964294,
+      "learning_rate": 1.573767968172413e-05,
+      "loss": 0.7139,
+      "step": 70
+    },
+    {
+      "epoch": 0.012119724959058588,
+      "grad_norm": 0.3076748251914978,
+      "learning_rate": 1.490470641152345e-05,
+      "loss": 0.7219,
+      "step": 71
+    },
+    {
+      "epoch": 0.012290425310594624,
+      "grad_norm": 0.2459680140018463,
+      "learning_rate": 1.413355717787134e-05,
+      "loss": 0.6901,
+      "step": 72
+    },
+    {
+      "epoch": 0.012461125662130661,
+      "grad_norm": 0.2629689872264862,
+      "learning_rate": 1.3425421036992098e-05,
+      "loss": 0.6635,
+      "step": 73
+    },
+    {
+      "epoch": 0.012631826013666697,
+      "grad_norm": 0.3097344636917114,
+      "learning_rate": 1.2781389883488218e-05,
+      "loss": 0.6936,
+      "step": 74
+    },
+    {
+      "epoch": 0.012802526365202732,
+      "grad_norm": 0.423374205827713,
+      "learning_rate": 1.2202456766718093e-05,
+      "loss": 0.5756,
+      "step": 75
+    },
+    {
+      "epoch": 0.012802526365202732,
+      "eval_loss": 0.6261041760444641,
+      "eval_runtime": 3.5774,
+      "eval_samples_per_second": 13.977,
+      "eval_steps_per_second": 3.634,
+      "step": 75
+    },
+    {
+      "epoch": 0.01297322671673877,
+      "grad_norm": 0.25061461329460144,
+      "learning_rate": 1.168951435958588e-05,
+      "loss": 0.5687,
+      "step": 76
+    },
+    {
+      "epoch": 0.013143927068274806,
+      "grad_norm": 0.2536349296569824,
+      "learning_rate": 1.1243353582104556e-05,
+      "loss": 0.6822,
+      "step": 77
+    },
+    {
+      "epoch": 0.013314627419810843,
+      "grad_norm": 0.1962866485118866,
+      "learning_rate": 1.0864662381854632e-05,
+      "loss": 0.649,
+      "step": 78
+    },
+    {
+      "epoch": 0.013485327771346879,
+      "grad_norm": 0.1990254670381546,
+      "learning_rate": 1.0554024673218807e-05,
+      "loss": 0.6856,
+      "step": 79
+    },
+    {
+      "epoch": 0.013656028122882916,
+      "grad_norm": 0.2070581167936325,
+      "learning_rate": 1.0311919437028318e-05,
+      "loss": 0.6754,
+      "step": 80
+    },
+    {
+      "epoch": 0.013826728474418952,
+      "grad_norm": 0.21576634049415588,
+      "learning_rate": 1.0138719982009242e-05,
+      "loss": 0.6654,
+      "step": 81
+    },
+    {
+      "epoch": 0.013997428825954988,
+      "grad_norm": 0.2243759036064148,
+      "learning_rate": 1.003469336916747e-05,
+      "loss": 0.6967,
+      "step": 82
+    },
+    {
+      "epoch": 0.014168129177491025,
+      "grad_norm": 0.22119440138339996,
+      "learning_rate": 1e-05,
+      "loss": 0.7006,
+      "step": 83
     }
   ],
   "logging_steps": 1,
@@ -404,12 +643,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 5.978341255497646e+17,
+  "total_flos": 9.919141468050555e+17,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
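
The entries added above for steps 51-83 are plain JSON, so the checkpoint's loss curve can be inspected directly from trainer_state.json. A minimal sketch, assuming the last-checkpoint directory has been downloaded locally and that the entries sit under the standard Trainer "log_history" key:

    # sketch: print step/loss pairs recorded in the checkpoint's trainer_state.json
    import json

    with open("last-checkpoint/trainer_state.json") as f:
        state = json.load(f)

    for entry in state["log_history"]:
        if "loss" in entry:                 # skip eval-only entries
            print(entry["step"], entry["loss"])

    print("global_step:", state["global_step"])   # 83
    print("best_metric:", state["best_metric"])   # 0.6395394802093506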