besimray committed
Commit e2cd47e · verified · 1 Parent(s): 87c4e05

Training in progress, step 60, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7a0ce744c574dde5ff01d54cbde9aaf4826321e6dbab34fba0301d723d5f12af
+ oid sha256:90bf8b1abd077560ae21524bb4b9db9586219a179e281917a253696ddb8257bc
  size 125048
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a74aa14ddf23ce2fdd4b7f8ae4bf9b779c8c56b388e745f1fc77cb88b58b464a
+ oid sha256:262a50e5a7cc8f345a277592b0f488331df7618f612800c3a2617d850e787308
  size 162868
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6d70924c232436c800df90f52f87adf6e3125c020b3b0e481f02d258de1cd8e9
+ oid sha256:44059b09209f87972294b03cfee10b90679da0d01bc9e481ff780b446fb3e5e2
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cfc2bf0eccc6c4e85c949c664a83bcd160767da77920eebf352a6f7f7c4c9b2e
+ oid sha256:54b996514a941dd419a3f7869454171b960cb51cf1b91d9b10dbdcf1b1e50a10
  size 1064
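
The four binary artifacts above are stored as Git LFS pointer files, so each diff only swaps the sha256 oid while the recorded byte size stays the same. A minimal sketch (Python, with an illustrative local path) for checking that a pulled file matches the oid in its pointer — the expected digest below is the new oid from the adapter_model.safetensors diff:

import hashlib

def sha256_of(path: str) -> str:
    # Hash the file in 1 MiB chunks so large checkpoints need not fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "90bf8b1abd077560ae21524bb4b9db9586219a179e281917a253696ddb8257bc"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")
print("ok" if actual == expected else f"mismatch: {actual}")
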
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 11.067892074584961,
- "best_model_checkpoint": "miner_id_24/checkpoint-50",
- "epoch": 0.002259835935911053,
+ "best_metric": 11.05736255645752,
+ "best_model_checkpoint": "miner_id_24/checkpoint-60",
+ "epoch": 0.0027118031230932635,
  "eval_steps": 5,
- "global_step": 50,
+ "global_step": 60,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -445,6 +445,92 @@
  "eval_samples_per_second": 52.78,
  "eval_steps_per_second": 26.393,
  "step": 50
+ },
+ {
+ "epoch": 0.002305032654629274,
+ "grad_norm": 0.7787512540817261,
+ "learning_rate": 0.00019656487088855592,
+ "loss": 44.2918,
+ "step": 51
+ },
+ {
+ "epoch": 0.002350229373347495,
+ "grad_norm": 0.7184544801712036,
+ "learning_rate": 0.00019639628606958533,
+ "loss": 44.2751,
+ "step": 52
+ },
+ {
+ "epoch": 0.002395426092065716,
+ "grad_norm": 0.7348573803901672,
+ "learning_rate": 0.0001962237387768529,
+ "loss": 44.246,
+ "step": 53
+ },
+ {
+ "epoch": 0.002440622810783937,
+ "grad_norm": 0.7713965773582458,
+ "learning_rate": 0.00019604723610310194,
+ "loss": 44.3292,
+ "step": 54
+ },
+ {
+ "epoch": 0.002485819529502158,
+ "grad_norm": 0.8040369749069214,
+ "learning_rate": 0.00019586678530366606,
+ "loss": 44.2155,
+ "step": 55
+ },
+ {
+ "epoch": 0.002485819529502158,
+ "eval_loss": 11.062650680541992,
+ "eval_runtime": 176.278,
+ "eval_samples_per_second": 52.854,
+ "eval_steps_per_second": 26.43,
+ "step": 55
+ },
+ {
+ "epoch": 0.0025310162482203792,
+ "grad_norm": 0.7459877133369446,
+ "learning_rate": 0.00019568239379617088,
+ "loss": 44.2188,
+ "step": 56
+ },
+ {
+ "epoch": 0.0025762129669386004,
+ "grad_norm": 0.8008533716201782,
+ "learning_rate": 0.00019549406916022905,
+ "loss": 44.226,
+ "step": 57
+ },
+ {
+ "epoch": 0.002621409685656821,
+ "grad_norm": 0.7918010354042053,
+ "learning_rate": 0.00019530181913712872,
+ "loss": 44.287,
+ "step": 58
+ },
+ {
+ "epoch": 0.0026666064043750423,
+ "grad_norm": 0.7287217974662781,
+ "learning_rate": 0.00019510565162951537,
+ "loss": 44.2581,
+ "step": 59
+ },
+ {
+ "epoch": 0.0027118031230932635,
+ "grad_norm": 0.7925474643707275,
+ "learning_rate": 0.00019490557470106686,
+ "loss": 44.2277,
+ "step": 60
+ },
+ {
+ "epoch": 0.0027118031230932635,
+ "eval_loss": 11.05736255645752,
+ "eval_runtime": 176.2465,
+ "eval_samples_per_second": 52.863,
+ "eval_steps_per_second": 26.435,
+ "step": 60
  }
  ],
  "logging_steps": 1,
@@ -473,7 +559,7 @@
  "attributes": {}
  }
  },
- "total_flos": 525336576000.0,
+ "total_flos": 630403891200.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null