Training in progress, step 750, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:57b6faa60d8c701666a9d5fcee1f685ec69cd9618f925244a3fc222b6447dbc8
 size 319876032
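The adapter_model.safetensors filename follows the PEFT convention for adapter weights, so this checkpoint most likely carries a LoRA-style adapter rather than full model weights. A minimal loading sketch follows; the base model identifier is a placeholder, since this commit does not record which base model the adapter was trained against.

# Hedged sketch: assumes a PEFT (e.g. LoRA) adapter on top of a causal-LM base model.
# "BASE_MODEL_NAME" is a placeholder, not something recorded in this diff.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("BASE_MODEL_NAME")
model = PeftModel.from_pretrained(base, "last-checkpoint")  # reads adapter_model.safetensors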
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:638e7f15ac1f1388fee114578cdbe315595262541250d98493ebe0ff2fe0d52b
 size 640010002
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:edb447f63d665a92c0b5a3329ef77c28b5ad60571b15ad21996b1f09ef09590c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:01d7fef7d29480b2b6b9ce11dd77e0c699d943e8e5d91236651553a4ae0d3870
 size 1256
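The four binary files above are tracked with Git LFS, so the repository only versions a small pointer file containing the blob's sha256 oid and byte size; the diff therefore shows a pointer update, not the binary contents. A minimal sketch for checking a locally downloaded file against the pointer values shown in this commit (the local path is an assumption):

import hashlib
from pathlib import Path

def matches_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    # Compare a downloaded file against the oid/size recorded in its LFS pointer.
    p = Path(path)
    if p.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values taken from the adapter_model.safetensors pointer in this commit.
print(matches_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "57b6faa60d8c701666a9d5fcee1f685ec69cd9618f925244a3fc222b6447dbc8",
    319876032,
))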
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.
-  "best_model_checkpoint": "./output/checkpoint-
-  "epoch": 0.
+  "best_metric": 0.4108331799507141,
+  "best_model_checkpoint": "./output/checkpoint-750",
+  "epoch": 0.04240642315956124,
   "eval_steps": 150,
-  "global_step":
+  "global_step": 750,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -459,6 +459,119 @@
       "eval_samples_per_second": 12.577,
       "eval_steps_per_second": 12.577,
       "step": 600
+    },
+    {
+      "epoch": 0.0344905575031098,
+      "grad_norm": 6.550843715667725,
+      "learning_rate": 7.301309871606081e-06,
+      "loss": 0.3565,
+      "step": 610
+    },
+    {
+      "epoch": 0.03505597647857062,
+      "grad_norm": 1.943982720375061,
+      "learning_rate": 7.293514711482861e-06,
+      "loss": 0.2715,
+      "step": 620
+    },
+    {
+      "epoch": 0.03562139545403144,
+      "grad_norm": 4.950693607330322,
+      "learning_rate": 7.285573891348849e-06,
+      "loss": 0.2719,
+      "step": 630
+    },
+    {
+      "epoch": 0.03618681442949225,
+      "grad_norm": 7.106111526489258,
+      "learning_rate": 7.27748773762006e-06,
+      "loss": 0.287,
+      "step": 640
+    },
+    {
+      "epoch": 0.03675223340495307,
+      "grad_norm": 3.4435412883758545,
+      "learning_rate": 7.269256582686603e-06,
+      "loss": 0.2495,
+      "step": 650
+    },
+    {
+      "epoch": 0.03731765238041389,
+      "grad_norm": 3.9263601303100586,
+      "learning_rate": 7.260880764899016e-06,
+      "loss": 0.2317,
+      "step": 660
+    },
+    {
+      "epoch": 0.0378830713558747,
+      "grad_norm": 1.1376698017120361,
+      "learning_rate": 7.252360628554363e-06,
+      "loss": 0.138,
+      "step": 670
+    },
+    {
+      "epoch": 0.038448490331335516,
+      "grad_norm": 7.205196857452393,
+      "learning_rate": 7.243696523882079e-06,
+      "loss": 0.1982,
+      "step": 680
+    },
+    {
+      "epoch": 0.03901390930679634,
+      "grad_norm": 3.7006053924560547,
+      "learning_rate": 7.2348888070295705e-06,
+      "loss": 0.2156,
+      "step": 690
+    },
+    {
+      "epoch": 0.03957932828225715,
+      "grad_norm": 0.5304602384567261,
+      "learning_rate": 7.225937840047583e-06,
+      "loss": 0.3153,
+      "step": 700
+    },
+    {
+      "epoch": 0.040144747257717966,
+      "grad_norm": 14.555486679077148,
+      "learning_rate": 7.216843990875307e-06,
+      "loss": 0.3455,
+      "step": 710
+    },
+    {
+      "epoch": 0.04071016623317879,
+      "grad_norm": 20.35503578186035,
+      "learning_rate": 7.207607633325266e-06,
+      "loss": 0.2996,
+      "step": 720
+    },
+    {
+      "epoch": 0.0412755852086396,
+      "grad_norm": 0.4252071678638458,
+      "learning_rate": 7.198229147067941e-06,
+      "loss": 0.2781,
+      "step": 730
+    },
+    {
+      "epoch": 0.04184100418410042,
+      "grad_norm": 0.641488790512085,
+      "learning_rate": 7.18870891761617e-06,
+      "loss": 0.1364,
+      "step": 740
+    },
+    {
+      "epoch": 0.04240642315956124,
+      "grad_norm": 7.14177942276001,
+      "learning_rate": 7.1790473363092974e-06,
+      "loss": 0.2639,
+      "step": 750
+    },
+    {
+      "epoch": 0.04240642315956124,
+      "eval_loss": 0.4108331799507141,
+      "eval_runtime": 39.6561,
+      "eval_samples_per_second": 12.608,
+      "eval_steps_per_second": 12.608,
+      "step": 750
     }
   ],
   "logging_steps": 10,
@@ -478,7 +591,7 @@
         "attributes": {}
       }
     },
-  "total_flos":
+  "total_flos": 4.289226525366682e+16,
   "train_batch_size": 4,
   "trial_name": null,
   "trial_params": null
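The trainer_state.json update appends the log entries for steps 610-750 plus the step-750 evaluation, and sets best_model_checkpoint to ./output/checkpoint-750 with best_metric 0.4108331799507141 (the step-750 eval_loss). A small sketch of reading that log back from the checkpoint directory; the path is an assumption:

import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

train = [e for e in state["log_history"] if "loss" in e]
evals = [e for e in state["log_history"] if "eval_loss" in e]

print("global_step:", state["global_step"])                 # 750
print("best checkpoint:", state["best_model_checkpoint"])   # ./output/checkpoint-750
print("best metric:", state["best_metric"])                 # 0.4108331799507141
print("last train loss:", train[-1]["loss"], "at step", train[-1]["step"])
print("last eval loss:", evals[-1]["eval_loss"], "at step", evals[-1]["step"])

The optimizer.pt, scheduler.pt, and rng_state.pth files updated above are what the transformers Trainer reads when training is resumed with resume_from_checkpoint, so this commit captures everything needed to continue from step 750.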