Training in progress, step 10, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.0051013901288101005,
"eval_steps": 3,
"global_step": 10,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0005101390128810101,
"grad_norm": 0.25031808018684387,
"learning_rate": 2e-05,
"loss": 2.5092,
"step": 1
},
{
"epoch": 0.0010202780257620202,
"grad_norm": 0.27107110619544983,
"learning_rate": 4e-05,
"loss": 2.6743,
"step": 2
},
{
"epoch": 0.0015304170386430302,
"grad_norm": 0.24843961000442505,
"learning_rate": 6e-05,
"loss": 2.4752,
"step": 3
},
{
"epoch": 0.0015304170386430302,
"eval_loss": 2.6821274757385254,
"eval_runtime": 75.2159,
"eval_samples_per_second": 10.982,
"eval_steps_per_second": 5.491,
"step": 3
},
{
"epoch": 0.0020405560515240405,
"grad_norm": 0.25299274921417236,
"learning_rate": 8e-05,
"loss": 2.7086,
"step": 4
},
{
"epoch": 0.0025506950644050503,
"grad_norm": 0.2649744749069214,
"learning_rate": 0.0001,
"loss": 2.422,
"step": 5
},
{
"epoch": 0.0030608340772860605,
"grad_norm": 0.2959408760070801,
"learning_rate": 0.00012,
"loss": 2.6637,
"step": 6
},
{
"epoch": 0.0030608340772860605,
"eval_loss": 2.6740875244140625,
"eval_runtime": 75.7278,
"eval_samples_per_second": 10.907,
"eval_steps_per_second": 5.454,
"step": 6
},
{
"epoch": 0.0035709730901670707,
"grad_norm": 0.28532931208610535,
"learning_rate": 0.00014,
"loss": 2.3546,
"step": 7
},
{
"epoch": 0.004081112103048081,
"grad_norm": 0.39420419931411743,
"learning_rate": 0.00016,
"loss": 2.9987,
"step": 8
},
{
"epoch": 0.00459125111592909,
"grad_norm": 0.3755263388156891,
"learning_rate": 0.00018,
"loss": 2.5646,
"step": 9
},
{
"epoch": 0.00459125111592909,
"eval_loss": 2.6205434799194336,
"eval_runtime": 75.4786,
"eval_samples_per_second": 10.943,
"eval_steps_per_second": 5.472,
"step": 9
},
{
"epoch": 0.0051013901288101005,
"grad_norm": 0.4062849283218384,
"learning_rate": 0.0002,
"loss": 2.4832,
"step": 10
}
],
"logging_steps": 1,
"max_steps": 10,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 3,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3136744882962432.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
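
The JSON above is the trainer_state.json that the Hugging Face transformers Trainer writes into a checkpoint directory: it records the global step, the epoch fraction, the per-step training log, and the periodic evaluation results (logging_steps = 1, eval_steps = 3, save_steps = 3, max_steps = 10). A minimal sketch of reading the file back for inspection, assuming it has been downloaded locally under a hypothetical checkpoint-10/ directory (the path is an assumption, not part of this repository):

import json

# Assumed local path; adjust to wherever the checkpoint was downloaded.
with open("checkpoint-10/trainer_state.json") as f:
    state = json.load(f)

print(f"global_step: {state['global_step']}, epoch: {state['epoch']:.6f}")

# log_history mixes training-loss entries and evaluation entries;
# split them by which loss key each entry carries.
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

for e in train_logs:
    print(f"step {e['step']:>2}: loss={e['loss']:.4f}  lr={e['learning_rate']:.0e}")

for e in eval_logs:
    print(f"step {e['step']:>2}: eval_loss={e['eval_loss']:.4f}")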