Training in progress, epoch 2 (commit ef86e95)
{
  "best_metric": 0.5966597659617285,
  "best_model_checkpoint": "test-klue/ynat/run-0/checkpoint-2764",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 2764,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.36,
      "learning_rate": 1.549617863520091e-06,
      "loss": 1.0402,
      "step": 500
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.2073877699865019e-06,
      "loss": 0.8825,
      "step": 1000
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.5922490852804443,
      "eval_loss": 0.8286302089691162,
      "eval_runtime": 9.5242,
      "eval_samples_per_second": 248.734,
      "eval_steps_per_second": 1.995,
      "step": 1382
    },
    {
      "epoch": 1.09,
      "learning_rate": 8.65157676452913e-07,
      "loss": 0.8276,
      "step": 1500
    },
    {
      "epoch": 1.45,
      "learning_rate": 5.22927582919324e-07,
      "loss": 0.7615,
      "step": 2000
    },
    {
      "epoch": 1.81,
      "learning_rate": 1.8069748938573499e-07,
      "loss": 0.7606,
      "step": 2500
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.5966597659617285,
      "eval_loss": 0.8385059237480164,
      "eval_runtime": 9.5681,
      "eval_samples_per_second": 247.594,
      "eval_steps_per_second": 1.986,
      "step": 2764
    }
  ],
  "logging_steps": 500,
  "max_steps": 2764,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "total_flos": 417767579078724.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 1.8850033551830082e-06,
    "num_train_epochs": 2,
    "per_device_train_batch_size": 4,
    "seed": 19
  }
}
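
The "is_hyper_param_search" flag, the "trial_params" block, and the "run-0" directory in "best_model_checkpoint" are what the Transformers Trainer records while Trainer.hyperparameter_search is running. Below is a minimal sketch of a search setup that could produce trial parameters of this shape; the backend (Optuna), the base model klue/bert-base, the search ranges, and the metric wiring are assumptions for illustration and are not recorded in this file.

import numpy as np
import evaluate
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

model_name = "klue/bert-base"  # assumed base model, not stated in this file
tokenizer = AutoTokenizer.from_pretrained(model_name)

# KLUE-YNAT is a 7-class topic classification task over news headlines ("title").
dataset = load_dataset("klue", "ynat")
dataset = dataset.map(lambda x: tokenizer(x["title"], truncation=True), batched=True)

f1_metric = evaluate.load("f1")

def compute_metrics(eval_pred):
    # Produces the "eval_f1" entries seen in log_history above.
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return f1_metric.compute(predictions=preds, references=labels, average="macro")

def model_init():
    # A fresh model per trial, as required by Trainer.hyperparameter_search.
    return AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=7)

def hp_space(trial):
    # Hypothetical Optuna search space; it merely covers the values this trial
    # happened to sample (learning_rate ~1.9e-06, 2 epochs, batch size 4, seed 19).
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
        "per_device_train_batch_size": trial.suggest_categorical(
            "per_device_train_batch_size", [4, 8, 16, 32]),
        "seed": trial.suggest_int("seed", 1, 40),
    }

args = TrainingArguments(
    output_dir="test-klue/ynat",      # trials are written to run-<n>/ inside this dir
    evaluation_strategy="epoch",      # matches the eval entries at steps 1382 and 2764
    logging_steps=500,
    save_steps=500,
)

trainer = Trainer(
    model_init=model_init,
    args=args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["validation"],
    tokenizer=tokenizer,
    compute_metrics=compute_metrics,
)

best_trial = trainer.hyperparameter_search(
    hp_space=hp_space,
    backend="optuna",
    n_trials=10,
    direction="maximize",
    compute_objective=lambda metrics: metrics["eval_f1"],
)
print(best_trial)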
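
A state file like this can also be inspected directly, without instantiating a Trainer. The sketch below assumes it is saved as trainer_state.json inside the checkpoint directory named in "best_model_checkpoint", which is the usual Transformers layout; it prints the best checkpoint and the per-epoch eval F1 entries from "log_history".

import json
from pathlib import Path

# Assumed location, following the standard checkpoint layout.
state_path = Path("test-klue/ynat/run-0/checkpoint-2764/trainer_state.json")
state = json.loads(state_path.read_text())

# Best result recorded by the Trainer for this trial.
print("best checkpoint:", state["best_model_checkpoint"])
print("best metric (eval_f1):", state["best_metric"])

# Evaluation entries are the log_history records carrying eval_* keys.
for record in state["log_history"]:
    if "eval_f1" in record:
        print(f"epoch {record['epoch']}: eval_f1={record['eval_f1']:.4f}, "
              f"eval_loss={record['eval_loss']:.4f}")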