Training in progress, step 1000, checkpoint
{
"best_metric": 1.4297349452972412,
"best_model_checkpoint": "./outputs/checkpoint-1000",
"epoch": 1.3333333333333333,
"eval_steps": 100,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.13,
"learning_rate": 0.0002,
"loss": 1.4574,
"step": 100
},
{
"epoch": 0.13,
"eval_loss": 1.5645889043807983,
"eval_runtime": 93.6488,
"eval_samples_per_second": 15.494,
"eval_steps_per_second": 1.943,
"step": 100
},
{
"epoch": 0.27,
"learning_rate": 0.0002,
"loss": 1.2971,
"step": 200
},
{
"epoch": 0.27,
"eval_loss": 1.5321934223175049,
"eval_runtime": 93.3383,
"eval_samples_per_second": 15.546,
"eval_steps_per_second": 1.95,
"step": 200
},
{
"epoch": 0.4,
"learning_rate": 0.0002,
"loss": 1.2756,
"step": 300
},
{
"epoch": 0.4,
"eval_loss": 1.5133259296417236,
"eval_runtime": 93.2652,
"eval_samples_per_second": 15.558,
"eval_steps_per_second": 1.951,
"step": 300
},
{
"epoch": 0.53,
"learning_rate": 0.0002,
"loss": 1.2661,
"step": 400
},
{
"epoch": 0.53,
"eval_loss": 1.500829815864563,
"eval_runtime": 93.3122,
"eval_samples_per_second": 15.55,
"eval_steps_per_second": 1.95,
"step": 400
},
{
"epoch": 0.67,
"learning_rate": 0.0002,
"loss": 1.2493,
"step": 500
},
{
"epoch": 0.67,
"eval_loss": 1.4804445505142212,
"eval_runtime": 93.3662,
"eval_samples_per_second": 15.541,
"eval_steps_per_second": 1.949,
"step": 500
},
{
"epoch": 0.8,
"learning_rate": 0.0002,
"loss": 1.2327,
"step": 600
},
{
"epoch": 0.8,
"eval_loss": 1.4673335552215576,
"eval_runtime": 93.4173,
"eval_samples_per_second": 15.532,
"eval_steps_per_second": 1.948,
"step": 600
},
{
"epoch": 0.93,
"learning_rate": 0.0002,
"loss": 1.2216,
"step": 700
},
{
"epoch": 0.93,
"eval_loss": 1.456375241279602,
"eval_runtime": 93.4236,
"eval_samples_per_second": 15.531,
"eval_steps_per_second": 1.948,
"step": 700
},
{
"epoch": 1.07,
"learning_rate": 0.0002,
"loss": 1.2086,
"step": 800
},
{
"epoch": 1.07,
"eval_loss": 1.4467251300811768,
"eval_runtime": 93.3142,
"eval_samples_per_second": 15.55,
"eval_steps_per_second": 1.95,
"step": 800
},
{
"epoch": 1.2,
"learning_rate": 0.0002,
"loss": 1.1922,
"step": 900
},
{
"epoch": 1.2,
"eval_loss": 1.4370195865631104,
"eval_runtime": 93.2407,
"eval_samples_per_second": 15.562,
"eval_steps_per_second": 1.952,
"step": 900
},
{
"epoch": 1.33,
"learning_rate": 0.0002,
"loss": 1.1859,
"step": 1000
},
{
"epoch": 1.33,
"eval_loss": 1.4297349452972412,
"eval_runtime": 93.4112,
"eval_samples_per_second": 15.533,
"eval_steps_per_second": 1.948,
"step": 1000
}
],
"logging_steps": 100,
"max_steps": 2250,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 100,
"total_flos": 5.844782394372096e+16,
"trial_name": null,
"trial_params": null
}
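
The JSON above is the trainer_state.json that the Hugging Face Trainer writes next to each saved checkpoint: "log_history" interleaves training entries (carrying "loss" and "learning_rate") with evaluation entries (carrying "eval_loss" and throughput figures), all keyed by "step". As a minimal sketch of reading the curves back out, the snippet below loads the file and prints train/eval loss per logged step; the file path is an assumption derived from "best_model_checkpoint", so adjust it to your own checkpoint directory.

# Sketch: summarize the loss curves recorded in a trainer_state.json.
# The path is an assumption based on "best_model_checkpoint" above.
import json

STATE_PATH = "./outputs/checkpoint-1000/trainer_state.json"  # hypothetical path

with open(STATE_PATH) as f:
    state = json.load(f)

# Trainer logs two kinds of entries: training logs (have "loss")
# and evaluation logs (have "eval_loss"), both keyed by "step".
train_loss = {e["step"]: e["loss"] for e in state["log_history"] if "loss" in e}
eval_loss = {e["step"]: e["eval_loss"] for e in state["log_history"] if "eval_loss" in e}

print(f"best eval_loss {state['best_metric']:.4f} at {state['best_model_checkpoint']}")
for step in sorted(train_loss):
    ev = eval_loss.get(step, float("nan"))
    print(f"step {step:>5}  train {train_loss[step]:.4f}  eval {ev:.4f}")

Run against this state, the script would show eval_loss falling monotonically from 1.5646 at step 100 to 1.4297 at step 1000, which is why checkpoint-1000 is recorded as the best model so far.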