{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 6864,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07284382284382285,
      "grad_norm": 3.079172372817993,
      "learning_rate": 1.8543123543123545e-05,
      "loss": 2.2282,
      "step": 500
    },
    {
      "epoch": 0.1456876456876457,
      "grad_norm": 2.744220018386841,
      "learning_rate": 1.7086247086247088e-05,
      "loss": 1.8712,
      "step": 1000
    },
    {
      "epoch": 0.21853146853146854,
      "grad_norm": 3.1921932697296143,
      "learning_rate": 1.562937062937063e-05,
      "loss": 1.7247,
      "step": 1500
    },
    {
      "epoch": 0.2913752913752914,
      "grad_norm": 2.575843334197998,
      "learning_rate": 1.4172494172494174e-05,
      "loss": 1.632,
      "step": 2000
    },
    {
      "epoch": 0.36421911421911424,
      "grad_norm": 2.898587703704834,
      "learning_rate": 1.2715617715617716e-05,
      "loss": 1.5467,
      "step": 2500
    },
    {
      "epoch": 0.4370629370629371,
      "grad_norm": 2.6611907482147217,
      "learning_rate": 1.1258741258741259e-05,
      "loss": 1.5052,
      "step": 3000
    },
    {
      "epoch": 0.5099067599067599,
      "grad_norm": 2.7068097591400146,
      "learning_rate": 9.801864801864802e-06,
      "loss": 1.4542,
      "step": 3500
    },
    {
      "epoch": 0.5827505827505828,
      "grad_norm": 2.4694888591766357,
      "learning_rate": 8.344988344988347e-06,
      "loss": 1.4166,
      "step": 4000
    },
    {
      "epoch": 0.6555944055944056,
      "grad_norm": 2.4586915969848633,
      "learning_rate": 6.888111888111889e-06,
      "loss": 1.3813,
      "step": 4500
    },
    {
      "epoch": 0.7284382284382285,
      "grad_norm": 2.5161385536193848,
      "learning_rate": 5.431235431235432e-06,
      "loss": 1.3543,
      "step": 5000
    },
    {
      "epoch": 0.8012820512820513,
      "grad_norm": 2.7101500034332275,
      "learning_rate": 3.974358974358974e-06,
      "loss": 1.328,
      "step": 5500
    },
    {
      "epoch": 0.8741258741258742,
      "grad_norm": 3.201040506362915,
      "learning_rate": 2.517482517482518e-06,
      "loss": 1.3119,
      "step": 6000
    },
    {
      "epoch": 0.946969696969697,
      "grad_norm": 2.8589653968811035,
      "learning_rate": 1.0606060606060608e-06,
      "loss": 1.3049,
      "step": 6500
    }
  ],
  "logging_steps": 500,
  "max_steps": 6864,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.217315920596173e+16,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}