|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.01412928293889085,
  "eval_steps": 10,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00028258565877781704,
      "eval_loss": 9.014374732971191,
      "eval_runtime": 45.8636,
      "eval_samples_per_second": 32.488,
      "eval_steps_per_second": 16.244,
      "step": 1
    },
    {
      "epoch": 0.001412928293889085,
      "grad_norm": 31.317241668701172,
      "learning_rate": 5e-05,
      "loss": 9.2381,
      "step": 5
    },
    {
      "epoch": 0.00282585658777817,
      "grad_norm": 30.054075241088867,
      "learning_rate": 0.0001,
      "loss": 7.6338,
      "step": 10
    },
    {
      "epoch": 0.00282585658777817,
      "eval_loss": 6.305483341217041,
      "eval_runtime": 46.5414,
      "eval_samples_per_second": 32.015,
      "eval_steps_per_second": 16.007,
      "step": 10
    },
    {
      "epoch": 0.0042387848816672555,
      "grad_norm": 14.453044891357422,
      "learning_rate": 9.619397662556435e-05,
      "loss": 5.5007,
      "step": 15
    },
    {
      "epoch": 0.00565171317555634,
      "grad_norm": 11.699786186218262,
      "learning_rate": 8.535533905932738e-05,
      "loss": 4.869,
      "step": 20
    },
    {
      "epoch": 0.00565171317555634,
      "eval_loss": 4.397282600402832,
      "eval_runtime": 45.8608,
      "eval_samples_per_second": 32.49,
      "eval_steps_per_second": 16.245,
      "step": 20
    },
    {
      "epoch": 0.007064641469445425,
      "grad_norm": 10.621557235717773,
      "learning_rate": 6.91341716182545e-05,
      "loss": 4.2254,
      "step": 25
    },
    {
      "epoch": 0.008477569763334511,
      "grad_norm": 8.351560592651367,
      "learning_rate": 5e-05,
      "loss": 3.6046,
      "step": 30
    },
    {
      "epoch": 0.008477569763334511,
      "eval_loss": 3.5391788482666016,
      "eval_runtime": 45.7633,
      "eval_samples_per_second": 32.559,
      "eval_steps_per_second": 16.279,
      "step": 30
    },
    {
      "epoch": 0.009890498057223597,
      "grad_norm": 12.242228507995605,
      "learning_rate": 3.086582838174551e-05,
      "loss": 3.3887,
      "step": 35
    },
    {
      "epoch": 0.01130342635111268,
      "grad_norm": 8.75986385345459,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 3.3271,
      "step": 40
    },
    {
      "epoch": 0.01130342635111268,
      "eval_loss": 3.2006571292877197,
      "eval_runtime": 46.1457,
      "eval_samples_per_second": 32.289,
      "eval_steps_per_second": 16.145,
      "step": 40
    },
    {
      "epoch": 0.012716354645001766,
      "grad_norm": 10.265480041503906,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 3.1251,
      "step": 45
    },
    {
      "epoch": 0.01412928293889085,
      "grad_norm": 10.496199607849121,
      "learning_rate": 0.0,
      "loss": 2.9414,
      "step": 50
    },
    {
      "epoch": 0.01412928293889085,
      "eval_loss": 3.158576011657715,
      "eval_runtime": 46.2368,
      "eval_samples_per_second": 32.225,
      "eval_steps_per_second": 16.113,
      "step": 50
    }
  ],
  "logging_steps": 5,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 13,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1278959891251200.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|
|