{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.886075949367089,
  "eval_steps": 500,
  "global_step": 195,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.25316455696202533,
      "grad_norm": 0.32324039936065674,
      "learning_rate": 0.0001997790438338385,
      "loss": 1.5745,
      "step": 10
    },
    {
      "epoch": 0.5063291139240507,
      "grad_norm": 0.8489105105400085,
      "learning_rate": 0.00019730448705798239,
      "loss": 1.0497,
      "step": 20
    },
    {
      "epoch": 0.759493670886076,
      "grad_norm": 0.7880690097808838,
      "learning_rate": 0.00019214762118704076,
      "loss": 0.7976,
      "step": 30
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.29379141330719,
      "learning_rate": 0.0001844506011066308,
      "loss": 0.8209,
      "step": 40
    },
    {
      "epoch": 1.2531645569620253,
      "grad_norm": 1.0279569625854492,
      "learning_rate": 0.00017442560394846516,
      "loss": 0.6251,
      "step": 50
    },
    {
      "epoch": 1.5063291139240507,
      "grad_norm": 0.636813223361969,
      "learning_rate": 0.00016234898018587337,
      "loss": 0.5924,
      "step": 60
    },
    {
      "epoch": 1.759493670886076,
      "grad_norm": 0.7099205255508423,
      "learning_rate": 0.00014855363571801523,
      "loss": 0.5931,
      "step": 70
    },
    {
      "epoch": 2.0,
      "grad_norm": 1.2306174039840698,
      "learning_rate": 0.00013341985493931877,
      "loss": 0.6078,
      "step": 80
    },
    {
      "epoch": 2.2531645569620253,
      "grad_norm": 0.7018842101097107,
      "learning_rate": 0.00011736481776669306,
      "loss": 0.4799,
      "step": 90
    },
    {
      "epoch": 2.5063291139240507,
      "grad_norm": 0.35523730516433716,
      "learning_rate": 0.00010083109959960973,
      "loss": 0.4349,
      "step": 100
    },
    {
      "epoch": 2.759493670886076,
      "grad_norm": 0.5931728482246399,
      "learning_rate": 8.427447122476148e-05,
      "loss": 0.4463,
      "step": 110
    },
    {
      "epoch": 3.0,
      "grad_norm": 1.0323467254638672,
      "learning_rate": 6.815133497483157e-05,
      "loss": 0.4688,
      "step": 120
    },
    {
      "epoch": 3.2531645569620253,
      "grad_norm": 0.34674957394599915,
      "learning_rate": 5.290614347797802e-05,
      "loss": 0.3858,
      "step": 130
    },
    {
      "epoch": 3.5063291139240507,
      "grad_norm": 0.40083837509155273,
      "learning_rate": 3.89591478145437e-05,
      "loss": 0.4163,
      "step": 140
    },
    {
      "epoch": 3.759493670886076,
      "grad_norm": 0.5980750322341919,
      "learning_rate": 2.669481281701739e-05,
      "loss": 0.3411,
      "step": 150
    },
    {
      "epoch": 4.0,
      "grad_norm": 0.8719474673271179,
      "learning_rate": 1.6451218858706374e-05,
      "loss": 0.3478,
      "step": 160
    },
    {
      "epoch": 4.253164556962025,
      "grad_norm": 0.31751662492752075,
      "learning_rate": 8.510742282896544e-06,
      "loss": 0.3534,
      "step": 170
    },
    {
      "epoch": 4.506329113924051,
      "grad_norm": 0.37393781542778015,
      "learning_rate": 3.092271377092215e-06,
      "loss": 0.3226,
      "step": 180
    },
    {
      "epoch": 4.759493670886076,
      "grad_norm": 0.4498203992843628,
      "learning_rate": 3.451724678784518e-07,
      "loss": 0.303,
      "step": 190
    }
  ],
  "logging_steps": 10,
  "max_steps": 195,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 0,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1684049994579968e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}