{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.00042494422607032827,
"eval_steps": 3,
"global_step": 6,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 7.082403767838805e-05,
"grad_norm": 0.5956665277481079,
"learning_rate": 2e-05,
"loss": 1.4888,
"step": 1
},
{
"epoch": 7.082403767838805e-05,
"eval_loss": 1.285233497619629,
"eval_runtime": 244.9702,
"eval_samples_per_second": 24.272,
"eval_steps_per_second": 12.136,
"step": 1
},
{
"epoch": 0.0001416480753567761,
"grad_norm": 0.3512568175792694,
"learning_rate": 4e-05,
"loss": 1.0845,
"step": 2
},
{
"epoch": 0.00021247211303516414,
"grad_norm": 0.2720630168914795,
"learning_rate": 6e-05,
"loss": 1.1689,
"step": 3
},
{
"epoch": 0.00021247211303516414,
"eval_loss": 1.2835770845413208,
"eval_runtime": 243.9274,
"eval_samples_per_second": 24.376,
"eval_steps_per_second": 12.188,
"step": 3
},
{
"epoch": 0.0002832961507135522,
"grad_norm": 0.5889267921447754,
"learning_rate": 8e-05,
"loss": 1.1645,
"step": 4
},
{
"epoch": 0.0003541201883919402,
"grad_norm": 0.8530604839324951,
"learning_rate": 0.0001,
"loss": 1.3131,
"step": 5
},
{
"epoch": 0.00042494422607032827,
"grad_norm": 0.24282629787921906,
"learning_rate": 0.00012,
"loss": 1.1017,
"step": 6
},
{
"epoch": 0.00042494422607032827,
"eval_loss": 1.2677675485610962,
"eval_runtime": 246.6787,
"eval_samples_per_second": 24.104,
"eval_steps_per_second": 12.052,
"step": 6
}
],
"logging_steps": 1,
"max_steps": 10,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 3,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1400649484861440.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}