{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 250,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08,
"grad_norm": 10.01574420928955,
"learning_rate": 6.4000000000000006e-06,
"loss": 1.545,
"step": 10
},
{
"epoch": 0.16,
"grad_norm": 5.001451015472412,
"learning_rate": 1.4400000000000001e-05,
"loss": 0.6012,
"step": 20
},
{
"epoch": 0.24,
"grad_norm": 2.802138566970825,
"learning_rate": 1.9991228300988586e-05,
"loss": 0.3138,
"step": 30
},
{
"epoch": 0.32,
"grad_norm": 5.7909345626831055,
"learning_rate": 1.983571470813386e-05,
"loss": 0.2683,
"step": 40
},
{
"epoch": 0.4,
"grad_norm": 2.0869674682617188,
"learning_rate": 1.9488760116444966e-05,
"loss": 0.0889,
"step": 50
},
{
"epoch": 0.48,
"grad_norm": 0.8485150337219238,
"learning_rate": 1.895711760239413e-05,
"loss": 0.202,
"step": 60
},
{
"epoch": 0.56,
"grad_norm": 2.812760353088379,
"learning_rate": 1.8251134982782952e-05,
"loss": 0.1592,
"step": 70
},
{
"epoch": 0.64,
"grad_norm": 7.189540863037109,
"learning_rate": 1.7384553406258842e-05,
"loss": 0.1,
"step": 80
},
{
"epoch": 0.72,
"grad_norm": 2.625497579574585,
"learning_rate": 1.63742398974869e-05,
"loss": 0.1889,
"step": 90
},
{
"epoch": 0.8,
"grad_norm": 0.10543157905340195,
"learning_rate": 1.5239859059700794e-05,
"loss": 0.1133,
"step": 100
},
{
"epoch": 0.88,
"grad_norm": 4.1977386474609375,
"learning_rate": 1.4003490325568953e-05,
"loss": 0.0466,
"step": 110
},
{
"epoch": 0.96,
"grad_norm": 0.047037750482559204,
"learning_rate": 1.2689198206152657e-05,
"loss": 0.1327,
"step": 120
},
{
"epoch": 1.04,
"grad_norm": 1.9393171072006226,
"learning_rate": 1.1322563902571227e-05,
"loss": 0.0181,
"step": 130
},
{
"epoch": 1.12,
"grad_norm": 3.4030303955078125,
"learning_rate": 9.930187397020385e-06,
"loss": 0.0681,
"step": 140
},
{
"epoch": 1.2,
"grad_norm": 0.11634967476129532,
"learning_rate": 8.539169714375885e-06,
"loss": 0.0355,
"step": 150
},
{
"epoch": 1.28,
"grad_norm": 1.0528508424758911,
"learning_rate": 7.176585431571235e-06,
"loss": 0.1156,
"step": 160
},
{
"epoch": 1.3599999999999999,
"grad_norm": 0.18656061589717865,
"learning_rate": 5.868955701754584e-06,
"loss": 0.0125,
"step": 170
},
{
"epoch": 1.44,
"grad_norm": 0.2206106185913086,
"learning_rate": 4.641732050210032e-06,
"loss": 0.0465,
"step": 180
},
{
"epoch": 1.52,
"grad_norm": 0.03330332040786743,
"learning_rate": 3.5188009893686916e-06,
"loss": 0.0085,
"step": 190
},
{
"epoch": 1.6,
"grad_norm": 0.2540493309497833,
"learning_rate": 2.522019095014683e-06,
"loss": 0.0382,
"step": 200
},
{
"epoch": 1.6800000000000002,
"grad_norm": 0.01842614635825157,
"learning_rate": 1.6707875928990059e-06,
"loss": 0.0105,
"step": 210
},
{
"epoch": 1.76,
"grad_norm": 1.4763164520263672,
"learning_rate": 9.816747359488632e-07,
"loss": 0.0363,
"step": 220
},
{
"epoch": 1.8399999999999999,
"grad_norm": 0.038556963205337524,
"learning_rate": 4.6809332207053083e-07,
"loss": 0.0317,
"step": 230
},
{
"epoch": 1.92,
"grad_norm": 6.009864330291748,
"learning_rate": 1.400396292949513e-07,
"loss": 0.0262,
"step": 240
},
{
"epoch": 2.0,
"grad_norm": 0.019221752882003784,
"learning_rate": 3.898849596456477e-09,
"loss": 0.0135,
"step": 250
},
{
"epoch": 2.0,
"step": 250,
"total_flos": 1.5362943036162048e+16,
"train_loss": 0.16884834933280946,
"train_runtime": 1433.2088,
"train_samples_per_second": 0.698,
"train_steps_per_second": 0.174
}
],
"logging_steps": 10,
"max_steps": 250,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.5362943036162048e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}