test_llama_2_ko_3 / trainer_state.json
Additional fine-tuning of LLAMA-2-KO for test purposes
827a50c verified
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 29.85745140388769,
  "eval_steps": 500,
  "global_step": 4320,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 2e-05,
      "loss": 2.3508,
      "step": 1
    },
    {
      "epoch": 3.46,
      "learning_rate": 1.7699074074074075e-05,
      "loss": 0.4416,
      "step": 500
    },
    {
      "epoch": 6.91,
      "learning_rate": 1.5412037037037036e-05,
      "loss": 0.0209,
      "step": 1000
    },
    {
      "epoch": 10.37,
      "learning_rate": 1.3125e-05,
      "loss": 0.0105,
      "step": 1500
    },
    {
      "epoch": 13.82,
      "learning_rate": 1.0828703703703705e-05,
      "loss": 0.0155,
      "step": 2000
    },
    {
      "epoch": 17.28,
      "learning_rate": 8.51388888888889e-06,
      "loss": 0.0157,
      "step": 2500
    },
    {
      "epoch": 20.73,
      "learning_rate": 6.199074074074075e-06,
      "loss": 0.0118,
      "step": 3000
    },
    {
      "epoch": 24.19,
      "learning_rate": 3.88425925925926e-06,
      "loss": 0.0117,
      "step": 3500
    },
    {
      "epoch": 27.65,
      "learning_rate": 1.5694444444444446e-06,
      "loss": 0.0112,
      "step": 4000
    },
    {
      "epoch": 29.86,
      "step": 4320,
      "total_flos": 2.83089920921174e+18,
      "train_loss": 0.06351593561746456,
      "train_runtime": 61407.6851,
      "train_samples_per_second": 1.131,
      "train_steps_per_second": 0.07
    }
  ],
  "logging_steps": 500,
  "max_steps": 4320,
  "num_train_epochs": 30,
  "save_steps": 500,
  "total_flos": 2.83089920921174e+18,
  "trial_name": null,
  "trial_params": null
}
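This is the trainer_state.json that the Hugging Face Trainer saves alongside a checkpoint; the "log_history" array holds one entry per logging event (every 500 steps here) plus a final run summary. A minimal sketch of reading it with the standard json module follows; the file path is an assumption, adjust it to wherever the checkpoint was downloaded.

# Minimal sketch: inspect the loss curve recorded in trainer_state.json.
# The path "trainer_state.json" is an assumption; point it at the checkpoint directory.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(f"global_step={state['global_step']}  epoch={state['epoch']:.2f}")

# Regular entries carry step, epoch, learning_rate, and loss.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>5}  epoch {entry['epoch']:>6.2f}  loss {entry['loss']:.4f}")

# The last entry is the run summary rather than a per-step loss.
summary = state["log_history"][-1]
print("train_loss:", summary.get("train_loss"))
print("train_runtime (s):", summary.get("train_runtime"))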