{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.575107296137339,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08583690987124463,
      "grad_norm": 0.0872349739074707,
      "learning_rate": 0.0001998960663781063,
      "loss": 0.7368,
      "step": 100
    },
    {
      "epoch": 0.17167381974248927,
      "grad_norm": 0.09454165399074554,
      "learning_rate": 0.00019906589321760313,
      "loss": 0.5448,
      "step": 200
    },
    {
      "epoch": 0.2575107296137339,
      "grad_norm": 0.1324642151594162,
      "learning_rate": 0.00019741244594178857,
      "loss": 0.5027,
      "step": 300
    },
    {
      "epoch": 0.34334763948497854,
      "grad_norm": 0.12300574034452438,
      "learning_rate": 0.00019494946530743566,
      "loss": 0.4819,
      "step": 400
    },
    {
      "epoch": 0.4291845493562232,
      "grad_norm": 0.10243005305528641,
      "learning_rate": 0.00019169741959214142,
      "loss": 0.4581,
      "step": 500
    },
    {
      "epoch": 0.5150214592274678,
      "grad_norm": 0.1381332278251648,
      "learning_rate": 0.0001876833344953899,
      "loss": 0.4406,
      "step": 600
    },
    {
      "epoch": 0.6008583690987125,
      "grad_norm": 0.09684882313013077,
      "learning_rate": 0.0001829405685450202,
      "loss": 0.4233,
      "step": 700
    },
    {
      "epoch": 0.6866952789699571,
      "grad_norm": 0.11898784339427948,
      "learning_rate": 0.00017750853587555535,
      "loss": 0.4111,
      "step": 800
    },
    {
      "epoch": 0.7725321888412017,
      "grad_norm": 0.11926570534706116,
      "learning_rate": 0.00017143237868220553,
      "loss": 0.3987,
      "step": 900
    },
    {
      "epoch": 0.8583690987124464,
      "grad_norm": 0.10152015089988708,
      "learning_rate": 0.00016476259207257407,
      "loss": 0.3935,
      "step": 1000
    },
    {
      "epoch": 0.944206008583691,
      "grad_norm": 0.12837441265583038,
      "learning_rate": 0.0001575546044336872,
      "loss": 0.3853,
      "step": 1100
    },
    {
      "epoch": 1.0300429184549356,
      "grad_norm": 0.15152864158153534,
      "learning_rate": 0.00014986831680165167,
      "loss": 0.3771,
      "step": 1200
    },
    {
      "epoch": 1.1158798283261802,
      "grad_norm": 0.13338012993335724,
      "learning_rate": 0.00014176760506194906,
      "loss": 0.3582,
      "step": 1300
    },
    {
      "epoch": 1.201716738197425,
      "grad_norm": 0.14733010530471802,
      "learning_rate": 0.00013331978911726523,
      "loss": 0.3559,
      "step": 1400
    },
    {
      "epoch": 1.2875536480686696,
      "grad_norm": 0.09903525561094284,
      "learning_rate": 0.00012459507343426653,
      "loss": 0.3515,
      "step": 1500
    },
    {
      "epoch": 1.3733905579399142,
      "grad_norm": 0.08920130133628845,
      "learning_rate": 0.00011566596361858548,
      "loss": 0.344,
      "step": 1600
    },
    {
      "epoch": 1.4592274678111588,
      "grad_norm": 0.14736886322498322,
      "learning_rate": 0.0001066066638664925,
      "loss": 0.3378,
      "step": 1700
    },
    {
      "epoch": 1.5450643776824036,
      "grad_norm": 0.12657657265663147,
      "learning_rate": 9.749246030065306e-05,
      "loss": 0.3393,
      "step": 1800
    },
    {
      "epoch": 1.6309012875536482,
      "grad_norm": 0.11915148794651031,
      "learning_rate": 8.839909531467737e-05,
      "loss": 0.3379,
      "step": 1900
    },
    {
      "epoch": 1.7167381974248928,
      "grad_norm": 0.12128829956054688,
      "learning_rate": 7.940213812589018e-05,
      "loss": 0.331,
      "step": 2000
    },
    {
      "epoch": 1.8025751072961373,
      "grad_norm": 0.12637649476528168,
      "learning_rate": 7.057635676725945e-05,
      "loss": 0.3292,
      "step": 2100
    },
    {
      "epoch": 1.888412017167382,
      "grad_norm": 0.1001851037144661,
      "learning_rate": 6.199509673746246e-05,
      "loss": 0.3285,
      "step": 2200
    },
    {
      "epoch": 1.9742489270386265,
      "grad_norm": 0.11114014685153961,
      "learning_rate": 5.372967147273683e-05,
      "loss": 0.3235,
      "step": 2300
    },
    {
      "epoch": 2.060085836909871,
      "grad_norm": 0.09635433554649353,
      "learning_rate": 4.584876970591957e-05,
      "loss": 0.319,
      "step": 2400
    },
    {
      "epoch": 2.1459227467811157,
      "grad_norm": 0.11145951598882675,
      "learning_rate": 3.841788463774003e-05,
      "loss": 0.3167,
      "step": 2500
    },
    {
      "epoch": 2.2317596566523603,
      "grad_norm": 0.09987546503543854,
      "learning_rate": 3.149876966416321e-05,
      "loss": 0.3165,
      "step": 2600
    },
    {
      "epoch": 2.317596566523605,
      "grad_norm": 0.10801953822374344,
      "learning_rate": 2.514892518288988e-05,
      "loss": 0.3086,
      "step": 2700
    },
    {
      "epoch": 2.40343347639485,
      "grad_norm": 0.10372287780046463,
      "learning_rate": 1.9421120743841902e-05,
      "loss": 0.3127,
      "step": 2800
    },
    {
      "epoch": 2.4892703862660945,
      "grad_norm": 0.10351293534040451,
      "learning_rate": 1.436295651473667e-05,
      "loss": 0.3123,
      "step": 2900
    },
    {
      "epoch": 2.575107296137339,
      "grad_norm": 0.10307193547487259,
      "learning_rate": 1.0016467706135135e-05,
      "loss": 0.3108,
      "step": 3000
    }
  ],
  "logging_steps": 100,
  "max_steps": 3495,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.9668426131636224e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}