Training in progress, step 100, checkpoint (commit cf43460, verified)
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.911589008363202,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.019115890083632018,
"eval_loss": 4.260312557220459,
"eval_runtime": 20.2891,
"eval_samples_per_second": 8.724,
"eval_steps_per_second": 4.387,
"step": 1
},
{
"epoch": 0.05734767025089606,
"grad_norm": 4.044816970825195,
"learning_rate": 3e-05,
"loss": 4.0773,
"step": 3
},
{
"epoch": 0.11469534050179211,
"grad_norm": 3.713122844696045,
"learning_rate": 6e-05,
"loss": 3.9077,
"step": 6
},
{
"epoch": 0.17204301075268819,
"grad_norm": 4.895143032073975,
"learning_rate": 9e-05,
"loss": 3.1815,
"step": 9
},
{
"epoch": 0.17204301075268819,
"eval_loss": 2.6312191486358643,
"eval_runtime": 19.7481,
"eval_samples_per_second": 8.963,
"eval_steps_per_second": 4.507,
"step": 9
},
{
"epoch": 0.22939068100358423,
"grad_norm": 2.3883414268493652,
"learning_rate": 0.00012,
"loss": 2.4895,
"step": 12
},
{
"epoch": 0.2867383512544803,
"grad_norm": 1.8660134077072144,
"learning_rate": 0.00015000000000000001,
"loss": 2.1507,
"step": 15
},
{
"epoch": 0.34408602150537637,
"grad_norm": 2.030590534210205,
"learning_rate": 0.00018,
"loss": 2.0577,
"step": 18
},
{
"epoch": 0.34408602150537637,
"eval_loss": 1.9757664203643799,
"eval_runtime": 19.7462,
"eval_samples_per_second": 8.964,
"eval_steps_per_second": 4.507,
"step": 18
},
{
"epoch": 0.4014336917562724,
"grad_norm": 1.5562611818313599,
"learning_rate": 0.0001999229036240723,
"loss": 2.0297,
"step": 21
},
{
"epoch": 0.45878136200716846,
"grad_norm": 1.7431790828704834,
"learning_rate": 0.00019876883405951377,
"loss": 1.861,
"step": 24
},
{
"epoch": 0.5161290322580645,
"grad_norm": 1.7667114734649658,
"learning_rate": 0.00019624552364536473,
"loss": 1.9087,
"step": 27
},
{
"epoch": 0.5161290322580645,
"eval_loss": 1.8730906248092651,
"eval_runtime": 19.7448,
"eval_samples_per_second": 8.964,
"eval_steps_per_second": 4.508,
"step": 27
},
{
"epoch": 0.5734767025089605,
"grad_norm": 1.5921707153320312,
"learning_rate": 0.0001923879532511287,
"loss": 1.8208,
"step": 30
},
{
"epoch": 0.6308243727598566,
"grad_norm": 1.4759997129440308,
"learning_rate": 0.00018724960070727972,
"loss": 1.9535,
"step": 33
},
{
"epoch": 0.6881720430107527,
"grad_norm": 1.3630574941635132,
"learning_rate": 0.00018090169943749476,
"loss": 1.8836,
"step": 36
},
{
"epoch": 0.6881720430107527,
"eval_loss": 1.8447675704956055,
"eval_runtime": 19.7508,
"eval_samples_per_second": 8.962,
"eval_steps_per_second": 4.506,
"step": 36
},
{
"epoch": 0.7455197132616488,
"grad_norm": 1.3893134593963623,
"learning_rate": 0.00017343225094356855,
"loss": 1.9008,
"step": 39
},
{
"epoch": 0.8028673835125448,
"grad_norm": 1.3943053483963013,
"learning_rate": 0.00016494480483301836,
"loss": 1.8282,
"step": 42
},
{
"epoch": 0.8602150537634409,
"grad_norm": 1.3723570108413696,
"learning_rate": 0.00015555702330196023,
"loss": 1.8482,
"step": 45
},
{
"epoch": 0.8602150537634409,
"eval_loss": 1.8272418975830078,
"eval_runtime": 19.7474,
"eval_samples_per_second": 8.963,
"eval_steps_per_second": 4.507,
"step": 45
},
{
"epoch": 0.9175627240143369,
"grad_norm": 1.2157355546951294,
"learning_rate": 0.00014539904997395468,
"loss": 1.7012,
"step": 48
},
{
"epoch": 0.974910394265233,
"grad_norm": 1.4276927709579468,
"learning_rate": 0.0001346117057077493,
"loss": 1.8858,
"step": 51
},
{
"epoch": 1.032258064516129,
"grad_norm": 1.1759012937545776,
"learning_rate": 0.00012334453638559057,
"loss": 2.0545,
"step": 54
},
{
"epoch": 1.032258064516129,
"eval_loss": 1.7873010635375977,
"eval_runtime": 19.7531,
"eval_samples_per_second": 8.961,
"eval_steps_per_second": 4.506,
"step": 54
},
{
"epoch": 1.0896057347670252,
"grad_norm": 1.0551518201828003,
"learning_rate": 0.00011175373974578378,
"loss": 1.4021,
"step": 57
},
{
"epoch": 1.146953405017921,
"grad_norm": 1.3845959901809692,
"learning_rate": 0.0001,
"loss": 1.3838,
"step": 60
},
{
"epoch": 1.2043010752688172,
"grad_norm": 1.4633151292800903,
"learning_rate": 8.824626025421626e-05,
"loss": 1.4003,
"step": 63
},
{
"epoch": 1.2043010752688172,
"eval_loss": 1.873085856437683,
"eval_runtime": 19.7425,
"eval_samples_per_second": 8.965,
"eval_steps_per_second": 4.508,
"step": 63
},
{
"epoch": 1.2616487455197132,
"grad_norm": 2.040634870529175,
"learning_rate": 7.66554636144095e-05,
"loss": 1.4048,
"step": 66
},
{
"epoch": 1.3189964157706093,
"grad_norm": 1.6276676654815674,
"learning_rate": 6.538829429225069e-05,
"loss": 1.3703,
"step": 69
},
{
"epoch": 1.3763440860215055,
"grad_norm": 1.4569168090820312,
"learning_rate": 5.4600950026045326e-05,
"loss": 1.2632,
"step": 72
},
{
"epoch": 1.3763440860215055,
"eval_loss": 1.8426142930984497,
"eval_runtime": 19.7468,
"eval_samples_per_second": 8.963,
"eval_steps_per_second": 4.507,
"step": 72
},
{
"epoch": 1.4336917562724014,
"grad_norm": 1.6304519176483154,
"learning_rate": 4.444297669803981e-05,
"loss": 1.3729,
"step": 75
},
{
"epoch": 1.4910394265232976,
"grad_norm": 1.690694808959961,
"learning_rate": 3.5055195166981645e-05,
"loss": 1.3019,
"step": 78
},
{
"epoch": 1.5483870967741935,
"grad_norm": 1.8786835670471191,
"learning_rate": 2.6567749056431467e-05,
"loss": 1.3178,
"step": 81
},
{
"epoch": 1.5483870967741935,
"eval_loss": 1.8405394554138184,
"eval_runtime": 19.7363,
"eval_samples_per_second": 8.968,
"eval_steps_per_second": 4.509,
"step": 81
},
{
"epoch": 1.6057347670250897,
"grad_norm": 1.877886176109314,
"learning_rate": 1.9098300562505266e-05,
"loss": 1.3041,
"step": 84
},
{
"epoch": 1.6630824372759858,
"grad_norm": 1.9166302680969238,
"learning_rate": 1.2750399292720283e-05,
"loss": 1.3063,
"step": 87
},
{
"epoch": 1.7204301075268817,
"grad_norm": 1.6511739492416382,
"learning_rate": 7.612046748871327e-06,
"loss": 1.2796,
"step": 90
},
{
"epoch": 1.7204301075268817,
"eval_loss": 1.8262274265289307,
"eval_runtime": 19.737,
"eval_samples_per_second": 8.968,
"eval_steps_per_second": 4.509,
"step": 90
},
{
"epoch": 1.7777777777777777,
"grad_norm": 1.6190431118011475,
"learning_rate": 3.7544763546352834e-06,
"loss": 1.2655,
"step": 93
},
{
"epoch": 1.8351254480286738,
"grad_norm": 1.7480906248092651,
"learning_rate": 1.231165940486234e-06,
"loss": 1.374,
"step": 96
},
{
"epoch": 1.89247311827957,
"grad_norm": 1.9425756931304932,
"learning_rate": 7.709637592770991e-08,
"loss": 1.2715,
"step": 99
},
{
"epoch": 1.89247311827957,
"eval_loss": 1.8266067504882812,
"eval_runtime": 19.7351,
"eval_samples_per_second": 8.969,
"eval_steps_per_second": 4.51,
"step": 99
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.968083617316864e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
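
The JSON above follows the layout the Hugging Face Trainer writes to `trainer_state.json` inside a checkpoint directory: `log_history` interleaves training logs (every `logging_steps` = 3 steps, with `loss`, `grad_norm`, `learning_rate`) and evaluation logs (every `eval_steps` = 9 steps, with `eval_loss` and runtime stats). Below is a minimal sketch for inspecting it offline; the `checkpoint-100/` path is an assumption for illustration, not something stated in the file.

```python
# Minimal sketch: summarize the trainer state above.
# Assumes the JSON is saved as trainer_state.json inside a checkpoint
# directory (e.g. checkpoint-100/); the path is hypothetical.
import json

STATE_PATH = "checkpoint-100/trainer_state.json"  # illustrative path

with open(STATE_PATH) as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"max_steps={state['max_steps']}  "
      f"train points={len(train_logs)}  eval points={len(eval_logs)}")

# Training loss and learning rate every logging_steps (3) steps.
for e in train_logs:
    print(f"step {e['step']:>3}  loss {e['loss']:.4f}  lr {e['learning_rate']:.2e}")

# Lowest eval_loss in this log is ~1.787 at step 54.
best = min(eval_logs, key=lambda e: e["eval_loss"])
print(f"best eval_loss {best['eval_loss']:.4f} at step {best['step']}")
```

As a side note, the logged learning-rate trace (a linear ramp of 1e-5 per step up to roughly 2e-4 around step 20, then a decay to ~7.7e-8 by step 99) appears consistent with a cosine schedule with about 20 warmup steps and a peak learning rate of 2e-4, though the exact `TrainingArguments` are not recorded in this file.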