{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.905320108205591,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.028854824165915238,
"eval_loss": 2.2234888076782227,
"eval_runtime": 63.3246,
"eval_samples_per_second": 1.848,
"eval_steps_per_second": 0.932,
"step": 1
},
{
"epoch": 0.08656447249774572,
"grad_norm": 11.650296211242676,
"learning_rate": 3e-05,
"loss": 71.8161,
"step": 3
},
{
"epoch": 0.17312894499549145,
"grad_norm": 12.471053123474121,
"learning_rate": 6e-05,
"loss": 72.0251,
"step": 6
},
{
"epoch": 0.25969341749323716,
"grad_norm": 11.917525291442871,
"learning_rate": 9e-05,
"loss": 66.3847,
"step": 9
},
{
"epoch": 0.25969341749323716,
"eval_loss": 1.9188385009765625,
"eval_runtime": 63.2986,
"eval_samples_per_second": 1.848,
"eval_steps_per_second": 0.932,
"step": 9
},
{
"epoch": 0.3462578899909829,
"grad_norm": 11.10000991821289,
"learning_rate": 0.00012,
"loss": 59.632,
"step": 12
},
{
"epoch": 0.4328223624887286,
"grad_norm": 12.129708290100098,
"learning_rate": 0.00015000000000000001,
"loss": 55.7109,
"step": 15
},
{
"epoch": 0.5193868349864743,
"grad_norm": 7.577877044677734,
"learning_rate": 0.00018,
"loss": 51.9351,
"step": 18
},
{
"epoch": 0.5193868349864743,
"eval_loss": 1.5988303422927856,
"eval_runtime": 63.413,
"eval_samples_per_second": 1.845,
"eval_steps_per_second": 0.93,
"step": 18
},
{
"epoch": 0.60595130748422,
"grad_norm": 6.87202262878418,
"learning_rate": 0.0001999229036240723,
"loss": 51.0237,
"step": 21
},
{
"epoch": 0.6925157799819658,
"grad_norm": 6.496087551116943,
"learning_rate": 0.00019876883405951377,
"loss": 48.9338,
"step": 24
},
{
"epoch": 0.7790802524797115,
"grad_norm": 6.911287784576416,
"learning_rate": 0.00019624552364536473,
"loss": 47.7303,
"step": 27
},
{
"epoch": 0.7790802524797115,
"eval_loss": 1.4810194969177246,
"eval_runtime": 63.4002,
"eval_samples_per_second": 1.845,
"eval_steps_per_second": 0.931,
"step": 27
},
{
"epoch": 0.8656447249774571,
"grad_norm": 6.416966915130615,
"learning_rate": 0.0001923879532511287,
"loss": 47.3634,
"step": 30
},
{
"epoch": 0.9522091974752029,
"grad_norm": 6.382626056671143,
"learning_rate": 0.00018724960070727972,
"loss": 46.6147,
"step": 33
},
{
"epoch": 1.048692515779982,
"grad_norm": 7.635010719299316,
"learning_rate": 0.00018090169943749476,
"loss": 46.168,
"step": 36
},
{
"epoch": 1.048692515779982,
"eval_loss": 1.4284162521362305,
"eval_runtime": 63.4232,
"eval_samples_per_second": 1.845,
"eval_steps_per_second": 0.93,
"step": 36
},
{
"epoch": 1.1352569882777277,
"grad_norm": 7.26214075088501,
"learning_rate": 0.00017343225094356855,
"loss": 42.9364,
"step": 39
},
{
"epoch": 1.2218214607754734,
"grad_norm": 8.307271003723145,
"learning_rate": 0.00016494480483301836,
"loss": 43.8462,
"step": 42
},
{
"epoch": 1.308385933273219,
"grad_norm": 8.332019805908203,
"learning_rate": 0.00015555702330196023,
"loss": 43.2469,
"step": 45
},
{
"epoch": 1.308385933273219,
"eval_loss": 1.39437735080719,
"eval_runtime": 63.2944,
"eval_samples_per_second": 1.849,
"eval_steps_per_second": 0.932,
"step": 45
},
{
"epoch": 1.394950405770965,
"grad_norm": 8.096892356872559,
"learning_rate": 0.00014539904997395468,
"loss": 41.2373,
"step": 48
},
{
"epoch": 1.4815148782687104,
"grad_norm": 8.60307502746582,
"learning_rate": 0.0001346117057077493,
"loss": 42.4838,
"step": 51
},
{
"epoch": 1.5680793507664563,
"grad_norm": 8.435927391052246,
"learning_rate": 0.00012334453638559057,
"loss": 43.469,
"step": 54
},
{
"epoch": 1.5680793507664563,
"eval_loss": 1.376704454421997,
"eval_runtime": 63.2953,
"eval_samples_per_second": 1.848,
"eval_steps_per_second": 0.932,
"step": 54
},
{
"epoch": 1.654643823264202,
"grad_norm": 9.683271408081055,
"learning_rate": 0.00011175373974578378,
"loss": 41.1976,
"step": 57
},
{
"epoch": 1.7412082957619477,
"grad_norm": 8.922101974487305,
"learning_rate": 0.0001,
"loss": 41.2615,
"step": 60
},
{
"epoch": 1.8277727682596934,
"grad_norm": 8.805673599243164,
"learning_rate": 8.824626025421626e-05,
"loss": 40.953,
"step": 63
},
{
"epoch": 1.8277727682596934,
"eval_loss": 1.3633700609207153,
"eval_runtime": 63.3102,
"eval_samples_per_second": 1.848,
"eval_steps_per_second": 0.932,
"step": 63
},
{
"epoch": 1.914337240757439,
"grad_norm": 9.028271675109863,
"learning_rate": 7.66554636144095e-05,
"loss": 40.7417,
"step": 66
},
{
"epoch": 2.010820559062218,
"grad_norm": 9.204501152038574,
"learning_rate": 6.538829429225069e-05,
"loss": 41.2274,
"step": 69
},
{
"epoch": 2.097385031559964,
"grad_norm": 9.566389083862305,
"learning_rate": 5.4600950026045326e-05,
"loss": 37.3547,
"step": 72
},
{
"epoch": 2.097385031559964,
"eval_loss": 1.3545383214950562,
"eval_runtime": 63.2718,
"eval_samples_per_second": 1.849,
"eval_steps_per_second": 0.932,
"step": 72
},
{
"epoch": 2.1839495040577095,
"grad_norm": 10.605365753173828,
"learning_rate": 4.444297669803981e-05,
"loss": 36.6109,
"step": 75
},
{
"epoch": 2.2705139765554554,
"grad_norm": 11.574414253234863,
"learning_rate": 3.5055195166981645e-05,
"loss": 37.0909,
"step": 78
},
{
"epoch": 2.3570784490532013,
"grad_norm": 11.285858154296875,
"learning_rate": 2.6567749056431467e-05,
"loss": 38.6229,
"step": 81
},
{
"epoch": 2.3570784490532013,
"eval_loss": 1.3696061372756958,
"eval_runtime": 63.2953,
"eval_samples_per_second": 1.848,
"eval_steps_per_second": 0.932,
"step": 81
},
{
"epoch": 2.4436429215509468,
"grad_norm": 11.047082901000977,
"learning_rate": 1.9098300562505266e-05,
"loss": 37.311,
"step": 84
},
{
"epoch": 2.5302073940486927,
"grad_norm": 11.326862335205078,
"learning_rate": 1.2750399292720283e-05,
"loss": 35.5301,
"step": 87
},
{
"epoch": 2.616771866546438,
"grad_norm": 10.88636302947998,
"learning_rate": 7.612046748871327e-06,
"loss": 36.4226,
"step": 90
},
{
"epoch": 2.616771866546438,
"eval_loss": 1.3591108322143555,
"eval_runtime": 63.3023,
"eval_samples_per_second": 1.848,
"eval_steps_per_second": 0.932,
"step": 90
},
{
"epoch": 2.703336339044184,
"grad_norm": 10.809871673583984,
"learning_rate": 3.7544763546352834e-06,
"loss": 37.0722,
"step": 93
},
{
"epoch": 2.78990081154193,
"grad_norm": 11.037607192993164,
"learning_rate": 1.231165940486234e-06,
"loss": 35.8129,
"step": 96
},
{
"epoch": 2.8764652840396754,
"grad_norm": 10.600300788879395,
"learning_rate": 7.709637592770991e-08,
"loss": 35.964,
"step": 99
},
{
"epoch": 2.8764652840396754,
"eval_loss": 1.3585889339447021,
"eval_runtime": 63.2827,
"eval_samples_per_second": 1.849,
"eval_steps_per_second": 0.932,
"step": 99
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.020574826496e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
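
A minimal sketch of how this checkpoint state could be inspected offline. The local path below is an assumption (the standard Trainer layout is checkpoint-<step>/trainer_state.json); adjust it to wherever the checkpoint was downloaded.

import json

# Assumed download location; not read from this file itself.
with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

print(f"global_step={state['global_step']}  epoch={state['epoch']:.3f}")
print(f"last train loss: {train_log[-1]['loss']} at step {train_log[-1]['step']}")
print(f"best eval loss:  {min(e['eval_loss'] for e in eval_log):.4f}")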
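
The logged learning rates are consistent with linear warmup to a peak of 2e-4 over about the first 20 steps, followed by cosine decay out to max_steps=100 (for example, the rate is exactly 0.0001 at step 60, the midpoint of the decay). The sketch below checks that reading; peak_lr and warmup_steps are inferred from the log, not taken from the original training arguments.

import math

# Inferred schedule parameters (assumptions, not read from the training args).
peak_lr, warmup_steps, max_steps = 2e-4, 20, 100

def expected_lr(step):
    if step < warmup_steps:
        return peak_lr * step / warmup_steps  # linear warmup
    progress = (step - warmup_steps) / (max_steps - warmup_steps)
    return 0.5 * peak_lr * (1 + math.cos(math.pi * progress))  # cosine decay

# Compare against a few learning rates logged above.
for step, logged in [(3, 3e-05), (21, 0.0001999229036240723),
                     (60, 0.0001), (99, 7.709637592770991e-08)]:
    print(f"step {step:3d}  logged={logged:.12e}  expected={expected_lr(step):.12e}")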