Training in progress, epoch 1, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 59,
"global_step": 236,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00423728813559322,
"eval_loss": 1.7249085903167725,
"eval_runtime": 8.4858,
"eval_samples_per_second": 11.784,
"eval_steps_per_second": 5.892,
"step": 1
},
{
"epoch": 0.0211864406779661,
"grad_norm": 0.3638346493244171,
"learning_rate": 1.6666666666666667e-05,
"loss": 1.3688,
"step": 5
},
{
"epoch": 0.0423728813559322,
"grad_norm": 0.24444879591464996,
"learning_rate": 3.3333333333333335e-05,
"loss": 1.7376,
"step": 10
},
{
"epoch": 0.0635593220338983,
"grad_norm": 0.32083624601364136,
"learning_rate": 5e-05,
"loss": 1.8018,
"step": 15
},
{
"epoch": 0.0847457627118644,
"grad_norm": 0.5033571720123291,
"learning_rate": 6.666666666666667e-05,
"loss": 1.7648,
"step": 20
},
{
"epoch": 0.1059322033898305,
"grad_norm": 0.5790311694145203,
"learning_rate": 8.333333333333334e-05,
"loss": 1.7307,
"step": 25
},
{
"epoch": 0.1271186440677966,
"grad_norm": 0.7558295130729675,
"learning_rate": 0.0001,
"loss": 1.6065,
"step": 30
},
{
"epoch": 0.1483050847457627,
"grad_norm": 0.5623775720596313,
"learning_rate": 9.985471028179154e-05,
"loss": 1.6408,
"step": 35
},
{
"epoch": 0.1694915254237288,
"grad_norm": 0.6156546473503113,
"learning_rate": 9.941968549125481e-05,
"loss": 1.5529,
"step": 40
},
{
"epoch": 0.1906779661016949,
"grad_norm": 0.7409376502037048,
"learning_rate": 9.869745381355906e-05,
"loss": 1.731,
"step": 45
},
{
"epoch": 0.211864406779661,
"grad_norm": 2.1485629081726074,
"learning_rate": 9.769221256218164e-05,
"loss": 1.5325,
"step": 50
},
{
"epoch": 0.2330508474576271,
"grad_norm": 0.4415481686592102,
"learning_rate": 9.64098037858483e-05,
"loss": 1.2719,
"step": 55
},
{
"epoch": 0.25,
"eval_loss": 1.4849549531936646,
"eval_runtime": 8.6149,
"eval_samples_per_second": 11.608,
"eval_steps_per_second": 5.804,
"step": 59
},
{
"epoch": 0.2542372881355932,
"grad_norm": 0.3637336194515228,
"learning_rate": 9.485768031694872e-05,
"loss": 1.5802,
"step": 60
},
{
"epoch": 0.2754237288135593,
"grad_norm": 0.36982426047325134,
"learning_rate": 9.304486245873972e-05,
"loss": 1.6654,
"step": 65
},
{
"epoch": 0.2966101694915254,
"grad_norm": 0.5281253457069397,
"learning_rate": 9.098188556305263e-05,
"loss": 1.7937,
"step": 70
},
{
"epoch": 0.3177966101694915,
"grad_norm": 0.4794275164604187,
"learning_rate": 8.868073880316124e-05,
"loss": 1.542,
"step": 75
},
{
"epoch": 0.3389830508474576,
"grad_norm": 0.4679938852787018,
"learning_rate": 8.615479549763756e-05,
"loss": 1.7178,
"step": 80
},
{
"epoch": 0.3601694915254237,
"grad_norm": 0.6459900736808777,
"learning_rate": 8.341873539012444e-05,
"loss": 1.5279,
"step": 85
},
{
"epoch": 0.3813559322033898,
"grad_norm": 0.774486243724823,
"learning_rate": 8.048845933670273e-05,
"loss": 1.5344,
"step": 90
},
{
"epoch": 0.4025423728813559,
"grad_norm": 0.9203367233276367,
"learning_rate": 7.73809968966554e-05,
"loss": 1.3637,
"step": 95
},
{
"epoch": 0.423728813559322,
"grad_norm": 1.7692137956619263,
"learning_rate": 7.411440736367281e-05,
"loss": 1.421,
"step": 100
},
{
"epoch": 0.4449152542372881,
"grad_norm": 0.3940534293651581,
"learning_rate": 7.070767481266492e-05,
"loss": 1.1592,
"step": 105
},
{
"epoch": 0.4661016949152542,
"grad_norm": 0.31518927216529846,
"learning_rate": 6.718059777212567e-05,
"loss": 1.5224,
"step": 110
},
{
"epoch": 0.4872881355932203,
"grad_norm": 0.4130525290966034,
"learning_rate": 6.355367416322779e-05,
"loss": 1.6624,
"step": 115
},
{
"epoch": 0.5,
"eval_loss": 1.4531333446502686,
"eval_runtime": 8.5017,
"eval_samples_per_second": 11.762,
"eval_steps_per_second": 5.881,
"step": 118
},
{
"epoch": 0.5084745762711864,
"grad_norm": 0.4499770700931549,
"learning_rate": 5.9847982174335316e-05,
"loss": 1.7216,
"step": 120
},
{
"epoch": 0.5296610169491526,
"grad_norm": 0.47381460666656494,
"learning_rate": 5.608505776324158e-05,
"loss": 1.4514,
"step": 125
},
{
"epoch": 0.5508474576271186,
"grad_norm": 0.5892888307571411,
"learning_rate": 5.228676949903973e-05,
"loss": 1.5234,
"step": 130
},
{
"epoch": 0.5720338983050848,
"grad_norm": 0.590027391910553,
"learning_rate": 4.847519147099294e-05,
"loss": 1.4497,
"step": 135
},
{
"epoch": 0.5932203389830508,
"grad_norm": 0.6537613868713379,
"learning_rate": 4.46724750030062e-05,
"loss": 1.5984,
"step": 140
},
{
"epoch": 0.614406779661017,
"grad_norm": 0.8353936672210693,
"learning_rate": 4.0900719919241935e-05,
"loss": 1.4847,
"step": 145
},
{
"epoch": 0.635593220338983,
"grad_norm": 1.600169062614441,
"learning_rate": 3.7181846109031005e-05,
"loss": 1.6274,
"step": 150
},
{
"epoch": 0.6567796610169492,
"grad_norm": 0.31450155377388,
"learning_rate": 3.353746613749094e-05,
"loss": 1.1725,
"step": 155
},
{
"epoch": 0.6779661016949152,
"grad_norm": 0.3330669403076172,
"learning_rate": 2.9988759642186097e-05,
"loss": 1.392,
"step": 160
},
{
"epoch": 0.6991525423728814,
"grad_norm": 0.35386720299720764,
"learning_rate": 2.6556350245784833e-05,
"loss": 1.6059,
"step": 165
},
{
"epoch": 0.7203389830508474,
"grad_norm": 0.4171987771987915,
"learning_rate": 2.3260185700046294e-05,
"loss": 1.6118,
"step": 170
},
{
"epoch": 0.7415254237288136,
"grad_norm": 0.43384310603141785,
"learning_rate": 2.011942195769122e-05,
"loss": 1.3333,
"step": 175
},
{
"epoch": 0.75,
"eval_loss": 1.4390202760696411,
"eval_runtime": 8.6381,
"eval_samples_per_second": 11.577,
"eval_steps_per_second": 5.788,
"step": 177
},
{
"epoch": 0.7627118644067796,
"grad_norm": 0.5793793797492981,
"learning_rate": 1.7152311845883095e-05,
"loss": 1.4983,
"step": 180
},
{
"epoch": 0.7838983050847458,
"grad_norm": 0.6938722729682922,
"learning_rate": 1.4376098988303405e-05,
"loss": 1.3863,
"step": 185
},
{
"epoch": 0.8050847457627118,
"grad_norm": 0.6867596507072449,
"learning_rate": 1.1806917592302762e-05,
"loss": 1.6187,
"step": 190
},
{
"epoch": 0.826271186440678,
"grad_norm": 0.9748066663742065,
"learning_rate": 9.459698683523204e-06,
"loss": 1.4349,
"step": 195
},
{
"epoch": 0.847457627118644,
"grad_norm": 1.923925757408142,
"learning_rate": 7.348083332917926e-06,
"loss": 1.5132,
"step": 200
},
{
"epoch": 0.8686440677966102,
"grad_norm": 0.32460537552833557,
"learning_rate": 5.484343380457125e-06,
"loss": 1.3116,
"step": 205
},
{
"epoch": 0.8898305084745762,
"grad_norm": 0.40792253613471985,
"learning_rate": 3.879310116241042e-06,
"loss": 1.5229,
"step": 210
},
{
"epoch": 0.9110169491525424,
"grad_norm": 0.417889267206192,
"learning_rate": 2.542311333496622e-06,
"loss": 1.4959,
"step": 215
},
{
"epoch": 0.9322033898305084,
"grad_norm": 0.5283035039901733,
"learning_rate": 1.4811171192794627e-06,
"loss": 1.5565,
"step": 220
},
{
"epoch": 0.9533898305084746,
"grad_norm": 0.6696847677230835,
"learning_rate": 7.018946979234997e-07,
"loss": 1.4598,
"step": 225
},
{
"epoch": 0.9745762711864406,
"grad_norm": 0.6265289187431335,
"learning_rate": 2.0917258966953733e-07,
"loss": 1.5206,
"step": 230
},
{
"epoch": 0.9957627118644068,
"grad_norm": 1.065112829208374,
"learning_rate": 5.814292768108187e-09,
"loss": 1.5803,
"step": 235
},
{
"epoch": 1.0,
"eval_loss": 1.4057904481887817,
"eval_runtime": 8.627,
"eval_samples_per_second": 11.592,
"eval_steps_per_second": 5.796,
"step": 236
}
],
"logging_steps": 5,
"max_steps": 236,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.267951517990912e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
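
For reference, a minimal sketch of how a trainer state like the one above can be inspected, assuming it is saved as trainer_state.json inside a checkpoint directory (the usual layout for the Hugging Face Trainer). The path and script are illustrative, not part of this checkpoint. It splits log_history into training and evaluation entries and prints the eval_loss trajectory (1.72 → 1.49 → 1.45 → 1.44 → 1.41 here, evaluated every 59 steps) and the peak learning rate (1e-4, reached after the roughly 30-step warmup visible in the logged schedule before the cosine-like decay).

import json

# Assumption: the JSON above is stored as "trainer_state.json" in a
# checkpoint directory; adjust the path to match your setup.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry "loss"; evaluation entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"max_steps={state['max_steps']}, epochs={state['num_train_epochs']}")

# Eval loss at each evaluation point (eval_steps=59 in this run).
for e in eval_logs:
    print(f"step {e['step']:>3}: eval_loss={e['eval_loss']:.4f}")

# Peak learning rate reached during warmup, taken from the logged schedule.
peak_lr = max(e["learning_rate"] for e in train_logs)
print(f"peak learning rate: {peak_lr:g}")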