ThaiT5-Instruct / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 12.991322457480042,
"eval_steps": 500,
"global_step": 18720,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.3471017007983339,
"grad_norm": 9.175567626953125,
"learning_rate": 2e-05,
"loss": 10.9573,
"step": 500
},
{
"epoch": 0.6942034015966678,
"grad_norm": 6.246857166290283,
"learning_rate": 1.9451152579582877e-05,
"loss": 2.2463,
"step": 1000
},
{
"epoch": 1.0,
"eval_loss": 1.4781490564346313,
"eval_runtime": 78.7792,
"eval_samples_per_second": 146.282,
"eval_steps_per_second": 4.582,
"step": 1441
},
{
"epoch": 1.0409580006942034,
"grad_norm": 0.7370556592941284,
"learning_rate": 1.890230515916575e-05,
"loss": 1.8168,
"step": 1500
},
{
"epoch": 1.3880597014925373,
"grad_norm": 600.9544677734375,
"learning_rate": 1.835345773874863e-05,
"loss": 2.8979,
"step": 2000
},
{
"epoch": 1.7351614022908712,
"grad_norm": 0.528658390045166,
"learning_rate": 1.7804610318331505e-05,
"loss": 1.701,
"step": 2500
},
{
"epoch": 2.0,
"eval_loss": 1.3760780096054077,
"eval_runtime": 78.8326,
"eval_samples_per_second": 146.183,
"eval_steps_per_second": 4.579,
"step": 2882
},
{
"epoch": 2.081916001388407,
"grad_norm": 0.45837515592575073,
"learning_rate": 1.725576289791438e-05,
"loss": 1.5956,
"step": 3000
},
{
"epoch": 2.4290177021867407,
"grad_norm": 0.41563984751701355,
"learning_rate": 1.6706915477497255e-05,
"loss": 1.5615,
"step": 3500
},
{
"epoch": 2.7761194029850746,
"grad_norm": 0.4553076922893524,
"learning_rate": 1.6158068057080134e-05,
"loss": 1.5261,
"step": 4000
},
{
"epoch": 3.0,
"eval_loss": 1.3131805658340454,
"eval_runtime": 78.7183,
"eval_samples_per_second": 146.396,
"eval_steps_per_second": 4.586,
"step": 4323
},
{
"epoch": 3.12287400208261,
"grad_norm": 0.4429566562175751,
"learning_rate": 1.560922063666301e-05,
"loss": 1.4933,
"step": 4500
},
{
"epoch": 3.469975702880944,
"grad_norm": 0.4545910656452179,
"learning_rate": 1.5060373216245885e-05,
"loss": 1.4766,
"step": 5000
},
{
"epoch": 3.817077403679278,
"grad_norm": 0.4629240930080414,
"learning_rate": 1.451152579582876e-05,
"loss": 1.4626,
"step": 5500
},
{
"epoch": 4.0,
"eval_loss": 1.2775077819824219,
"eval_runtime": 78.6454,
"eval_samples_per_second": 146.531,
"eval_steps_per_second": 4.59,
"step": 5764
},
{
"epoch": 4.163832002776814,
"grad_norm": 0.3974866271018982,
"learning_rate": 1.3962678375411635e-05,
"loss": 1.448,
"step": 6000
},
{
"epoch": 4.510933703575148,
"grad_norm": 0.4241460859775543,
"learning_rate": 1.3413830954994514e-05,
"loss": 1.4241,
"step": 6500
},
{
"epoch": 4.8580354043734815,
"grad_norm": 0.4666702151298523,
"learning_rate": 1.2864983534577389e-05,
"loss": 1.4085,
"step": 7000
},
{
"epoch": 5.0,
"eval_loss": 1.254936695098877,
"eval_runtime": 78.5765,
"eval_samples_per_second": 146.66,
"eval_steps_per_second": 4.594,
"step": 7205
},
{
"epoch": 5.204790003471017,
"grad_norm": 0.4225795567035675,
"learning_rate": 1.2316136114160264e-05,
"loss": 1.395,
"step": 7500
},
{
"epoch": 5.551891704269351,
"grad_norm": 0.42448151111602783,
"learning_rate": 1.176728869374314e-05,
"loss": 1.3938,
"step": 8000
},
{
"epoch": 5.898993405067685,
"grad_norm": 0.47224199771881104,
"learning_rate": 1.1218441273326017e-05,
"loss": 1.3844,
"step": 8500
},
{
"epoch": 6.0,
"eval_loss": 1.2364274263381958,
"eval_runtime": 78.7665,
"eval_samples_per_second": 146.306,
"eval_steps_per_second": 4.583,
"step": 8646
},
{
"epoch": 6.24574800416522,
"grad_norm": 0.3676798939704895,
"learning_rate": 1.0669593852908892e-05,
"loss": 1.3668,
"step": 9000
},
{
"epoch": 6.592849704963554,
"grad_norm": 0.37000757455825806,
"learning_rate": 1.0120746432491769e-05,
"loss": 1.3629,
"step": 9500
},
{
"epoch": 6.939951405761888,
"grad_norm": 0.45095133781433105,
"learning_rate": 9.571899012074644e-06,
"loss": 1.3647,
"step": 10000
},
{
"epoch": 7.0,
"eval_loss": 1.222563624382019,
"eval_runtime": 78.5655,
"eval_samples_per_second": 146.68,
"eval_steps_per_second": 4.595,
"step": 10087
},
{
"epoch": 7.286706004859424,
"grad_norm": 0.4932011365890503,
"learning_rate": 9.02305159165752e-06,
"loss": 1.358,
"step": 10500
},
{
"epoch": 7.633807705657758,
"grad_norm": 0.36681583523750305,
"learning_rate": 8.474204171240396e-06,
"loss": 1.3422,
"step": 11000
},
{
"epoch": 7.9809094064560915,
"grad_norm": 0.39713332056999207,
"learning_rate": 7.925356750823272e-06,
"loss": 1.3442,
"step": 11500
},
{
"epoch": 8.0,
"eval_loss": 1.214061975479126,
"eval_runtime": 78.596,
"eval_samples_per_second": 146.623,
"eval_steps_per_second": 4.593,
"step": 11528
},
{
"epoch": 8.327664005553627,
"grad_norm": 0.4920157194137573,
"learning_rate": 7.376509330406147e-06,
"loss": 1.3358,
"step": 12000
},
{
"epoch": 8.67476570635196,
"grad_norm": 0.4292038083076477,
"learning_rate": 6.827661909989024e-06,
"loss": 1.3373,
"step": 12500
},
{
"epoch": 9.0,
"eval_loss": 1.2043074369430542,
"eval_runtime": 78.6255,
"eval_samples_per_second": 146.568,
"eval_steps_per_second": 4.591,
"step": 12969
},
{
"epoch": 9.021520305449497,
"grad_norm": 0.390511691570282,
"learning_rate": 6.278814489571899e-06,
"loss": 1.3305,
"step": 13000
},
{
"epoch": 9.368622006247831,
"grad_norm": 0.42470699548721313,
"learning_rate": 5.729967069154776e-06,
"loss": 1.3294,
"step": 13500
},
{
"epoch": 9.715723707046164,
"grad_norm": 0.4358290135860443,
"learning_rate": 5.181119648737652e-06,
"loss": 1.3182,
"step": 14000
},
{
"epoch": 10.0,
"eval_loss": 1.1995124816894531,
"eval_runtime": 78.7492,
"eval_samples_per_second": 146.338,
"eval_steps_per_second": 4.584,
"step": 14410
},
{
"epoch": 10.0624783061437,
"grad_norm": 0.37242305278778076,
"learning_rate": 4.632272228320528e-06,
"loss": 1.3192,
"step": 14500
},
{
"epoch": 10.409580006942035,
"grad_norm": 0.4692925810813904,
"learning_rate": 4.0834248079034035e-06,
"loss": 1.3177,
"step": 15000
},
{
"epoch": 10.756681707740368,
"grad_norm": 0.42067646980285645,
"learning_rate": 3.5345773874862793e-06,
"loss": 1.3169,
"step": 15500
},
{
"epoch": 11.0,
"eval_loss": 1.1953924894332886,
"eval_runtime": 78.6673,
"eval_samples_per_second": 146.49,
"eval_steps_per_second": 4.589,
"step": 15851
},
{
"epoch": 11.103436306837903,
"grad_norm": 0.4010460674762726,
"learning_rate": 2.9857299670691548e-06,
"loss": 1.3145,
"step": 16000
},
{
"epoch": 11.450538007636238,
"grad_norm": 0.4176557660102844,
"learning_rate": 2.436882546652031e-06,
"loss": 1.3063,
"step": 16500
},
{
"epoch": 11.797639708434572,
"grad_norm": 0.3621806800365448,
"learning_rate": 1.888035126234907e-06,
"loss": 1.3195,
"step": 17000
},
{
"epoch": 12.0,
"eval_loss": 1.1941872835159302,
"eval_runtime": 78.6747,
"eval_samples_per_second": 146.477,
"eval_steps_per_second": 4.589,
"step": 17292
},
{
"epoch": 12.144394307532107,
"grad_norm": 0.41595444083213806,
"learning_rate": 1.3391877058177828e-06,
"loss": 1.3222,
"step": 17500
},
{
"epoch": 12.49149600833044,
"grad_norm": 0.40309804677963257,
"learning_rate": 7.903402854006587e-07,
"loss": 1.3122,
"step": 18000
},
{
"epoch": 12.838597709128775,
"grad_norm": 0.4489055871963501,
"learning_rate": 2.414928649835346e-07,
"loss": 1.3016,
"step": 18500
},
{
"epoch": 12.991322457480042,
"eval_loss": 1.19288170337677,
"eval_runtime": 78.6526,
"eval_samples_per_second": 146.518,
"eval_steps_per_second": 4.59,
"step": 18720
}
],
"logging_steps": 500,
"max_steps": 18720,
"num_input_tokens_seen": 0,
"num_train_epochs": 13,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.070590055763149e+17,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}
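
A minimal sketch of how the state above can be inspected, assuming Python and a local copy of this file saved as trainer_state.json (the path and print format are illustrative, not part of the original):

import json

# Load the trainer state written by the Hugging Face Trainer.
with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

# log_history mixes training entries (with "loss") and
# evaluation entries (with "eval_loss"); split them here.
train_log = [e for e in state["log_history"] if "loss" in e]
eval_log = [e for e in state["log_history"] if "eval_loss" in e]

# Per-epoch evaluation loss curve.
for entry in eval_log:
    print(f"epoch {entry['epoch']:.2f}  step {entry['step']:>5}  "
          f"eval_loss {entry['eval_loss']:.4f}")

print("last train loss:", train_log[-1]["loss"])
print("lowest eval loss:", min(e["eval_loss"] for e in eval_log))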