Training in progress, step 100, checkpoint
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.010311670232785956,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00010311670232785956,
"eval_loss": 2.0299148559570312,
"eval_runtime": 1161.575,
"eval_samples_per_second": 14.062,
"eval_steps_per_second": 1.758,
"step": 1
},
{
"epoch": 0.0003093501069835787,
"grad_norm": 22.63811492919922,
"learning_rate": 3e-05,
"loss": 8.112,
"step": 3
},
{
"epoch": 0.0006187002139671574,
"grad_norm": 13.994317054748535,
"learning_rate": 6e-05,
"loss": 8.7124,
"step": 6
},
{
"epoch": 0.000928050320950736,
"grad_norm": 7.255701065063477,
"learning_rate": 9e-05,
"loss": 6.6208,
"step": 9
},
{
"epoch": 0.000928050320950736,
"eval_loss": 1.5573339462280273,
"eval_runtime": 1167.6686,
"eval_samples_per_second": 13.989,
"eval_steps_per_second": 1.749,
"step": 9
},
{
"epoch": 0.0012374004279343148,
"grad_norm": 6.881002902984619,
"learning_rate": 9.987820251299122e-05,
"loss": 6.2024,
"step": 12
},
{
"epoch": 0.0015467505349178933,
"grad_norm": 7.591043949127197,
"learning_rate": 9.924038765061042e-05,
"loss": 6.0058,
"step": 15
},
{
"epoch": 0.001856100641901472,
"grad_norm": 5.465282440185547,
"learning_rate": 9.806308479691595e-05,
"loss": 5.2033,
"step": 18
},
{
"epoch": 0.001856100641901472,
"eval_loss": 1.4896771907806396,
"eval_runtime": 1167.5877,
"eval_samples_per_second": 13.99,
"eval_steps_per_second": 1.749,
"step": 18
},
{
"epoch": 0.0021654507488850505,
"grad_norm": 8.633875846862793,
"learning_rate": 9.635919272833938e-05,
"loss": 5.7537,
"step": 21
},
{
"epoch": 0.0024748008558686295,
"grad_norm": 10.359308242797852,
"learning_rate": 9.414737964294636e-05,
"loss": 6.1929,
"step": 24
},
{
"epoch": 0.002784150962852208,
"grad_norm": 5.20798397064209,
"learning_rate": 9.145187862775209e-05,
"loss": 5.966,
"step": 27
},
{
"epoch": 0.002784150962852208,
"eval_loss": 1.4642568826675415,
"eval_runtime": 1167.6125,
"eval_samples_per_second": 13.989,
"eval_steps_per_second": 1.749,
"step": 27
},
{
"epoch": 0.0030935010698357867,
"grad_norm": 7.362185478210449,
"learning_rate": 8.83022221559489e-05,
"loss": 6.0468,
"step": 30
},
{
"epoch": 0.0034028511768193653,
"grad_norm": 7.112700939178467,
"learning_rate": 8.473291852294987e-05,
"loss": 6.4047,
"step": 33
},
{
"epoch": 0.003712201283802944,
"grad_norm": 6.42846155166626,
"learning_rate": 8.07830737662829e-05,
"loss": 5.5313,
"step": 36
},
{
"epoch": 0.003712201283802944,
"eval_loss": 1.4507426023483276,
"eval_runtime": 1167.4801,
"eval_samples_per_second": 13.991,
"eval_steps_per_second": 1.749,
"step": 36
},
{
"epoch": 0.004021551390786523,
"grad_norm": 4.613251686096191,
"learning_rate": 7.649596321166024e-05,
"loss": 5.437,
"step": 39
},
{
"epoch": 0.004330901497770101,
"grad_norm": 7.317685604095459,
"learning_rate": 7.191855733945387e-05,
"loss": 5.8941,
"step": 42
},
{
"epoch": 0.00464025160475368,
"grad_norm": 6.0703606605529785,
"learning_rate": 6.710100716628344e-05,
"loss": 5.9139,
"step": 45
},
{
"epoch": 0.00464025160475368,
"eval_loss": 1.442267656326294,
"eval_runtime": 1167.7611,
"eval_samples_per_second": 13.987,
"eval_steps_per_second": 1.749,
"step": 45
},
{
"epoch": 0.004949601711737259,
"grad_norm": 4.372334957122803,
"learning_rate": 6.209609477998338e-05,
"loss": 5.2514,
"step": 48
},
{
"epoch": 0.005258951818720837,
"grad_norm": 6.997920513153076,
"learning_rate": 5.695865504800327e-05,
"loss": 6.0713,
"step": 51
},
{
"epoch": 0.005568301925704416,
"grad_norm": 6.882256031036377,
"learning_rate": 5.174497483512506e-05,
"loss": 6.3112,
"step": 54
},
{
"epoch": 0.005568301925704416,
"eval_loss": 1.4337407350540161,
"eval_runtime": 1167.2604,
"eval_samples_per_second": 13.993,
"eval_steps_per_second": 1.749,
"step": 54
},
{
"epoch": 0.005877652032687994,
"grad_norm": 4.815347194671631,
"learning_rate": 4.6512176312793736e-05,
"loss": 5.721,
"step": 57
},
{
"epoch": 0.006187002139671573,
"grad_norm": 8.502628326416016,
"learning_rate": 4.131759111665349e-05,
"loss": 5.9287,
"step": 60
},
{
"epoch": 0.0064963522466551515,
"grad_norm": 8.623035430908203,
"learning_rate": 3.6218132209150045e-05,
"loss": 6.0742,
"step": 63
},
{
"epoch": 0.0064963522466551515,
"eval_loss": 1.42971670627594,
"eval_runtime": 1166.8218,
"eval_samples_per_second": 13.999,
"eval_steps_per_second": 1.75,
"step": 63
},
{
"epoch": 0.0068057023536387306,
"grad_norm": 6.28512716293335,
"learning_rate": 3.12696703292044e-05,
"loss": 6.1469,
"step": 66
},
{
"epoch": 0.00711505246062231,
"grad_norm": 6.072748184204102,
"learning_rate": 2.6526421860705473e-05,
"loss": 6.4233,
"step": 69
},
{
"epoch": 0.007424402567605888,
"grad_norm": 5.338780403137207,
"learning_rate": 2.2040354826462668e-05,
"loss": 5.9596,
"step": 72
},
{
"epoch": 0.007424402567605888,
"eval_loss": 1.4262299537658691,
"eval_runtime": 1166.5213,
"eval_samples_per_second": 14.002,
"eval_steps_per_second": 1.751,
"step": 72
},
{
"epoch": 0.007733752674589467,
"grad_norm": 5.755375385284424,
"learning_rate": 1.7860619515673033e-05,
"loss": 4.9325,
"step": 75
},
{
"epoch": 0.008043102781573046,
"grad_norm": 7.211399078369141,
"learning_rate": 1.4033009983067452e-05,
"loss": 5.6665,
"step": 78
},
{
"epoch": 0.008352452888556624,
"grad_norm": 9.436430931091309,
"learning_rate": 1.0599462319663905e-05,
"loss": 5.676,
"step": 81
},
{
"epoch": 0.008352452888556624,
"eval_loss": 1.4237958192825317,
"eval_runtime": 1166.3522,
"eval_samples_per_second": 14.004,
"eval_steps_per_second": 1.751,
"step": 81
},
{
"epoch": 0.008661802995540202,
"grad_norm": 9.019287109375,
"learning_rate": 7.597595192178702e-06,
"loss": 6.0424,
"step": 84
},
{
"epoch": 0.008971153102523782,
"grad_norm": 5.952609062194824,
"learning_rate": 5.060297685041659e-06,
"loss": 5.8633,
"step": 87
},
{
"epoch": 0.00928050320950736,
"grad_norm": 5.024654388427734,
"learning_rate": 3.0153689607045845e-06,
"loss": 5.4096,
"step": 90
},
{
"epoch": 0.00928050320950736,
"eval_loss": 1.422675371170044,
"eval_runtime": 1166.5377,
"eval_samples_per_second": 14.002,
"eval_steps_per_second": 1.75,
"step": 90
},
{
"epoch": 0.009589853316490938,
"grad_norm": 3.980428457260132,
"learning_rate": 1.4852136862001764e-06,
"loss": 4.9397,
"step": 93
},
{
"epoch": 0.009899203423474518,
"grad_norm": 4.345184803009033,
"learning_rate": 4.865965629214819e-07,
"loss": 5.0577,
"step": 96
},
{
"epoch": 0.010208553530458096,
"grad_norm": 8.817083358764648,
"learning_rate": 3.04586490452119e-08,
"loss": 6.205,
"step": 99
},
{
"epoch": 0.010208553530458096,
"eval_loss": 1.4224265813827515,
"eval_runtime": 1166.663,
"eval_samples_per_second": 14.001,
"eval_steps_per_second": 1.75,
"step": 99
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.4625093572571955e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
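
The JSON above is the trainer_state.json that the Hugging Face Transformers Trainer writes into each checkpoint directory alongside the model weights. A minimal sketch of how it can be inspected, assuming the file has been downloaded locally under the hypothetical name "trainer_state.json": it splits log_history into the training-loss entries (logged every logging_steps = 3 steps) and the eval-loss entries (logged every eval_steps = 9 steps).

import json

# Load the checkpoint's trainer state (assumed local filename).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss records carry a "loss" key; evaluation records carry "eval_loss".
train_curve = [(r["step"], r["loss"]) for r in state["log_history"] if "loss" in r]
eval_curve = [(r["step"], r["eval_loss"]) for r in state["log_history"] if "eval_loss" in r]

print(f"global_step={state['global_step']}  epoch={state['epoch']:.6f}")
print("train loss (step, value):", train_curve[:3], "...", train_curve[-1])
print("eval loss  (step, value):", eval_curve)

As a side note, the logged learning_rate values appear consistent with a linear warmup over the first 10 steps to a peak of 1e-4 followed by cosine decay to zero over the remaining 90 steps: for example, the closed-form value at step 99, 1e-4 * 0.5 * (1 + cos(pi * 89/90)) ≈ 3.0459e-08, matches the logged 3.04586490452119e-08. This is an inference from the numbers, not something stated in the file itself.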