{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 61,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 3.765191996253847,
      "learning_rate": 2.5e-07,
      "loss": 1.0461,
      "step": 1
    },
    {
      "epoch": 0.03,
      "grad_norm": 4.0641282279063455,
      "learning_rate": 5e-07,
      "loss": 1.0458,
      "step": 2
    },
    {
      "epoch": 0.05,
      "grad_norm": 3.7765443388481783,
      "learning_rate": 4.996456739191904e-07,
      "loss": 1.0684,
      "step": 3
    },
    {
      "epoch": 0.07,
      "grad_norm": 3.4137815575539796,
      "learning_rate": 4.985837000525343e-07,
      "loss": 1.0314,
      "step": 4
    },
    {
      "epoch": 0.08,
      "grad_norm": 3.3692459406727107,
      "learning_rate": 4.968170886803361e-07,
      "loss": 1.0465,
      "step": 5
    },
    {
      "epoch": 0.1,
      "grad_norm": 3.41903215526812,
      "learning_rate": 4.943508474544666e-07,
      "loss": 0.9973,
      "step": 6
    },
    {
      "epoch": 0.11,
      "grad_norm": 3.5409185994032644,
      "learning_rate": 4.91191967203629e-07,
      "loss": 1.0003,
      "step": 7
    },
    {
      "epoch": 0.13,
      "grad_norm": 3.2875389190594584,
      "learning_rate": 4.873494021170954e-07,
      "loss": 1.0126,
      "step": 8
    },
    {
      "epoch": 0.15,
      "grad_norm": 3.1966679962173004,
      "learning_rate": 4.828340443630846e-07,
      "loss": 1.0149,
      "step": 9
    },
    {
      "epoch": 0.16,
      "grad_norm": 3.220278106251628,
      "learning_rate": 4.776586932137283e-07,
      "loss": 1.0149,
      "step": 10
    },
    {
      "epoch": 0.18,
      "grad_norm": 5.120652614407811,
      "learning_rate": 4.7183801876414286e-07,
      "loss": 1.0208,
      "step": 11
    },
    {
      "epoch": 0.2,
      "grad_norm": 3.428077957669144,
      "learning_rate": 4.653885203484515e-07,
      "loss": 1.0378,
      "step": 12
    },
    {
      "epoch": 0.21,
      "grad_norm": 3.7962383367663906,
      "learning_rate": 4.583284797706287e-07,
      "loss": 1.0135,
      "step": 13
    },
    {
      "epoch": 0.23,
      "grad_norm": 2.9907595309256267,
      "learning_rate": 4.5067790948274085e-07,
      "loss": 0.9998,
      "step": 14
    },
    {
      "epoch": 0.25,
      "grad_norm": 3.161110263982432,
      "learning_rate": 4.4245849585747655e-07,
      "loss": 1.0069,
      "step": 15
    },
    {
      "epoch": 0.26,
      "grad_norm": 2.886380477987417,
      "learning_rate": 4.336935377157668e-07,
      "loss": 0.9827,
      "step": 16
    },
    {
      "epoch": 0.28,
      "grad_norm": 3.376879009946129,
      "learning_rate": 4.244078802837462e-07,
      "loss": 0.9783,
      "step": 17
    },
    {
      "epoch": 0.3,
      "grad_norm": 2.6892749834464516,
      "learning_rate": 4.146278447662597e-07,
      "loss": 0.9585,
      "step": 18
    },
    {
      "epoch": 0.31,
      "grad_norm": 2.3786851686843105,
      "learning_rate": 4.0438115373654795e-07,
      "loss": 0.9677,
      "step": 19
    },
    {
      "epoch": 0.33,
      "grad_norm": 2.1590733636497754,
      "learning_rate": 3.9369685255360173e-07,
      "loss": 0.9472,
      "step": 20
    },
    {
      "epoch": 0.34,
      "grad_norm": 1.9865479320238961,
      "learning_rate": 3.826052270299356e-07,
      "loss": 0.9533,
      "step": 21
    },
    {
      "epoch": 0.36,
      "grad_norm": 2.0228147756455974,
      "learning_rate": 3.7113771758316255e-07,
      "loss": 0.9366,
      "step": 22
    },
    {
      "epoch": 0.38,
      "grad_norm": 1.9372326169891063,
      "learning_rate": 3.593268301147139e-07,
      "loss": 0.9316,
      "step": 23
    },
    {
      "epoch": 0.39,
      "grad_norm": 1.6478279831526508,
      "learning_rate": 3.472060438683302e-07,
      "loss": 0.9248,
      "step": 24
    },
    {
      "epoch": 0.41,
      "grad_norm": 2.1263539873072306,
      "learning_rate": 3.348097165295075e-07,
      "loss": 0.9232,
      "step": 25
    },
    {
      "epoch": 0.43,
      "grad_norm": 4.833864927411976,
      "learning_rate": 3.2217298683490525e-07,
      "loss": 0.9349,
      "step": 26
    },
    {
      "epoch": 0.44,
      "grad_norm": 1.821858101250319,
      "learning_rate": 3.0933167496777873e-07,
      "loss": 0.9038,
      "step": 27
    },
    {
      "epoch": 0.46,
      "grad_norm": 3.375067752782225,
      "learning_rate": 2.9632218102177856e-07,
      "loss": 0.9208,
      "step": 28
    },
    {
      "epoch": 0.48,
      "grad_norm": 1.6452545248116937,
      "learning_rate": 2.8318138182093047e-07,
      "loss": 0.9091,
      "step": 29
    },
    {
      "epoch": 0.49,
      "grad_norm": 1.641648446261072,
      "learning_rate": 2.6994652638827075e-07,
      "loss": 0.8863,
      "step": 30
    },
    {
      "epoch": 0.51,
      "grad_norm": 1.8514627902865017,
      "learning_rate": 2.566551303594437e-07,
      "loss": 0.9176,
      "step": 31
    },
    {
      "epoch": 0.52,
      "grad_norm": 1.6398219929819895,
      "learning_rate": 2.433448696405563e-07,
      "loss": 0.9132,
      "step": 32
    },
    {
      "epoch": 0.54,
      "grad_norm": 1.6191504043540148,
      "learning_rate": 2.300534736117292e-07,
      "loss": 0.9188,
      "step": 33
    },
    {
      "epoch": 0.56,
      "grad_norm": 1.6846506224257867,
      "learning_rate": 2.168186181790695e-07,
      "loss": 0.9207,
      "step": 34
    },
    {
      "epoch": 0.57,
      "grad_norm": 2.702325993164427,
      "learning_rate": 2.0367781897822144e-07,
      "loss": 0.917,
      "step": 35
    },
    {
      "epoch": 0.59,
      "grad_norm": 4.5333338294812044,
      "learning_rate": 1.9066832503222128e-07,
      "loss": 0.9267,
      "step": 36
    },
    {
      "epoch": 0.61,
      "grad_norm": 1.831113061882906,
      "learning_rate": 1.7782701316509478e-07,
      "loss": 0.9067,
      "step": 37
    },
    {
      "epoch": 0.62,
      "grad_norm": 1.6699336990379,
      "learning_rate": 1.651902834704924e-07,
      "loss": 0.92,
      "step": 38
    },
    {
      "epoch": 0.64,
      "grad_norm": 1.8690462677412756,
      "learning_rate": 1.5279395613166985e-07,
      "loss": 0.942,
      "step": 39
    },
    {
      "epoch": 0.66,
      "grad_norm": 1.9247744555110595,
      "learning_rate": 1.4067316988528616e-07,
      "loss": 0.9024,
      "step": 40
    },
    {
      "epoch": 0.67,
      "grad_norm": 1.453181146995208,
      "learning_rate": 1.2886228241683748e-07,
      "loss": 0.9064,
      "step": 41
    },
    {
      "epoch": 0.69,
      "grad_norm": 2.049559625310238,
      "learning_rate": 1.173947729700644e-07,
      "loss": 0.9271,
      "step": 42
    },
    {
      "epoch": 0.7,
      "grad_norm": 1.486378300397099,
      "learning_rate": 1.0630314744639829e-07,
      "loss": 0.9109,
      "step": 43
    },
    {
      "epoch": 0.72,
      "grad_norm": 2.1416606235277196,
      "learning_rate": 9.561884626345204e-08,
      "loss": 0.904,
      "step": 44
    },
    {
      "epoch": 0.74,
      "grad_norm": 1.6338101300074668,
      "learning_rate": 8.537215523374037e-08,
      "loss": 0.9148,
      "step": 45
    },
    {
      "epoch": 0.75,
      "grad_norm": 2.3037793287537545,
      "learning_rate": 7.559211971625384e-08,
      "loss": 0.8772,
      "step": 46
    },
    {
      "epoch": 0.77,
      "grad_norm": 1.788820498612225,
      "learning_rate": 6.630646228423323e-08,
      "loss": 0.9081,
      "step": 47
    },
    {
      "epoch": 0.79,
      "grad_norm": 2.313616876377129,
      "learning_rate": 5.75415041425234e-08,
      "loss": 0.93,
      "step": 48
    },
    {
      "epoch": 0.8,
      "grad_norm": 1.8442253511582427,
      "learning_rate": 4.932209051725914e-08,
      "loss": 0.9303,
      "step": 49
    },
    {
      "epoch": 0.82,
      "grad_norm": 1.797668108469576,
      "learning_rate": 4.167152022937123e-08,
      "loss": 0.9046,
      "step": 50
    },
    {
      "epoch": 0.84,
      "grad_norm": 2.0071094870512707,
      "learning_rate": 3.461147965154845e-08,
      "loss": 0.8853,
      "step": 51
    },
    {
      "epoch": 0.85,
      "grad_norm": 1.5091131315393522,
      "learning_rate": 2.816198123585714e-08,
      "loss": 0.893,
      "step": 52
    },
    {
      "epoch": 0.87,
      "grad_norm": 2.291025373137871,
      "learning_rate": 2.234130678627169e-08,
      "loss": 0.9102,
      "step": 53
    },
    {
      "epoch": 0.89,
      "grad_norm": 1.975139175162289,
      "learning_rate": 1.7165955636915392e-08,
      "loss": 0.9448,
      "step": 54
    },
    {
      "epoch": 0.9,
      "grad_norm": 1.9662365790545857,
      "learning_rate": 1.265059788290468e-08,
      "loss": 0.9126,
      "step": 55
    },
    {
      "epoch": 0.92,
      "grad_norm": 1.5860718910737048,
      "learning_rate": 8.808032796371017e-09,
      "loss": 0.9008,
      "step": 56
    },
    {
      "epoch": 0.93,
      "grad_norm": 5.824152576947844,
      "learning_rate": 5.649152545533331e-09,
      "loss": 0.9104,
      "step": 57
    },
    {
      "epoch": 0.95,
      "grad_norm": 1.8084725184982782,
      "learning_rate": 3.1829113196638613e-09,
      "loss": 0.9108,
      "step": 58
    },
    {
      "epoch": 0.97,
      "grad_norm": 2.4499467355310878,
      "learning_rate": 1.4162999474657266e-09,
      "loss": 0.9218,
      "step": 59
    },
    {
      "epoch": 0.98,
      "grad_norm": 1.7234130969562493,
      "learning_rate": 3.5432608080951386e-10,
      "loss": 0.913,
      "step": 60
    },
    {
      "epoch": 1.0,
      "grad_norm": 1.5468535844514548,
      "learning_rate": 0.0,
      "loss": 0.9164,
      "step": 61
    },
    {
      "epoch": 1.0,
      "step": 61,
      "total_flos": 57696792150016.0,
      "train_loss": 0.9464570713824914,
      "train_runtime": 1092.378,
      "train_samples_per_second": 14.334,
      "train_steps_per_second": 0.056
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 61,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "total_flos": 57696792150016.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}