{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.868411617067049,
  "eval_steps": 2000,
  "global_step": 8000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 2.6033709049224854,
      "learning_rate": 9.9e-07,
      "loss": 1.0789,
      "step": 100
    },
    {
      "epoch": 0.07,
      "grad_norm": 3.7718708515167236,
      "learning_rate": 9.9e-07,
      "loss": 0.8366,
      "step": 200
    },
    {
      "epoch": 0.11,
      "grad_norm": 4.209937572479248,
      "learning_rate": 9.8e-07,
      "loss": 0.6863,
      "step": 300
    },
    {
      "epoch": 0.14,
      "grad_norm": 2.8079888820648193,
      "learning_rate": 9.698989898989898e-07,
      "loss": 0.6659,
      "step": 400
    },
    {
      "epoch": 0.18,
      "grad_norm": 4.12679386138916,
      "learning_rate": 9.598989898989899e-07,
      "loss": 0.6546,
      "step": 500
    },
    {
      "epoch": 0.22,
      "grad_norm": 3.909221887588501,
      "learning_rate": 9.497979797979798e-07,
      "loss": 0.6581,
      "step": 600
    },
    {
      "epoch": 0.25,
      "grad_norm": 3.234121322631836,
      "learning_rate": 9.396969696969696e-07,
      "loss": 0.6224,
      "step": 700
    },
    {
      "epoch": 0.29,
      "grad_norm": 3.9791226387023926,
      "learning_rate": 9.295959595959596e-07,
      "loss": 0.6139,
      "step": 800
    },
    {
      "epoch": 0.32,
      "grad_norm": 3.557487726211548,
      "learning_rate": 9.194949494949495e-07,
      "loss": 0.6272,
      "step": 900
    },
    {
      "epoch": 0.36,
      "grad_norm": 3.946579694747925,
      "learning_rate": 9.093939393939394e-07,
      "loss": 0.6312,
      "step": 1000
    },
    {
      "epoch": 0.39,
      "grad_norm": 3.4085192680358887,
      "learning_rate": 8.992929292929292e-07,
      "loss": 0.6139,
      "step": 1100
    },
    {
      "epoch": 0.43,
      "grad_norm": 3.036348342895508,
      "learning_rate": 8.891919191919191e-07,
      "loss": 0.637,
      "step": 1200
    },
    {
      "epoch": 0.47,
      "grad_norm": 5.595344066619873,
      "learning_rate": 8.790909090909091e-07,
      "loss": 0.6447,
      "step": 1300
    },
    {
      "epoch": 0.5,
      "grad_norm": 4.76248025894165,
      "learning_rate": 8.68989898989899e-07,
      "loss": 0.6419,
      "step": 1400
    },
    {
      "epoch": 0.54,
      "grad_norm": 2.9645845890045166,
      "learning_rate": 8.588888888888888e-07,
      "loss": 0.6569,
      "step": 1500
    },
    {
      "epoch": 0.57,
      "grad_norm": 2.357501268386841,
      "learning_rate": 8.487878787878787e-07,
      "loss": 0.6345,
      "step": 1600
    },
    {
      "epoch": 0.61,
      "grad_norm": 4.0495171546936035,
      "learning_rate": 8.386868686868687e-07,
      "loss": 0.6004,
      "step": 1700
    },
    {
      "epoch": 0.65,
      "grad_norm": 3.0601418018341064,
      "learning_rate": 8.285858585858585e-07,
      "loss": 0.6444,
      "step": 1800
    },
    {
      "epoch": 0.68,
      "grad_norm": 4.655466079711914,
      "learning_rate": 8.184848484848484e-07,
      "loss": 0.621,
      "step": 1900
    },
    {
      "epoch": 0.72,
      "grad_norm": 4.7475666999816895,
      "learning_rate": 8.083838383838384e-07,
      "loss": 0.6554,
      "step": 2000
    },
    {
      "epoch": 0.72,
      "eval_loss": 0.7412441372871399,
      "eval_runtime": 565.2186,
      "eval_samples_per_second": 1.769,
      "eval_steps_per_second": 0.442,
      "step": 2000
    },
    {
      "epoch": 0.75,
      "grad_norm": 4.251164436340332,
      "learning_rate": 7.982828282828282e-07,
      "loss": 0.5995,
      "step": 2100
    },
    {
      "epoch": 0.79,
      "grad_norm": 5.217769145965576,
      "learning_rate": 7.881818181818182e-07,
      "loss": 0.5995,
      "step": 2200
    },
    {
      "epoch": 0.82,
      "grad_norm": 3.5968048572540283,
      "learning_rate": 7.78080808080808e-07,
      "loss": 0.5888,
      "step": 2300
    },
    {
      "epoch": 0.86,
      "grad_norm": 3.394106388092041,
      "learning_rate": 7.679797979797979e-07,
      "loss": 0.5945,
      "step": 2400
    },
    {
      "epoch": 0.9,
      "grad_norm": 2.829554796218872,
      "learning_rate": 7.578787878787879e-07,
      "loss": 0.6102,
      "step": 2500
    },
    {
      "epoch": 0.93,
      "grad_norm": 6.449063777923584,
      "learning_rate": 7.477777777777778e-07,
      "loss": 0.6231,
      "step": 2600
    },
    {
      "epoch": 0.97,
      "grad_norm": 5.725988388061523,
      "learning_rate": 7.376767676767676e-07,
      "loss": 0.5908,
      "step": 2700
    },
    {
      "epoch": 1.0,
      "grad_norm": 4.436861515045166,
      "learning_rate": 7.276767676767677e-07,
      "loss": 0.6021,
      "step": 2800
    },
    {
      "epoch": 1.04,
      "grad_norm": 5.17264986038208,
      "learning_rate": 7.175757575757575e-07,
      "loss": 0.6142,
      "step": 2900
    },
    {
      "epoch": 1.08,
      "grad_norm": 4.872317790985107,
      "learning_rate": 7.074747474747474e-07,
      "loss": 0.572,
      "step": 3000
    },
    {
      "epoch": 1.11,
      "grad_norm": 2.839364767074585,
      "learning_rate": 6.973737373737374e-07,
      "loss": 0.5839,
      "step": 3100
    },
    {
      "epoch": 1.15,
      "grad_norm": 4.395648956298828,
      "learning_rate": 6.872727272727273e-07,
      "loss": 0.5789,
      "step": 3200
    },
    {
      "epoch": 1.18,
      "grad_norm": 3.2688417434692383,
      "learning_rate": 6.771717171717171e-07,
      "loss": 0.5866,
      "step": 3300
    },
    {
      "epoch": 1.22,
      "grad_norm": 3.7224113941192627,
      "learning_rate": 6.67070707070707e-07,
      "loss": 0.5899,
      "step": 3400
    },
    {
      "epoch": 1.25,
      "grad_norm": 5.710260391235352,
      "learning_rate": 6.56969696969697e-07,
      "loss": 0.5904,
      "step": 3500
    },
    {
      "epoch": 1.29,
      "grad_norm": 2.4542274475097656,
      "learning_rate": 6.468686868686868e-07,
      "loss": 0.576,
      "step": 3600
    },
    {
      "epoch": 1.33,
      "grad_norm": 3.639458417892456,
      "learning_rate": 6.367676767676767e-07,
      "loss": 0.5973,
      "step": 3700
    },
    {
      "epoch": 1.36,
      "grad_norm": 5.416239261627197,
      "learning_rate": 6.267676767676767e-07,
      "loss": 0.5855,
      "step": 3800
    },
    {
      "epoch": 1.4,
      "grad_norm": 4.7683491706848145,
      "learning_rate": 6.166666666666667e-07,
      "loss": 0.601,
      "step": 3900
    },
    {
      "epoch": 1.43,
      "grad_norm": 4.035972595214844,
      "learning_rate": 6.065656565656565e-07,
      "loss": 0.5937,
      "step": 4000
    },
    {
      "epoch": 1.43,
      "eval_loss": 0.7268955111503601,
      "eval_runtime": 200.0217,
      "eval_samples_per_second": 4.999,
      "eval_steps_per_second": 1.25,
      "step": 4000
    },
    {
      "epoch": 1.47,
      "grad_norm": 4.085280895233154,
      "learning_rate": 5.964646464646465e-07,
      "loss": 0.6013,
      "step": 4100
    },
    {
      "epoch": 1.51,
      "grad_norm": 3.6953155994415283,
      "learning_rate": 5.863636363636362e-07,
      "loss": 0.5759,
      "step": 4200
    },
    {
      "epoch": 1.54,
      "grad_norm": 3.836264133453369,
      "learning_rate": 5.762626262626262e-07,
      "loss": 0.5884,
      "step": 4300
    },
    {
      "epoch": 1.58,
      "grad_norm": 2.8229424953460693,
      "learning_rate": 5.661616161616162e-07,
      "loss": 0.587,
      "step": 4400
    },
    {
      "epoch": 1.61,
      "grad_norm": 4.92821741104126,
      "learning_rate": 5.56060606060606e-07,
      "loss": 0.588,
      "step": 4500
    },
    {
      "epoch": 1.65,
      "grad_norm": 4.6057209968566895,
      "learning_rate": 5.459595959595959e-07,
      "loss": 0.5626,
      "step": 4600
    },
    {
      "epoch": 1.69,
      "grad_norm": 4.422310829162598,
      "learning_rate": 5.358585858585858e-07,
      "loss": 0.6004,
      "step": 4700
    },
    {
      "epoch": 1.72,
      "grad_norm": 4.302188873291016,
      "learning_rate": 5.257575757575757e-07,
      "loss": 0.5968,
      "step": 4800
    },
    {
      "epoch": 1.76,
      "grad_norm": 4.448782444000244,
      "learning_rate": 5.156565656565657e-07,
      "loss": 0.5558,
      "step": 4900
    },
    {
      "epoch": 1.79,
      "grad_norm": 7.0250468254089355,
      "learning_rate": 5.055555555555555e-07,
      "loss": 0.5816,
      "step": 5000
    },
    {
      "epoch": 1.83,
      "grad_norm": 3.789271354675293,
      "learning_rate": 4.954545454545454e-07,
      "loss": 0.5916,
      "step": 5100
    },
    {
      "epoch": 1.86,
      "grad_norm": 4.383288860321045,
      "learning_rate": 4.853535353535353e-07,
      "loss": 0.5971,
      "step": 5200
    },
    {
      "epoch": 1.9,
      "grad_norm": 3.860795497894287,
      "learning_rate": 4.752525252525252e-07,
      "loss": 0.5777,
      "step": 5300
    },
    {
      "epoch": 1.94,
      "grad_norm": 4.237746715545654,
      "learning_rate": 4.6515151515151513e-07,
      "loss": 0.5624,
      "step": 5400
    },
    {
      "epoch": 1.97,
      "grad_norm": 4.311546325683594,
      "learning_rate": 4.55050505050505e-07,
      "loss": 0.564,
      "step": 5500
    },
    {
      "epoch": 2.01,
      "grad_norm": 4.511820316314697,
      "learning_rate": 4.449494949494949e-07,
      "loss": 0.5877,
      "step": 5600
    },
    {
      "epoch": 2.04,
      "grad_norm": 3.5643811225891113,
      "learning_rate": 4.3484848484848483e-07,
      "loss": 0.5613,
      "step": 5700
    },
    {
      "epoch": 2.08,
      "grad_norm": 8.33705997467041,
      "learning_rate": 4.2474747474747474e-07,
      "loss": 0.5495,
      "step": 5800
    },
    {
      "epoch": 2.12,
      "grad_norm": 7.947268962860107,
      "learning_rate": 4.1464646464646466e-07,
      "loss": 0.5606,
      "step": 5900
    },
    {
      "epoch": 2.15,
      "grad_norm": 3.636181116104126,
      "learning_rate": 4.045454545454545e-07,
      "loss": 0.5959,
      "step": 6000
    },
    {
      "epoch": 2.15,
      "eval_loss": 0.7278199791908264,
      "eval_runtime": 200.1118,
      "eval_samples_per_second": 4.997,
      "eval_steps_per_second": 1.249,
      "step": 6000
    },
    {
      "epoch": 2.19,
      "grad_norm": 5.361893653869629,
      "learning_rate": 3.9444444444444444e-07,
      "loss": 0.5576,
      "step": 6100
    },
    {
      "epoch": 2.22,
      "grad_norm": 6.71414041519165,
      "learning_rate": 3.843434343434343e-07,
      "loss": 0.5528,
      "step": 6200
    },
    {
      "epoch": 2.26,
      "grad_norm": 5.422142505645752,
      "learning_rate": 3.7424242424242427e-07,
      "loss": 0.5768,
      "step": 6300
    },
    {
      "epoch": 2.29,
      "grad_norm": 5.764580249786377,
      "learning_rate": 3.6414141414141413e-07,
      "loss": 0.5366,
      "step": 6400
    },
    {
      "epoch": 2.33,
      "grad_norm": 4.993276119232178,
      "learning_rate": 3.5404040404040405e-07,
      "loss": 0.5868,
      "step": 6500
    },
    {
      "epoch": 2.37,
      "grad_norm": 4.675057411193848,
      "learning_rate": 3.439393939393939e-07,
      "loss": 0.5757,
      "step": 6600
    },
    {
      "epoch": 2.4,
      "grad_norm": 5.1432952880859375,
      "learning_rate": 3.3383838383838383e-07,
      "loss": 0.5481,
      "step": 6700
    },
    {
      "epoch": 2.44,
      "grad_norm": 4.467919826507568,
      "learning_rate": 3.237373737373737e-07,
      "loss": 0.5678,
      "step": 6800
    },
    {
      "epoch": 2.47,
      "grad_norm": 5.593632698059082,
      "learning_rate": 3.1363636363636366e-07,
      "loss": 0.5806,
      "step": 6900
    },
    {
      "epoch": 2.51,
      "grad_norm": 6.1436543464660645,
      "learning_rate": 3.035353535353535e-07,
      "loss": 0.5948,
      "step": 7000
    },
    {
      "epoch": 2.55,
      "grad_norm": 5.402268409729004,
      "learning_rate": 2.9343434343434344e-07,
      "loss": 0.5694,
      "step": 7100
    },
    {
      "epoch": 2.58,
      "grad_norm": 6.120490550994873,
      "learning_rate": 2.833333333333333e-07,
      "loss": 0.5463,
      "step": 7200
    },
    {
      "epoch": 2.62,
      "grad_norm": 5.256354331970215,
      "learning_rate": 2.732323232323232e-07,
      "loss": 0.5404,
      "step": 7300
    },
    {
      "epoch": 2.65,
      "grad_norm": 5.65523624420166,
      "learning_rate": 2.631313131313131e-07,
      "loss": 0.5585,
      "step": 7400
    },
    {
      "epoch": 2.69,
      "grad_norm": 5.652821063995361,
      "learning_rate": 2.5303030303030305e-07,
      "loss": 0.584,
      "step": 7500
    },
    {
      "epoch": 2.72,
      "grad_norm": 5.771843433380127,
      "learning_rate": 2.429292929292929e-07,
      "loss": 0.5688,
      "step": 7600
    },
    {
      "epoch": 2.76,
      "grad_norm": 4.806066036224365,
      "learning_rate": 2.3282828282828283e-07,
      "loss": 0.5708,
      "step": 7700
    },
    {
      "epoch": 2.8,
      "grad_norm": 5.846754550933838,
      "learning_rate": 2.2272727272727272e-07,
      "loss": 0.5689,
      "step": 7800
    },
    {
      "epoch": 2.83,
      "grad_norm": 5.313455104827881,
      "learning_rate": 2.1262626262626264e-07,
      "loss": 0.5546,
      "step": 7900
    },
    {
      "epoch": 2.87,
      "grad_norm": 5.1212897300720215,
      "learning_rate": 2.0252525252525253e-07,
      "loss": 0.5407,
      "step": 8000
    },
    {
      "epoch": 2.87,
      "eval_loss": 0.7307297587394714,
      "eval_runtime": 200.5169,
      "eval_samples_per_second": 4.987,
      "eval_steps_per_second": 1.247,
      "step": 8000
    }
  ],
  "logging_steps": 100,
  "max_steps": 10000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 2000,
  "total_flos": 7.541625072742564e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}