{
"best_metric": 1.8469011783599854,
"best_model_checkpoint": "miner_id_24/checkpoint-50",
"epoch": 3.031578947368421,
"eval_steps": 50,
"global_step": 72,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.042105263157894736,
"grad_norm": 56.92111587524414,
"learning_rate": 3.3333333333333333e-06,
"loss": 4.736,
"step": 1
},
{
"epoch": 0.042105263157894736,
"eval_loss": 5.939783573150635,
"eval_runtime": 0.9382,
"eval_samples_per_second": 42.636,
"eval_steps_per_second": 21.318,
"step": 1
},
{
"epoch": 0.08421052631578947,
"grad_norm": 90.5248794555664,
"learning_rate": 6.666666666666667e-06,
"loss": 5.8035,
"step": 2
},
{
"epoch": 0.12631578947368421,
"grad_norm": 91.04177856445312,
"learning_rate": 1e-05,
"loss": 5.7811,
"step": 3
},
{
"epoch": 0.16842105263157894,
"grad_norm": 61.93333435058594,
"learning_rate": 1.3333333333333333e-05,
"loss": 6.0173,
"step": 4
},
{
"epoch": 0.21052631578947367,
"grad_norm": 30.22568702697754,
"learning_rate": 1.6666666666666667e-05,
"loss": 6.035,
"step": 5
},
{
"epoch": 0.25263157894736843,
"grad_norm": 13.215743064880371,
"learning_rate": 2e-05,
"loss": 4.5946,
"step": 6
},
{
"epoch": 0.29473684210526313,
"grad_norm": 13.480833053588867,
"learning_rate": 2.3333333333333336e-05,
"loss": 4.9091,
"step": 7
},
{
"epoch": 0.3368421052631579,
"grad_norm": 16.19270133972168,
"learning_rate": 2.6666666666666667e-05,
"loss": 4.8701,
"step": 8
},
{
"epoch": 0.37894736842105264,
"grad_norm": 16.374988555908203,
"learning_rate": 3e-05,
"loss": 4.968,
"step": 9
},
{
"epoch": 0.42105263157894735,
"grad_norm": 14.905458450317383,
"learning_rate": 3.3333333333333335e-05,
"loss": 5.0304,
"step": 10
},
{
"epoch": 0.4631578947368421,
"grad_norm": 9.489227294921875,
"learning_rate": 3.6666666666666666e-05,
"loss": 3.7592,
"step": 11
},
{
"epoch": 0.5052631578947369,
"grad_norm": 11.08930778503418,
"learning_rate": 4e-05,
"loss": 3.8876,
"step": 12
},
{
"epoch": 0.5473684210526316,
"grad_norm": 10.394680976867676,
"learning_rate": 4.3333333333333334e-05,
"loss": 3.3706,
"step": 13
},
{
"epoch": 0.5894736842105263,
"grad_norm": 9.603910446166992,
"learning_rate": 4.666666666666667e-05,
"loss": 3.4961,
"step": 14
},
{
"epoch": 0.631578947368421,
"grad_norm": 10.872783660888672,
"learning_rate": 5e-05,
"loss": 3.4646,
"step": 15
},
{
"epoch": 0.6736842105263158,
"grad_norm": 4.739568710327148,
"learning_rate": 5.333333333333333e-05,
"loss": 2.9148,
"step": 16
},
{
"epoch": 0.7157894736842105,
"grad_norm": 10.105243682861328,
"learning_rate": 5.666666666666667e-05,
"loss": 2.6496,
"step": 17
},
{
"epoch": 0.7578947368421053,
"grad_norm": 7.365514755249023,
"learning_rate": 6e-05,
"loss": 2.397,
"step": 18
},
{
"epoch": 0.8,
"grad_norm": 9.584090232849121,
"learning_rate": 6.333333333333333e-05,
"loss": 2.6661,
"step": 19
},
{
"epoch": 0.8421052631578947,
"grad_norm": 12.043893814086914,
"learning_rate": 6.666666666666667e-05,
"loss": 2.6988,
"step": 20
},
{
"epoch": 0.8842105263157894,
"grad_norm": 4.687735557556152,
"learning_rate": 7e-05,
"loss": 2.319,
"step": 21
},
{
"epoch": 0.9263157894736842,
"grad_norm": 3.9934568405151367,
"learning_rate": 7.333333333333333e-05,
"loss": 2.3204,
"step": 22
},
{
"epoch": 0.968421052631579,
"grad_norm": 5.374063968658447,
"learning_rate": 7.666666666666667e-05,
"loss": 2.3321,
"step": 23
},
{
"epoch": 1.0105263157894737,
"grad_norm": 7.540462017059326,
"learning_rate": 8e-05,
"loss": 3.4714,
"step": 24
},
{
"epoch": 1.0526315789473684,
"grad_norm": 2.834944725036621,
"learning_rate": 8.333333333333334e-05,
"loss": 2.2019,
"step": 25
},
{
"epoch": 1.0947368421052632,
"grad_norm": 2.0690505504608154,
"learning_rate": 8.666666666666667e-05,
"loss": 1.972,
"step": 26
},
{
"epoch": 1.1368421052631579,
"grad_norm": 2.3660922050476074,
"learning_rate": 9e-05,
"loss": 1.8028,
"step": 27
},
{
"epoch": 1.1789473684210527,
"grad_norm": 3.848764657974243,
"learning_rate": 9.333333333333334e-05,
"loss": 2.236,
"step": 28
},
{
"epoch": 1.2210526315789474,
"grad_norm": 3.466052293777466,
"learning_rate": 9.666666666666667e-05,
"loss": 2.2219,
"step": 29
},
{
"epoch": 1.263157894736842,
"grad_norm": 2.4781200885772705,
"learning_rate": 0.0001,
"loss": 1.9075,
"step": 30
},
{
"epoch": 1.305263157894737,
"grad_norm": 1.7537541389465332,
"learning_rate": 9.986018985905901e-05,
"loss": 1.4744,
"step": 31
},
{
"epoch": 1.3473684210526315,
"grad_norm": 2.746380090713501,
"learning_rate": 9.944154131125642e-05,
"loss": 1.8355,
"step": 32
},
{
"epoch": 1.3894736842105262,
"grad_norm": 3.393972635269165,
"learning_rate": 9.874639560909117e-05,
"loss": 2.1244,
"step": 33
},
{
"epoch": 1.431578947368421,
"grad_norm": 2.6089048385620117,
"learning_rate": 9.777864028930705e-05,
"loss": 1.9716,
"step": 34
},
{
"epoch": 1.4736842105263157,
"grad_norm": 1.823164463043213,
"learning_rate": 9.654368743221022e-05,
"loss": 1.9002,
"step": 35
},
{
"epoch": 1.5157894736842106,
"grad_norm": 1.9079663753509521,
"learning_rate": 9.504844339512095e-05,
"loss": 1.6313,
"step": 36
},
{
"epoch": 1.5578947368421052,
"grad_norm": 2.2473649978637695,
"learning_rate": 9.330127018922194e-05,
"loss": 1.7734,
"step": 37
},
{
"epoch": 1.6,
"grad_norm": 2.3360750675201416,
"learning_rate": 9.131193871579975e-05,
"loss": 1.914,
"step": 38
},
{
"epoch": 1.6421052631578947,
"grad_norm": 2.3720486164093018,
"learning_rate": 8.90915741234015e-05,
"loss": 1.9042,
"step": 39
},
{
"epoch": 1.6842105263157894,
"grad_norm": 1.573614478111267,
"learning_rate": 8.665259359149132e-05,
"loss": 1.84,
"step": 40
},
{
"epoch": 1.7263157894736842,
"grad_norm": 1.7999635934829712,
"learning_rate": 8.400863688854597e-05,
"loss": 1.6501,
"step": 41
},
{
"epoch": 1.768421052631579,
"grad_norm": 1.5906981229782104,
"learning_rate": 8.117449009293668e-05,
"loss": 1.4623,
"step": 42
},
{
"epoch": 1.8105263157894735,
"grad_norm": 2.0781795978546143,
"learning_rate": 7.81660029031811e-05,
"loss": 1.8479,
"step": 43
},
{
"epoch": 1.8526315789473684,
"grad_norm": 2.567904472351074,
"learning_rate": 7.500000000000001e-05,
"loss": 2.108,
"step": 44
},
{
"epoch": 1.8947368421052633,
"grad_norm": 1.6959781646728516,
"learning_rate": 7.169418695587791e-05,
"loss": 1.783,
"step": 45
},
{
"epoch": 1.936842105263158,
"grad_norm": 1.7170368432998657,
"learning_rate": 6.826705121831976e-05,
"loss": 1.4268,
"step": 46
},
{
"epoch": 1.9789473684210526,
"grad_norm": 1.9766607284545898,
"learning_rate": 6.473775872054521e-05,
"loss": 1.7051,
"step": 47
},
{
"epoch": 2.0210526315789474,
"grad_norm": 2.622908353805542,
"learning_rate": 6.112604669781572e-05,
"loss": 2.4976,
"step": 48
},
{
"epoch": 2.0631578947368423,
"grad_norm": 1.3014780282974243,
"learning_rate": 5.745211330880872e-05,
"loss": 1.4391,
"step": 49
},
{
"epoch": 2.1052631578947367,
"grad_norm": 1.44355309009552,
"learning_rate": 5.373650467932122e-05,
"loss": 1.246,
"step": 50
},
{
"epoch": 2.1052631578947367,
"eval_loss": 1.8469011783599854,
"eval_runtime": 0.9352,
"eval_samples_per_second": 42.772,
"eval_steps_per_second": 21.386,
"step": 50
},
{
"epoch": 2.1473684210526316,
"grad_norm": 1.6968210935592651,
"learning_rate": 5e-05,
"loss": 1.1525,
"step": 51
},
{
"epoch": 2.1894736842105265,
"grad_norm": 2.781216621398926,
"learning_rate": 4.626349532067879e-05,
"loss": 1.622,
"step": 52
},
{
"epoch": 2.231578947368421,
"grad_norm": 2.010775089263916,
"learning_rate": 4.254788669119127e-05,
"loss": 1.7309,
"step": 53
},
{
"epoch": 2.2736842105263158,
"grad_norm": 1.5809705257415771,
"learning_rate": 3.887395330218429e-05,
"loss": 1.621,
"step": 54
},
{
"epoch": 2.3157894736842106,
"grad_norm": 1.6603655815124512,
"learning_rate": 3.5262241279454785e-05,
"loss": 1.3279,
"step": 55
},
{
"epoch": 2.3578947368421055,
"grad_norm": 1.745091199874878,
"learning_rate": 3.173294878168025e-05,
"loss": 1.2659,
"step": 56
},
{
"epoch": 2.4,
"grad_norm": 2.30116868019104,
"learning_rate": 2.8305813044122097e-05,
"loss": 1.668,
"step": 57
},
{
"epoch": 2.442105263157895,
"grad_norm": 1.877693772315979,
"learning_rate": 2.500000000000001e-05,
"loss": 1.6808,
"step": 58
},
{
"epoch": 2.4842105263157896,
"grad_norm": 1.442994475364685,
"learning_rate": 2.1833997096818898e-05,
"loss": 1.5137,
"step": 59
},
{
"epoch": 2.526315789473684,
"grad_norm": 1.3358914852142334,
"learning_rate": 1.8825509907063327e-05,
"loss": 1.0792,
"step": 60
},
{
"epoch": 2.568421052631579,
"grad_norm": 1.8833284378051758,
"learning_rate": 1.599136311145402e-05,
"loss": 1.423,
"step": 61
},
{
"epoch": 2.610526315789474,
"grad_norm": 2.2090237140655518,
"learning_rate": 1.3347406408508695e-05,
"loss": 1.5257,
"step": 62
},
{
"epoch": 2.6526315789473687,
"grad_norm": 1.901261806488037,
"learning_rate": 1.090842587659851e-05,
"loss": 1.8915,
"step": 63
},
{
"epoch": 2.694736842105263,
"grad_norm": 1.4518189430236816,
"learning_rate": 8.688061284200266e-06,
"loss": 1.3918,
"step": 64
},
{
"epoch": 2.736842105263158,
"grad_norm": 1.5766031742095947,
"learning_rate": 6.698729810778065e-06,
"loss": 1.3363,
"step": 65
},
{
"epoch": 2.7789473684210524,
"grad_norm": 1.7875163555145264,
"learning_rate": 4.951556604879048e-06,
"loss": 1.3858,
"step": 66
},
{
"epoch": 2.8210526315789473,
"grad_norm": 2.0564568042755127,
"learning_rate": 3.4563125677897932e-06,
"loss": 1.4302,
"step": 67
},
{
"epoch": 2.863157894736842,
"grad_norm": 1.6587474346160889,
"learning_rate": 2.221359710692961e-06,
"loss": 1.5845,
"step": 68
},
{
"epoch": 2.905263157894737,
"grad_norm": 1.4605294466018677,
"learning_rate": 1.2536043909088191e-06,
"loss": 1.2857,
"step": 69
},
{
"epoch": 2.9473684210526314,
"grad_norm": 1.5679987668991089,
"learning_rate": 5.584586887435739e-07,
"loss": 1.1443,
"step": 70
},
{
"epoch": 2.9894736842105263,
"grad_norm": 2.4672374725341797,
"learning_rate": 1.3981014094099353e-07,
"loss": 1.6679,
"step": 71
},
{
"epoch": 3.031578947368421,
"grad_norm": 1.6800793409347534,
"learning_rate": 0.0,
"loss": 1.7372,
"step": 72
}
],
"logging_steps": 1,
"max_steps": 72,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.5338666276683776e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}