{
"best_metric": 10.366877555847168,
"best_model_checkpoint": "miner_id_24/checkpoint-75",
"epoch": 2.564102564102564,
"eval_steps": 25,
"global_step": 75,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03418803418803419,
"grad_norm": 0.024028141051530838,
"learning_rate": 3.3333333333333335e-05,
"loss": 10.3795,
"step": 1
},
{
"epoch": 0.03418803418803419,
"eval_loss": 10.37679672241211,
"eval_runtime": 0.1087,
"eval_samples_per_second": 459.817,
"eval_steps_per_second": 119.552,
"step": 1
},
{
"epoch": 0.06837606837606838,
"grad_norm": 0.027240164577960968,
"learning_rate": 6.666666666666667e-05,
"loss": 10.379,
"step": 2
},
{
"epoch": 0.10256410256410256,
"grad_norm": 0.027441630139946938,
"learning_rate": 0.0001,
"loss": 10.3783,
"step": 3
},
{
"epoch": 0.13675213675213677,
"grad_norm": 0.02922813594341278,
"learning_rate": 9.99571699711836e-05,
"loss": 10.378,
"step": 4
},
{
"epoch": 0.17094017094017094,
"grad_norm": 0.03131942078471184,
"learning_rate": 9.982876141412856e-05,
"loss": 10.3768,
"step": 5
},
{
"epoch": 0.20512820512820512,
"grad_norm": 0.04379850625991821,
"learning_rate": 9.961501876182148e-05,
"loss": 10.3738,
"step": 6
},
{
"epoch": 0.23931623931623933,
"grad_norm": 0.0539846196770668,
"learning_rate": 9.931634888554937e-05,
"loss": 10.3747,
"step": 7
},
{
"epoch": 0.27350427350427353,
"grad_norm": 0.02217571809887886,
"learning_rate": 9.893332032039701e-05,
"loss": 10.3786,
"step": 8
},
{
"epoch": 0.3076923076923077,
"grad_norm": 0.02530466951429844,
"learning_rate": 9.846666218300807e-05,
"loss": 10.3781,
"step": 9
},
{
"epoch": 0.3418803418803419,
"grad_norm": 0.027638306841254234,
"learning_rate": 9.791726278367022e-05,
"loss": 10.3776,
"step": 10
},
{
"epoch": 0.37606837606837606,
"grad_norm": 0.029069717973470688,
"learning_rate": 9.728616793536588e-05,
"loss": 10.3769,
"step": 11
},
{
"epoch": 0.41025641025641024,
"grad_norm": 0.03112073801457882,
"learning_rate": 9.657457896300791e-05,
"loss": 10.3757,
"step": 12
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.03275328129529953,
"learning_rate": 9.578385041664925e-05,
"loss": 10.376,
"step": 13
},
{
"epoch": 0.47863247863247865,
"grad_norm": 0.04708424210548401,
"learning_rate": 9.491548749301997e-05,
"loss": 10.3737,
"step": 14
},
{
"epoch": 0.5128205128205128,
"grad_norm": 0.024200988933444023,
"learning_rate": 9.397114317029975e-05,
"loss": 10.3772,
"step": 15
},
{
"epoch": 0.5470085470085471,
"grad_norm": 0.025222577154636383,
"learning_rate": 9.295261506157986e-05,
"loss": 10.3776,
"step": 16
},
{
"epoch": 0.5811965811965812,
"grad_norm": 0.029448403045535088,
"learning_rate": 9.186184199300464e-05,
"loss": 10.3763,
"step": 17
},
{
"epoch": 0.6153846153846154,
"grad_norm": 0.03146638721227646,
"learning_rate": 9.070090031310558e-05,
"loss": 10.3758,
"step": 18
},
{
"epoch": 0.6495726495726496,
"grad_norm": 0.03456265106797218,
"learning_rate": 8.947199994035401e-05,
"loss": 10.375,
"step": 19
},
{
"epoch": 0.6837606837606838,
"grad_norm": 0.03508660942316055,
"learning_rate": 8.817748015645558e-05,
"loss": 10.3738,
"step": 20
},
{
"epoch": 0.717948717948718,
"grad_norm": 0.05112173408269882,
"learning_rate": 8.681980515339464e-05,
"loss": 10.3729,
"step": 21
},
{
"epoch": 0.7521367521367521,
"grad_norm": 0.027508389204740524,
"learning_rate": 8.540155934270471e-05,
"loss": 10.3757,
"step": 22
},
{
"epoch": 0.7863247863247863,
"grad_norm": 0.03241690620779991,
"learning_rate": 8.392544243589427e-05,
"loss": 10.3762,
"step": 23
},
{
"epoch": 0.8205128205128205,
"grad_norm": 0.03374025970697403,
"learning_rate": 8.239426430539243e-05,
"loss": 10.3759,
"step": 24
},
{
"epoch": 0.8547008547008547,
"grad_norm": 0.03651708737015724,
"learning_rate": 8.081093963579707e-05,
"loss": 10.3749,
"step": 25
},
{
"epoch": 0.8547008547008547,
"eval_loss": 10.37341022491455,
"eval_runtime": 0.0998,
"eval_samples_per_second": 500.815,
"eval_steps_per_second": 130.212,
"step": 25
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.03954649716615677,
"learning_rate": 7.917848237560709e-05,
"loss": 10.3741,
"step": 26
},
{
"epoch": 0.9230769230769231,
"grad_norm": 0.04464032128453255,
"learning_rate": 7.75e-05,
"loss": 10.3713,
"step": 27
},
{
"epoch": 0.9572649572649573,
"grad_norm": 0.059212006628513336,
"learning_rate": 7.577868759557654e-05,
"loss": 10.3721,
"step": 28
},
{
"epoch": 0.9914529914529915,
"grad_norm": 0.0634295716881752,
"learning_rate": 7.401782177833148e-05,
"loss": 10.3694,
"step": 29
},
{
"epoch": 1.0256410256410255,
"grad_norm": 0.06830056756734848,
"learning_rate": 7.222075445642904e-05,
"loss": 18.6043,
"step": 30
},
{
"epoch": 1.0598290598290598,
"grad_norm": 0.038523830473423004,
"learning_rate": 7.03909064496551e-05,
"loss": 10.0611,
"step": 31
},
{
"epoch": 1.0940170940170941,
"grad_norm": 0.04325374588370323,
"learning_rate": 6.853176097769229e-05,
"loss": 10.396,
"step": 32
},
{
"epoch": 1.1282051282051282,
"grad_norm": 0.04963960871100426,
"learning_rate": 6.664685702961344e-05,
"loss": 10.5299,
"step": 33
},
{
"epoch": 1.1623931623931625,
"grad_norm": 0.04767056554555893,
"learning_rate": 6.473978262721463e-05,
"loss": 10.4291,
"step": 34
},
{
"epoch": 1.1965811965811965,
"grad_norm": 0.05898962542414665,
"learning_rate": 6.281416799501188e-05,
"loss": 10.5953,
"step": 35
},
{
"epoch": 1.2307692307692308,
"grad_norm": 0.0721501037478447,
"learning_rate": 6.087367864990233e-05,
"loss": 10.3811,
"step": 36
},
{
"epoch": 1.264957264957265,
"grad_norm": 0.04224683344364166,
"learning_rate": 5.8922008423644624e-05,
"loss": 9.4466,
"step": 37
},
{
"epoch": 1.2991452991452992,
"grad_norm": 0.05125311017036438,
"learning_rate": 5.696287243144013e-05,
"loss": 10.8119,
"step": 38
},
{
"epoch": 1.3333333333333333,
"grad_norm": 0.051614489406347275,
"learning_rate": 5.500000000000001e-05,
"loss": 10.4335,
"step": 39
},
{
"epoch": 1.3675213675213675,
"grad_norm": 0.06009482964873314,
"learning_rate": 5.303712756855988e-05,
"loss": 10.5632,
"step": 40
},
{
"epoch": 1.4017094017094016,
"grad_norm": 0.06151549890637398,
"learning_rate": 5.107799157635538e-05,
"loss": 10.3618,
"step": 41
},
{
"epoch": 1.435897435897436,
"grad_norm": 0.07110217213630676,
"learning_rate": 4.912632135009769e-05,
"loss": 10.5662,
"step": 42
},
{
"epoch": 1.4700854700854702,
"grad_norm": 0.09137392044067383,
"learning_rate": 4.718583200498814e-05,
"loss": 10.3167,
"step": 43
},
{
"epoch": 1.5042735042735043,
"grad_norm": 0.044047337025403976,
"learning_rate": 4.526021737278538e-05,
"loss": 8.1399,
"step": 44
},
{
"epoch": 1.5384615384615383,
"grad_norm": 0.06453102082014084,
"learning_rate": 4.3353142970386564e-05,
"loss": 12.2658,
"step": 45
},
{
"epoch": 1.5726495726495726,
"grad_norm": 0.06560295820236206,
"learning_rate": 4.146823902230772e-05,
"loss": 10.2777,
"step": 46
},
{
"epoch": 1.606837606837607,
"grad_norm": 0.06661686301231384,
"learning_rate": 3.960909355034491e-05,
"loss": 10.5795,
"step": 47
},
{
"epoch": 1.641025641025641,
"grad_norm": 0.0714309886097908,
"learning_rate": 3.777924554357096e-05,
"loss": 10.4365,
"step": 48
},
{
"epoch": 1.6752136752136753,
"grad_norm": 0.0735783651471138,
"learning_rate": 3.598217822166854e-05,
"loss": 10.5224,
"step": 49
},
{
"epoch": 1.7094017094017095,
"grad_norm": 0.0946367010474205,
"learning_rate": 3.422131240442349e-05,
"loss": 10.2449,
"step": 50
},
{
"epoch": 1.7094017094017095,
"eval_loss": 10.369140625,
"eval_runtime": 0.0971,
"eval_samples_per_second": 515.162,
"eval_steps_per_second": 133.942,
"step": 50
},
{
"epoch": 1.7435897435897436,
"grad_norm": 0.04459521546959877,
"learning_rate": 3.250000000000001e-05,
"loss": 4.0781,
"step": 51
},
{
"epoch": 1.7777777777777777,
"grad_norm": 0.09696029871702194,
"learning_rate": 3.082151762439293e-05,
"loss": 16.5477,
"step": 52
},
{
"epoch": 1.811965811965812,
"grad_norm": 0.07526196539402008,
"learning_rate": 2.9189060364202943e-05,
"loss": 10.3167,
"step": 53
},
{
"epoch": 1.8461538461538463,
"grad_norm": 0.07211493700742722,
"learning_rate": 2.760573569460757e-05,
"loss": 10.3497,
"step": 54
},
{
"epoch": 1.8803418803418803,
"grad_norm": 0.0758114755153656,
"learning_rate": 2.6074557564105727e-05,
"loss": 10.5534,
"step": 55
},
{
"epoch": 1.9145299145299144,
"grad_norm": 0.08330105990171432,
"learning_rate": 2.459844065729529e-05,
"loss": 10.3561,
"step": 56
},
{
"epoch": 1.9487179487179487,
"grad_norm": 0.1038593128323555,
"learning_rate": 2.3180194846605367e-05,
"loss": 10.4654,
"step": 57
},
{
"epoch": 1.982905982905983,
"grad_norm": 0.10510390996932983,
"learning_rate": 2.1822519843544424e-05,
"loss": 10.8554,
"step": 58
},
{
"epoch": 2.017094017094017,
"grad_norm": 0.12666811048984528,
"learning_rate": 2.0528000059645997e-05,
"loss": 17.6932,
"step": 59
},
{
"epoch": 2.051282051282051,
"grad_norm": 0.07206396758556366,
"learning_rate": 1.9299099686894423e-05,
"loss": 10.1238,
"step": 60
},
{
"epoch": 2.0854700854700856,
"grad_norm": 0.08021845668554306,
"learning_rate": 1.8138158006995364e-05,
"loss": 10.3257,
"step": 61
},
{
"epoch": 2.1196581196581197,
"grad_norm": 0.0848311111330986,
"learning_rate": 1.7047384938420154e-05,
"loss": 10.6517,
"step": 62
},
{
"epoch": 2.1538461538461537,
"grad_norm": 0.08744092285633087,
"learning_rate": 1.602885682970026e-05,
"loss": 10.3375,
"step": 63
},
{
"epoch": 2.1880341880341883,
"grad_norm": 0.09486929327249527,
"learning_rate": 1.5084512506980026e-05,
"loss": 10.7017,
"step": 64
},
{
"epoch": 2.2222222222222223,
"grad_norm": 0.11365848779678345,
"learning_rate": 1.4216149583350754e-05,
"loss": 10.3117,
"step": 65
},
{
"epoch": 2.2564102564102564,
"grad_norm": 0.0729779526591301,
"learning_rate": 1.3425421036992098e-05,
"loss": 8.8041,
"step": 66
},
{
"epoch": 2.2905982905982905,
"grad_norm": 0.08433752506971359,
"learning_rate": 1.2713832064634126e-05,
"loss": 11.3562,
"step": 67
},
{
"epoch": 2.324786324786325,
"grad_norm": 0.09108246862888336,
"learning_rate": 1.2082737216329794e-05,
"loss": 10.4199,
"step": 68
},
{
"epoch": 2.358974358974359,
"grad_norm": 0.0905907079577446,
"learning_rate": 1.1533337816991932e-05,
"loss": 10.6594,
"step": 69
},
{
"epoch": 2.393162393162393,
"grad_norm": 0.09025926142930984,
"learning_rate": 1.1066679679603e-05,
"loss": 10.3229,
"step": 70
},
{
"epoch": 2.427350427350427,
"grad_norm": 0.09516981989145279,
"learning_rate": 1.0683651114450641e-05,
"loss": 10.6938,
"step": 71
},
{
"epoch": 2.4615384615384617,
"grad_norm": 0.11765694618225098,
"learning_rate": 1.0384981238178534e-05,
"loss": 10.066,
"step": 72
},
{
"epoch": 2.4957264957264957,
"grad_norm": 0.058387916535139084,
"learning_rate": 1.017123858587145e-05,
"loss": 5.3975,
"step": 73
},
{
"epoch": 2.52991452991453,
"grad_norm": 0.10583050549030304,
"learning_rate": 1.00428300288164e-05,
"loss": 15.0466,
"step": 74
},
{
"epoch": 2.564102564102564,
"grad_norm": 0.0866541638970375,
"learning_rate": 1e-05,
"loss": 10.3698,
"step": 75
},
{
"epoch": 2.564102564102564,
"eval_loss": 10.366877555847168,
"eval_runtime": 0.101,
"eval_samples_per_second": 495.053,
"eval_steps_per_second": 128.714,
"step": 75
}
],
"logging_steps": 1,
"max_steps": 75,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 25,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 64285739581440.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}