|
{
  "best_metric": 1.5559238195419312,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.008104056080068074,
  "eval_steps": 50,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00016208112160136148,
      "grad_norm": 21.450843811035156,
      "learning_rate": 1e-05,
      "loss": 9.9546,
      "step": 1
    },
    {
      "epoch": 0.00016208112160136148,
      "eval_loss": 2.8611927032470703,
      "eval_runtime": 1024.5411,
      "eval_samples_per_second": 10.143,
      "eval_steps_per_second": 2.536,
      "step": 1
    },
    {
      "epoch": 0.00032416224320272295,
      "grad_norm": 20.973268508911133,
      "learning_rate": 2e-05,
      "loss": 10.6012,
      "step": 2
    },
    {
      "epoch": 0.00048624336480408443,
      "grad_norm": 18.114707946777344,
      "learning_rate": 3e-05,
      "loss": 10.833,
      "step": 3
    },
    {
      "epoch": 0.0006483244864054459,
      "grad_norm": 12.959110260009766,
      "learning_rate": 4e-05,
      "loss": 10.37,
      "step": 4
    },
    {
      "epoch": 0.0008104056080068074,
      "grad_norm": 14.474491119384766,
      "learning_rate": 5e-05,
      "loss": 9.5092,
      "step": 5
    },
    {
      "epoch": 0.0009724867296081689,
      "grad_norm": 14.347698211669922,
      "learning_rate": 6e-05,
      "loss": 8.3771,
      "step": 6
    },
    {
      "epoch": 0.0011345678512095303,
      "grad_norm": 11.730208396911621,
      "learning_rate": 7e-05,
      "loss": 8.6556,
      "step": 7
    },
    {
      "epoch": 0.0012966489728108918,
      "grad_norm": 10.065325736999512,
      "learning_rate": 8e-05,
      "loss": 8.7092,
      "step": 8
    },
    {
      "epoch": 0.0014587300944122533,
      "grad_norm": 8.963871955871582,
      "learning_rate": 9e-05,
      "loss": 8.0248,
      "step": 9
    },
    {
      "epoch": 0.0016208112160136148,
      "grad_norm": 9.340985298156738,
      "learning_rate": 0.0001,
      "loss": 7.8928,
      "step": 10
    },
    {
      "epoch": 0.0017828923376149762,
      "grad_norm": 7.826351165771484,
      "learning_rate": 9.999316524962345e-05,
      "loss": 7.7345,
      "step": 11
    },
    {
      "epoch": 0.0019449734592163377,
      "grad_norm": 7.2508697509765625,
      "learning_rate": 9.997266286704631e-05,
      "loss": 7.6428,
      "step": 12
    },
    {
      "epoch": 0.002107054580817699,
      "grad_norm": 6.509117603302002,
      "learning_rate": 9.993849845741524e-05,
      "loss": 6.6641,
      "step": 13
    },
    {
      "epoch": 0.0022691357024190607,
      "grad_norm": 7.647972106933594,
      "learning_rate": 9.989068136093873e-05,
      "loss": 7.2049,
      "step": 14
    },
    {
      "epoch": 0.002431216824020422,
      "grad_norm": 6.6707000732421875,
      "learning_rate": 9.98292246503335e-05,
      "loss": 6.8951,
      "step": 15
    },
    {
      "epoch": 0.0025932979456217836,
      "grad_norm": 6.7753095626831055,
      "learning_rate": 9.975414512725057e-05,
      "loss": 7.1702,
      "step": 16
    },
    {
      "epoch": 0.002755379067223145,
      "grad_norm": 5.891744136810303,
      "learning_rate": 9.966546331768191e-05,
      "loss": 6.3936,
      "step": 17
    },
    {
      "epoch": 0.0029174601888245066,
      "grad_norm": 6.441070556640625,
      "learning_rate": 9.956320346634876e-05,
      "loss": 6.748,
      "step": 18
    },
    {
      "epoch": 0.003079541310425868,
      "grad_norm": 5.625812530517578,
      "learning_rate": 9.944739353007344e-05,
      "loss": 6.2807,
      "step": 19
    },
    {
      "epoch": 0.0032416224320272295,
      "grad_norm": 5.964963436126709,
      "learning_rate": 9.931806517013612e-05,
      "loss": 6.8812,
      "step": 20
    },
    {
      "epoch": 0.003403703553628591,
      "grad_norm": 6.139531135559082,
      "learning_rate": 9.917525374361912e-05,
      "loss": 6.2387,
      "step": 21
    },
    {
      "epoch": 0.0035657846752299525,
      "grad_norm": 5.533466815948486,
      "learning_rate": 9.901899829374047e-05,
      "loss": 6.1352,
      "step": 22
    },
    {
      "epoch": 0.003727865796831314,
      "grad_norm": 5.690276622772217,
      "learning_rate": 9.884934153917997e-05,
      "loss": 6.4498,
      "step": 23
    },
    {
      "epoch": 0.0038899469184326754,
      "grad_norm": 5.708102703094482,
      "learning_rate": 9.86663298624003e-05,
      "loss": 6.3784,
      "step": 24
    },
    {
      "epoch": 0.004052028040034037,
      "grad_norm": 5.579735279083252,
      "learning_rate": 9.847001329696653e-05,
      "loss": 6.3002,
      "step": 25
    },
    {
      "epoch": 0.004214109161635398,
      "grad_norm": 5.780838966369629,
      "learning_rate": 9.826044551386744e-05,
      "loss": 5.8777,
      "step": 26
    },
    {
      "epoch": 0.00437619028323676,
      "grad_norm": 5.95778226852417,
      "learning_rate": 9.803768380684242e-05,
      "loss": 6.3759,
      "step": 27
    },
    {
      "epoch": 0.004538271404838121,
      "grad_norm": 5.736791133880615,
      "learning_rate": 9.780178907671789e-05,
      "loss": 6.1456,
      "step": 28
    },
    {
      "epoch": 0.004700352526439483,
      "grad_norm": 5.662046909332275,
      "learning_rate": 9.755282581475769e-05,
      "loss": 6.1055,
      "step": 29
    },
    {
      "epoch": 0.004862433648040844,
      "grad_norm": 5.7258806228637695,
      "learning_rate": 9.729086208503174e-05,
      "loss": 6.2523,
      "step": 30
    },
    {
      "epoch": 0.005024514769642206,
      "grad_norm": 5.707207202911377,
      "learning_rate": 9.701596950580806e-05,
      "loss": 5.7521,
      "step": 31
    },
    {
      "epoch": 0.005186595891243567,
      "grad_norm": 5.748448371887207,
      "learning_rate": 9.672822322997305e-05,
      "loss": 6.0935,
      "step": 32
    },
    {
      "epoch": 0.005348677012844929,
      "grad_norm": 5.671350479125977,
      "learning_rate": 9.642770192448536e-05,
      "loss": 6.2013,
      "step": 33
    },
    {
      "epoch": 0.00551075813444629,
      "grad_norm": 6.076589584350586,
      "learning_rate": 9.611448774886924e-05,
      "loss": 6.2627,
      "step": 34
    },
    {
      "epoch": 0.005672839256047652,
      "grad_norm": 6.076786041259766,
      "learning_rate": 9.578866633275288e-05,
      "loss": 6.2,
      "step": 35
    },
    {
      "epoch": 0.005834920377649013,
      "grad_norm": 6.442121982574463,
      "learning_rate": 9.545032675245813e-05,
      "loss": 6.097,
      "step": 36
    },
    {
      "epoch": 0.005997001499250375,
      "grad_norm": 6.375777244567871,
      "learning_rate": 9.509956150664796e-05,
      "loss": 6.3335,
      "step": 37
    },
    {
      "epoch": 0.006159082620851736,
      "grad_norm": 6.780538558959961,
      "learning_rate": 9.473646649103818e-05,
      "loss": 6.4989,
      "step": 38
    },
    {
      "epoch": 0.006321163742453098,
      "grad_norm": 6.880222797393799,
      "learning_rate": 9.43611409721806e-05,
      "loss": 6.6263,
      "step": 39
    },
    {
      "epoch": 0.006483244864054459,
      "grad_norm": 6.89586877822876,
      "learning_rate": 9.397368756032445e-05,
      "loss": 6.6472,
      "step": 40
    },
    {
      "epoch": 0.0066453259856558205,
      "grad_norm": 7.324431896209717,
      "learning_rate": 9.357421218136386e-05,
      "loss": 6.9357,
      "step": 41
    },
    {
      "epoch": 0.006807407107257182,
      "grad_norm": 7.074251174926758,
      "learning_rate": 9.316282404787871e-05,
      "loss": 6.4737,
      "step": 42
    },
    {
      "epoch": 0.0069694882288585435,
      "grad_norm": 7.916954040527344,
      "learning_rate": 9.273963562927695e-05,
      "loss": 6.9543,
      "step": 43
    },
    {
      "epoch": 0.007131569350459905,
      "grad_norm": 8.427472114562988,
      "learning_rate": 9.230476262104677e-05,
      "loss": 6.6134,
      "step": 44
    },
    {
      "epoch": 0.007293650472061266,
      "grad_norm": 8.175810813903809,
      "learning_rate": 9.185832391312644e-05,
      "loss": 6.5496,
      "step": 45
    },
    {
      "epoch": 0.007455731593662628,
      "grad_norm": 8.738851547241211,
      "learning_rate": 9.140044155740101e-05,
      "loss": 6.8652,
      "step": 46
    },
    {
      "epoch": 0.007617812715263989,
      "grad_norm": 9.828368186950684,
      "learning_rate": 9.093124073433463e-05,
      "loss": 6.7311,
      "step": 47
    },
    {
      "epoch": 0.007779893836865351,
      "grad_norm": 10.030229568481445,
      "learning_rate": 9.045084971874738e-05,
      "loss": 6.7401,
      "step": 48
    },
    {
      "epoch": 0.007941974958466713,
      "grad_norm": 12.767650604248047,
      "learning_rate": 8.995939984474624e-05,
      "loss": 7.082,
      "step": 49
    },
    {
      "epoch": 0.008104056080068074,
      "grad_norm": 14.701695442199707,
      "learning_rate": 8.945702546981969e-05,
      "loss": 5.9265,
      "step": 50
    },
    {
      "epoch": 0.008104056080068074,
      "eval_loss": 1.5559238195419312,
      "eval_runtime": 1031.1317,
      "eval_samples_per_second": 10.078,
      "eval_steps_per_second": 2.52,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.095305596449587e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|