{
  "best_metric": 1.0271567106246948,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.0028107652308340947,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 5.621530461668189e-05,
      "grad_norm": 1.0037671327590942,
      "learning_rate": 0.0001,
      "loss": 4.8903,
      "step": 1
    },
    {
      "epoch": 5.621530461668189e-05,
      "eval_loss": 1.4244569540023804,
      "eval_runtime": 7759.2914,
      "eval_samples_per_second": 0.965,
      "eval_steps_per_second": 0.483,
      "step": 1
    },
    {
      "epoch": 0.00011243060923336378,
      "grad_norm": 0.9153568744659424,
      "learning_rate": 0.0002,
      "loss": 4.4247,
      "step": 2
    },
    {
      "epoch": 0.00016864591385004567,
      "grad_norm": 1.173575520515442,
      "learning_rate": 0.00019978589232386035,
      "loss": 5.1528,
      "step": 3
    },
    {
      "epoch": 0.00022486121846672757,
      "grad_norm": 1.2566075325012207,
      "learning_rate": 0.00019914448613738106,
      "loss": 4.5673,
      "step": 4
    },
    {
      "epoch": 0.00028107652308340944,
      "grad_norm": 1.372121810913086,
      "learning_rate": 0.00019807852804032305,
      "loss": 4.9937,
      "step": 5
    },
    {
      "epoch": 0.00033729182770009133,
      "grad_norm": 1.0780651569366455,
      "learning_rate": 0.00019659258262890683,
      "loss": 4.4226,
      "step": 6
    },
    {
      "epoch": 0.00039350713231677323,
      "grad_norm": 0.942474901676178,
      "learning_rate": 0.0001946930129495106,
      "loss": 4.4456,
      "step": 7
    },
    {
      "epoch": 0.00044972243693345513,
      "grad_norm": 1.527437686920166,
      "learning_rate": 0.0001923879532511287,
      "loss": 4.7015,
      "step": 8
    },
    {
      "epoch": 0.000505937741550137,
      "grad_norm": 2.087895631790161,
      "learning_rate": 0.00018968727415326884,
      "loss": 4.1849,
      "step": 9
    },
    {
      "epoch": 0.0005621530461668189,
      "grad_norm": 0.8344571590423584,
      "learning_rate": 0.00018660254037844388,
      "loss": 3.8176,
      "step": 10
    },
    {
      "epoch": 0.0006183683507835008,
      "grad_norm": 0.8567643761634827,
      "learning_rate": 0.00018314696123025454,
      "loss": 4.0602,
      "step": 11
    },
    {
      "epoch": 0.0006745836554001827,
      "grad_norm": 1.0290203094482422,
      "learning_rate": 0.00017933533402912354,
      "loss": 3.8269,
      "step": 12
    },
    {
      "epoch": 0.0007307989600168646,
      "grad_norm": 1.0773515701293945,
      "learning_rate": 0.00017518398074789775,
      "loss": 4.0728,
      "step": 13
    },
    {
      "epoch": 0.0007870142646335465,
      "grad_norm": 0.9667721390724182,
      "learning_rate": 0.00017071067811865476,
      "loss": 3.9006,
      "step": 14
    },
    {
      "epoch": 0.0008432295692502284,
      "grad_norm": 1.1101466417312622,
      "learning_rate": 0.00016593458151000688,
      "loss": 4.1819,
      "step": 15
    },
    {
      "epoch": 0.0008994448738669103,
      "grad_norm": 1.3487257957458496,
      "learning_rate": 0.00016087614290087208,
      "loss": 4.6194,
      "step": 16
    },
    {
      "epoch": 0.0009556601784835922,
      "grad_norm": 1.091251015663147,
      "learning_rate": 0.00015555702330196023,
      "loss": 4.0581,
      "step": 17
    },
    {
      "epoch": 0.001011875483100274,
      "grad_norm": 1.0720723867416382,
      "learning_rate": 0.00015000000000000001,
      "loss": 3.7407,
      "step": 18
    },
    {
      "epoch": 0.001068090787716956,
      "grad_norm": 1.1119567155838013,
      "learning_rate": 0.00014422886902190014,
      "loss": 4.1069,
      "step": 19
    },
    {
      "epoch": 0.0011243060923336377,
      "grad_norm": 1.2740784883499146,
      "learning_rate": 0.000138268343236509,
      "loss": 3.5911,
      "step": 20
    },
    {
      "epoch": 0.0011805213969503198,
      "grad_norm": 1.1338714361190796,
      "learning_rate": 0.00013214394653031616,
      "loss": 3.7111,
      "step": 21
    },
    {
      "epoch": 0.0012367367015670015,
      "grad_norm": 1.1218507289886475,
      "learning_rate": 0.00012588190451025207,
      "loss": 3.8789,
      "step": 22
    },
    {
      "epoch": 0.0012929520061836835,
      "grad_norm": 1.2838630676269531,
      "learning_rate": 0.00011950903220161285,
      "loss": 3.9582,
      "step": 23
    },
    {
      "epoch": 0.0013491673108003653,
      "grad_norm": 1.4839881658554077,
      "learning_rate": 0.00011305261922200519,
      "loss": 4.4876,
      "step": 24
    },
    {
      "epoch": 0.0014053826154170473,
      "grad_norm": 1.1227571964263916,
      "learning_rate": 0.00010654031292301432,
      "loss": 4.0313,
      "step": 25
    },
    {
      "epoch": 0.0014053826154170473,
      "eval_loss": 1.0495991706848145,
      "eval_runtime": 7778.5368,
      "eval_samples_per_second": 0.963,
      "eval_steps_per_second": 0.481,
      "step": 25
    },
    {
      "epoch": 0.0014615979200337291,
      "grad_norm": 1.109177589416504,
      "learning_rate": 0.0001,
      "loss": 4.4231,
      "step": 26
    },
    {
      "epoch": 0.0015178132246504111,
      "grad_norm": 1.3502737283706665,
      "learning_rate": 9.345968707698569e-05,
      "loss": 4.1029,
      "step": 27
    },
    {
      "epoch": 0.001574028529267093,
      "grad_norm": 1.2638400793075562,
      "learning_rate": 8.694738077799488e-05,
      "loss": 4.4384,
      "step": 28
    },
    {
      "epoch": 0.001630243833883775,
      "grad_norm": 1.253785490989685,
      "learning_rate": 8.049096779838719e-05,
      "loss": 3.7949,
      "step": 29
    },
    {
      "epoch": 0.0016864591385004567,
      "grad_norm": 1.2122159004211426,
      "learning_rate": 7.411809548974792e-05,
      "loss": 4.5219,
      "step": 30
    },
    {
      "epoch": 0.0017426744431171387,
      "grad_norm": 1.1317585706710815,
      "learning_rate": 6.785605346968386e-05,
      "loss": 4.0376,
      "step": 31
    },
    {
      "epoch": 0.0017988897477338205,
      "grad_norm": 3.269996404647827,
      "learning_rate": 6.173165676349103e-05,
      "loss": 4.1345,
      "step": 32
    },
    {
      "epoch": 0.0018551050523505025,
      "grad_norm": 2.4214236736297607,
      "learning_rate": 5.577113097809989e-05,
      "loss": 3.9074,
      "step": 33
    },
    {
      "epoch": 0.0019113203569671843,
      "grad_norm": 1.186686635017395,
      "learning_rate": 5.000000000000002e-05,
      "loss": 3.9877,
      "step": 34
    },
    {
      "epoch": 0.001967535661583866,
      "grad_norm": 1.3781710863113403,
      "learning_rate": 4.444297669803981e-05,
      "loss": 4.4423,
      "step": 35
    },
    {
      "epoch": 0.002023750966200548,
      "grad_norm": 1.2803940773010254,
      "learning_rate": 3.9123857099127936e-05,
      "loss": 4.1471,
      "step": 36
    },
    {
      "epoch": 0.00207996627081723,
      "grad_norm": 1.5898265838623047,
      "learning_rate": 3.406541848999312e-05,
      "loss": 4.409,
      "step": 37
    },
    {
      "epoch": 0.002136181575433912,
      "grad_norm": 1.3593076467514038,
      "learning_rate": 2.9289321881345254e-05,
      "loss": 3.7433,
      "step": 38
    },
    {
      "epoch": 0.0021923968800505937,
      "grad_norm": 1.3938697576522827,
      "learning_rate": 2.4816019252102273e-05,
      "loss": 4.0942,
      "step": 39
    },
    {
      "epoch": 0.0022486121846672755,
      "grad_norm": 1.291236162185669,
      "learning_rate": 2.0664665970876496e-05,
      "loss": 4.0084,
      "step": 40
    },
    {
      "epoch": 0.0023048274892839577,
      "grad_norm": 1.3661845922470093,
      "learning_rate": 1.6853038769745467e-05,
      "loss": 4.1608,
      "step": 41
    },
    {
      "epoch": 0.0023610427939006395,
      "grad_norm": 1.5577927827835083,
      "learning_rate": 1.339745962155613e-05,
      "loss": 4.073,
      "step": 42
    },
    {
      "epoch": 0.0024172580985173213,
      "grad_norm": 1.5352163314819336,
      "learning_rate": 1.0312725846731175e-05,
      "loss": 4.3885,
      "step": 43
    },
    {
      "epoch": 0.002473473403134003,
      "grad_norm": 1.6036438941955566,
      "learning_rate": 7.612046748871327e-06,
      "loss": 4.3737,
      "step": 44
    },
    {
      "epoch": 0.0025296887077506853,
      "grad_norm": 1.6673026084899902,
      "learning_rate": 5.306987050489442e-06,
      "loss": 4.0295,
      "step": 45
    },
    {
      "epoch": 0.002585904012367367,
      "grad_norm": 1.7688616514205933,
      "learning_rate": 3.40741737109318e-06,
      "loss": 4.6062,
      "step": 46
    },
    {
      "epoch": 0.002642119316984049,
      "grad_norm": 1.910326600074768,
      "learning_rate": 1.921471959676957e-06,
      "loss": 5.2657,
      "step": 47
    },
    {
      "epoch": 0.0026983346216007307,
      "grad_norm": 2.360198736190796,
      "learning_rate": 8.555138626189618e-07,
      "loss": 4.7428,
      "step": 48
    },
    {
      "epoch": 0.002754549926217413,
      "grad_norm": 1.9681812524795532,
      "learning_rate": 2.141076761396521e-07,
      "loss": 4.1981,
      "step": 49
    },
    {
      "epoch": 0.0028107652308340947,
      "grad_norm": 3.8947713375091553,
      "learning_rate": 0.0,
      "loss": 5.1876,
      "step": 50
    },
    {
      "epoch": 0.0028107652308340947,
      "eval_loss": 1.0271567106246948,
      "eval_runtime": 7776.1879,
      "eval_samples_per_second": 0.963,
      "eval_steps_per_second": 0.482,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.361684987904e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}