{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.49930871025083945,
  "eval_steps": 16,
  "global_step": 79,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.006320363420896702,
      "grad_norm": 0.48046875,
      "learning_rate": 2e-05,
      "loss": 0.6497,
      "step": 1
    },
    {
      "epoch": 0.006320363420896702,
      "eval_loss": 0.5998793244361877,
      "eval_runtime": 56.2331,
      "eval_samples_per_second": 18.957,
      "eval_steps_per_second": 18.957,
      "step": 1
    },
    {
      "epoch": 0.012640726841793404,
      "grad_norm": 0.470703125,
      "learning_rate": 4e-05,
      "loss": 0.6349,
      "step": 2
    },
    {
      "epoch": 0.018961090262690106,
      "grad_norm": 0.46875,
      "learning_rate": 6e-05,
      "loss": 0.5832,
      "step": 3
    },
    {
      "epoch": 0.025281453683586808,
      "grad_norm": 0.47265625,
      "learning_rate": 8e-05,
      "loss": 0.557,
      "step": 4
    },
    {
      "epoch": 0.03160181710448351,
      "grad_norm": 0.35546875,
      "learning_rate": 0.0001,
      "loss": 0.4966,
      "step": 5
    },
    {
      "epoch": 0.03792218052538021,
      "grad_norm": 0.31640625,
      "learning_rate": 0.00012,
      "loss": 0.3771,
      "step": 6
    },
    {
      "epoch": 0.04424254394627691,
      "grad_norm": 0.298828125,
      "learning_rate": 0.00014,
      "loss": 0.318,
      "step": 7
    },
    {
      "epoch": 0.050562907367173615,
      "grad_norm": 0.36328125,
      "learning_rate": 0.00016,
      "loss": 0.296,
      "step": 8
    },
    {
      "epoch": 0.05688327078807032,
      "grad_norm": 0.3046875,
      "learning_rate": 0.00018,
      "loss": 0.2682,
      "step": 9
    },
    {
      "epoch": 0.06320363420896702,
      "grad_norm": 0.29296875,
      "learning_rate": 0.0002,
      "loss": 0.3039,
      "step": 10
    },
    {
      "epoch": 0.06952399762986372,
      "grad_norm": 0.1845703125,
      "learning_rate": 0.00019999770790755575,
      "loss": 0.3096,
      "step": 11
    },
    {
      "epoch": 0.07584436105076042,
      "grad_norm": 0.16796875,
      "learning_rate": 0.00019999083173529673,
      "loss": 0.2575,
      "step": 12
    },
    {
      "epoch": 0.08216472447165712,
      "grad_norm": 0.1689453125,
      "learning_rate": 0.00019997937179843937,
      "loss": 0.289,
      "step": 13
    },
    {
      "epoch": 0.08848508789255383,
      "grad_norm": 0.1572265625,
      "learning_rate": 0.0001999633286223284,
      "loss": 0.2878,
      "step": 14
    },
    {
      "epoch": 0.09480545131345053,
      "grad_norm": 0.12353515625,
      "learning_rate": 0.00019994270294241266,
      "loss": 0.2274,
      "step": 15
    },
    {
      "epoch": 0.10112581473434723,
      "grad_norm": 0.11572265625,
      "learning_rate": 0.00019991749570421146,
      "loss": 0.2252,
      "step": 16
    },
    {
      "epoch": 0.10112581473434723,
      "eval_loss": 0.25023654103279114,
      "eval_runtime": 56.1603,
      "eval_samples_per_second": 18.981,
      "eval_steps_per_second": 18.981,
      "step": 16
    },
    {
      "epoch": 0.10744617815524393,
      "grad_norm": 0.1298828125,
      "learning_rate": 0.0001998877080632712,
      "loss": 0.2512,
      "step": 17
    },
    {
      "epoch": 0.11376654157614063,
      "grad_norm": 0.193359375,
      "learning_rate": 0.00019985334138511237,
      "loss": 0.2659,
      "step": 18
    },
    {
      "epoch": 0.12008690499703734,
      "grad_norm": 0.142578125,
      "learning_rate": 0.00019981439724516716,
      "loss": 0.2415,
      "step": 19
    },
    {
      "epoch": 0.12640726841793404,
      "grad_norm": 0.126953125,
      "learning_rate": 0.0001997708774287068,
      "loss": 0.2661,
      "step": 20
    },
    {
      "epoch": 0.13272763183883074,
      "grad_norm": 0.1142578125,
      "learning_rate": 0.00019972278393076023,
      "loss": 0.2046,
      "step": 21
    },
    {
      "epoch": 0.13904799525972744,
      "grad_norm": 0.11865234375,
      "learning_rate": 0.0001996701189560223,
      "loss": 0.2087,
      "step": 22
    },
    {
      "epoch": 0.14536835868062414,
      "grad_norm": 0.1435546875,
      "learning_rate": 0.00019961288491875278,
      "loss": 0.2246,
      "step": 23
    },
    {
      "epoch": 0.15168872210152085,
      "grad_norm": 0.119140625,
      "learning_rate": 0.00019955108444266585,
      "loss": 0.1831,
      "step": 24
    },
    {
      "epoch": 0.15800908552241755,
      "grad_norm": 0.1376953125,
      "learning_rate": 0.00019948472036080949,
      "loss": 0.2398,
      "step": 25
    },
    {
      "epoch": 0.16432944894331425,
      "grad_norm": 0.1201171875,
      "learning_rate": 0.00019941379571543596,
      "loss": 0.189,
      "step": 26
    },
    {
      "epoch": 0.17064981236421095,
      "grad_norm": 0.12890625,
      "learning_rate": 0.00019933831375786216,
      "loss": 0.2156,
      "step": 27
    },
    {
      "epoch": 0.17697017578510765,
      "grad_norm": 0.1259765625,
      "learning_rate": 0.00019925827794832056,
      "loss": 0.2012,
      "step": 28
    },
    {
      "epoch": 0.18329053920600435,
      "grad_norm": 0.11572265625,
      "learning_rate": 0.00019917369195580063,
      "loss": 0.1602,
      "step": 29
    },
    {
      "epoch": 0.18961090262690106,
      "grad_norm": 0.12890625,
      "learning_rate": 0.00019908455965788067,
      "loss": 0.1976,
      "step": 30
    },
    {
      "epoch": 0.19593126604779776,
      "grad_norm": 0.11279296875,
      "learning_rate": 0.00019899088514055004,
      "loss": 0.1874,
      "step": 31
    },
    {
      "epoch": 0.20225162946869446,
      "grad_norm": 0.1357421875,
      "learning_rate": 0.00019889267269802176,
      "loss": 0.2024,
      "step": 32
    },
    {
      "epoch": 0.20225162946869446,
      "eval_loss": 0.2020280808210373,
      "eval_runtime": 56.2737,
      "eval_samples_per_second": 18.943,
      "eval_steps_per_second": 18.943,
      "step": 32
    },
    {
      "epoch": 0.20857199288959116,
      "grad_norm": 0.1123046875,
      "learning_rate": 0.00019878992683253582,
      "loss": 0.1819,
      "step": 33
    },
    {
      "epoch": 0.21489235631048786,
      "grad_norm": 0.1123046875,
      "learning_rate": 0.00019868265225415265,
      "loss": 0.1794,
      "step": 34
    },
    {
      "epoch": 0.22121271973138457,
      "grad_norm": 0.12060546875,
      "learning_rate": 0.00019857085388053723,
      "loss": 0.1943,
      "step": 35
    },
    {
      "epoch": 0.22753308315228127,
      "grad_norm": 0.134765625,
      "learning_rate": 0.00019845453683673368,
      "loss": 0.2265,
      "step": 36
    },
    {
      "epoch": 0.23385344657317797,
      "grad_norm": 0.115234375,
      "learning_rate": 0.00019833370645493047,
      "loss": 0.181,
      "step": 37
    },
    {
      "epoch": 0.24017380999407467,
      "grad_norm": 0.12353515625,
      "learning_rate": 0.0001982083682742156,
      "loss": 0.1946,
      "step": 38
    },
    {
      "epoch": 0.24649417341497137,
      "grad_norm": 0.107421875,
      "learning_rate": 0.00019807852804032305,
      "loss": 0.1418,
      "step": 39
    },
    {
      "epoch": 0.2528145368358681,
      "grad_norm": 0.11572265625,
      "learning_rate": 0.00019794419170536916,
      "loss": 0.1651,
      "step": 40
    },
    {
      "epoch": 0.2591349002567648,
      "grad_norm": 0.1337890625,
      "learning_rate": 0.00019780536542758,
      "loss": 0.1821,
      "step": 41
    },
    {
      "epoch": 0.2654552636776615,
      "grad_norm": 0.12353515625,
      "learning_rate": 0.00019766205557100868,
      "loss": 0.1913,
      "step": 42
    },
    {
      "epoch": 0.2717756270985582,
      "grad_norm": 0.12451171875,
      "learning_rate": 0.00019751426870524407,
      "loss": 0.194,
      "step": 43
    },
    {
      "epoch": 0.2780959905194549,
      "grad_norm": 0.1162109375,
      "learning_rate": 0.00019736201160510931,
      "loss": 0.1832,
      "step": 44
    },
    {
      "epoch": 0.2844163539403516,
      "grad_norm": 0.12353515625,
      "learning_rate": 0.0001972052912503514,
      "loss": 0.19,
      "step": 45
    },
    {
      "epoch": 0.2907367173612483,
      "grad_norm": 0.11474609375,
      "learning_rate": 0.00019704411482532116,
      "loss": 0.1875,
      "step": 46
    },
    {
      "epoch": 0.297057080782145,
      "grad_norm": 0.1123046875,
      "learning_rate": 0.00019687848971864389,
      "loss": 0.184,
      "step": 47
    },
    {
      "epoch": 0.3033774442030417,
      "grad_norm": 0.107421875,
      "learning_rate": 0.0001967084235228807,
      "loss": 0.1581,
      "step": 48
    },
    {
      "epoch": 0.3033774442030417,
      "eval_loss": 0.18037649989128113,
      "eval_runtime": 56.5509,
      "eval_samples_per_second": 18.85,
      "eval_steps_per_second": 18.85,
      "step": 48
    },
    {
      "epoch": 0.3096978076239384,
      "grad_norm": 0.10302734375,
      "learning_rate": 0.00019653392403418043,
      "loss": 0.1766,
      "step": 49
    },
    {
      "epoch": 0.3160181710448351,
      "grad_norm": 0.11669921875,
      "learning_rate": 0.0001963549992519223,
      "loss": 0.1888,
      "step": 50
    },
    {
      "epoch": 0.3223385344657318,
      "grad_norm": 0.10009765625,
      "learning_rate": 0.00019617165737834916,
      "loss": 0.139,
      "step": 51
    },
    {
      "epoch": 0.3286588978866285,
      "grad_norm": 0.12451171875,
      "learning_rate": 0.0001959839068181914,
      "loss": 0.1845,
      "step": 52
    },
    {
      "epoch": 0.3349792613075252,
      "grad_norm": 0.12060546875,
      "learning_rate": 0.00019579175617828187,
      "loss": 0.2043,
      "step": 53
    },
    {
      "epoch": 0.3412996247284219,
      "grad_norm": 0.1142578125,
      "learning_rate": 0.00019559521426716118,
      "loss": 0.1678,
      "step": 54
    },
    {
      "epoch": 0.3476199881493186,
      "grad_norm": 0.11669921875,
      "learning_rate": 0.0001953942900946739,
      "loss": 0.1671,
      "step": 55
    },
    {
      "epoch": 0.3539403515702153,
      "grad_norm": 0.115234375,
      "learning_rate": 0.00019518899287155556,
      "loss": 0.1724,
      "step": 56
    },
    {
      "epoch": 0.360260714991112,
      "grad_norm": 0.126953125,
      "learning_rate": 0.0001949793320090105,
      "loss": 0.177,
      "step": 57
    },
    {
      "epoch": 0.3665810784120087,
      "grad_norm": 0.1259765625,
      "learning_rate": 0.00019476531711828027,
      "loss": 0.1644,
      "step": 58
    },
    {
      "epoch": 0.3729014418329054,
      "grad_norm": 0.1142578125,
      "learning_rate": 0.0001945469580102031,
      "loss": 0.1564,
      "step": 59
    },
    {
      "epoch": 0.3792218052538021,
      "grad_norm": 0.10302734375,
      "learning_rate": 0.0001943242646947643,
      "loss": 0.1353,
      "step": 60
    },
    {
      "epoch": 0.3855421686746988,
      "grad_norm": 0.10791015625,
      "learning_rate": 0.00019409724738063714,
      "loss": 0.1622,
      "step": 61
    },
    {
      "epoch": 0.3918625320955955,
      "grad_norm": 0.10791015625,
      "learning_rate": 0.00019386591647471506,
      "loss": 0.1564,
      "step": 62
    },
    {
      "epoch": 0.3981828955164922,
      "grad_norm": 0.11376953125,
      "learning_rate": 0.00019363028258163447,
      "loss": 0.176,
      "step": 63
    },
    {
      "epoch": 0.4045032589373889,
      "grad_norm": 0.1162109375,
      "learning_rate": 0.00019339035650328869,
      "loss": 0.1912,
      "step": 64
    },
    {
      "epoch": 0.4045032589373889,
      "eval_loss": 0.1681559830904007,
      "eval_runtime": 56.4912,
      "eval_samples_per_second": 18.87,
      "eval_steps_per_second": 18.87,
      "step": 64
    },
    {
      "epoch": 0.4108236223582856,
      "grad_norm": 0.11572265625,
      "learning_rate": 0.0001931461492383327,
      "loss": 0.1959,
      "step": 65
    },
    {
      "epoch": 0.4171439857791823,
      "grad_norm": 0.0966796875,
      "learning_rate": 0.00019289767198167916,
      "loss": 0.1379,
      "step": 66
    },
    {
      "epoch": 0.423464349200079,
      "grad_norm": 0.1142578125,
      "learning_rate": 0.00019264493612398481,
      "loss": 0.1669,
      "step": 67
    },
    {
      "epoch": 0.42978471262097573,
      "grad_norm": 0.09619140625,
      "learning_rate": 0.0001923879532511287,
      "loss": 0.1279,
      "step": 68
    },
    {
      "epoch": 0.43610507604187243,
      "grad_norm": 0.10986328125,
      "learning_rate": 0.0001921267351436808,
      "loss": 0.1535,
      "step": 69
    },
    {
      "epoch": 0.44242543946276913,
      "grad_norm": 0.11962890625,
      "learning_rate": 0.0001918612937763622,
      "loss": 0.1697,
      "step": 70
    },
    {
      "epoch": 0.44874580288366583,
      "grad_norm": 0.107421875,
      "learning_rate": 0.00019159164131749587,
      "loss": 0.166,
      "step": 71
    },
    {
      "epoch": 0.45506616630456254,
      "grad_norm": 0.10986328125,
      "learning_rate": 0.00019131779012844912,
      "loss": 0.1508,
      "step": 72
    },
    {
      "epoch": 0.46138652972545924,
      "grad_norm": 0.1201171875,
      "learning_rate": 0.00019103975276306678,
      "loss": 0.1617,
      "step": 73
    },
    {
      "epoch": 0.46770689314635594,
      "grad_norm": 0.1103515625,
      "learning_rate": 0.00019075754196709572,
      "loss": 0.1436,
      "step": 74
    },
    {
      "epoch": 0.47402725656725264,
      "grad_norm": 0.1025390625,
      "learning_rate": 0.0001904711706776006,
      "loss": 0.1408,
      "step": 75
    },
    {
      "epoch": 0.48034761998814934,
      "grad_norm": 0.1171875,
      "learning_rate": 0.00019018065202237083,
      "loss": 0.1594,
      "step": 76
    },
    {
      "epoch": 0.48666798340904605,
      "grad_norm": 0.10546875,
      "learning_rate": 0.00018988599931931866,
      "loss": 0.1394,
      "step": 77
    },
    {
      "epoch": 0.49298834682994275,
      "grad_norm": 0.111328125,
      "learning_rate": 0.0001895872260758688,
      "loss": 0.1448,
      "step": 78
    },
    {
      "epoch": 0.49930871025083945,
      "grad_norm": 0.111328125,
      "learning_rate": 0.00018928434598833912,
      "loss": 0.156,
      "step": 79
    }
  ],
  "logging_steps": 1,
  "max_steps": 474,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 79,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.384934679033938e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
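The file above has the shape of a `trainer_state.json` saved by the Hugging Face `transformers` Trainer in a checkpoint directory (`save_steps: 79`, `logging_steps: 1`, `eval_steps: 16`). A minimal sketch of how such a log can be inspected, assuming only the standard library plus matplotlib; the path `checkpoint-79/trainer_state.json` is a hypothetical location for this file:

```python
import json

import matplotlib.pyplot as plt

# Hypothetical path; point this at wherever the trainer_state.json above lives.
with open("checkpoint-79/trainer_state.json") as f:
    state = json.load(f)

# log_history mixes two kinds of entries: training logs carry a "loss" key
# (written every step, since logging_steps = 1), while evaluation logs carry
# an "eval_loss" key (written every 16 steps, since eval_steps = 16).
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

plt.plot([e["step"] for e in train_logs],
         [e["loss"] for e in train_logs], label="train loss")
plt.plot([e["step"] for e in eval_logs],
         [e["eval_loss"] for e in eval_logs], marker="o", label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.savefig("loss_curve.png")
```

The learning-rate column is consistent with linear warmup to 2e-4 over the first 10 steps followed by cosine decay toward 0 at `max_steps` (474): for example, step 11 gives 2e-4 * 0.5 * (1 + cos(pi * 1/464)) ≈ 0.000199998, matching the logged value.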