{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9986174205016789,
"eval_steps": 16,
"global_step": 158,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006320363420896702,
"grad_norm": 0.48046875,
"learning_rate": 2e-05,
"loss": 0.6497,
"step": 1
},
{
"epoch": 0.006320363420896702,
"eval_loss": 0.5998793244361877,
"eval_runtime": 56.2331,
"eval_samples_per_second": 18.957,
"eval_steps_per_second": 18.957,
"step": 1
},
{
"epoch": 0.012640726841793404,
"grad_norm": 0.470703125,
"learning_rate": 4e-05,
"loss": 0.6349,
"step": 2
},
{
"epoch": 0.018961090262690106,
"grad_norm": 0.46875,
"learning_rate": 6e-05,
"loss": 0.5832,
"step": 3
},
{
"epoch": 0.025281453683586808,
"grad_norm": 0.47265625,
"learning_rate": 8e-05,
"loss": 0.557,
"step": 4
},
{
"epoch": 0.03160181710448351,
"grad_norm": 0.35546875,
"learning_rate": 0.0001,
"loss": 0.4966,
"step": 5
},
{
"epoch": 0.03792218052538021,
"grad_norm": 0.31640625,
"learning_rate": 0.00012,
"loss": 0.3771,
"step": 6
},
{
"epoch": 0.04424254394627691,
"grad_norm": 0.298828125,
"learning_rate": 0.00014,
"loss": 0.318,
"step": 7
},
{
"epoch": 0.050562907367173615,
"grad_norm": 0.36328125,
"learning_rate": 0.00016,
"loss": 0.296,
"step": 8
},
{
"epoch": 0.05688327078807032,
"grad_norm": 0.3046875,
"learning_rate": 0.00018,
"loss": 0.2682,
"step": 9
},
{
"epoch": 0.06320363420896702,
"grad_norm": 0.29296875,
"learning_rate": 0.0002,
"loss": 0.3039,
"step": 10
},
{
"epoch": 0.06952399762986372,
"grad_norm": 0.1845703125,
"learning_rate": 0.00019999770790755575,
"loss": 0.3096,
"step": 11
},
{
"epoch": 0.07584436105076042,
"grad_norm": 0.16796875,
"learning_rate": 0.00019999083173529673,
"loss": 0.2575,
"step": 12
},
{
"epoch": 0.08216472447165712,
"grad_norm": 0.1689453125,
"learning_rate": 0.00019997937179843937,
"loss": 0.289,
"step": 13
},
{
"epoch": 0.08848508789255383,
"grad_norm": 0.1572265625,
"learning_rate": 0.0001999633286223284,
"loss": 0.2878,
"step": 14
},
{
"epoch": 0.09480545131345053,
"grad_norm": 0.12353515625,
"learning_rate": 0.00019994270294241266,
"loss": 0.2274,
"step": 15
},
{
"epoch": 0.10112581473434723,
"grad_norm": 0.11572265625,
"learning_rate": 0.00019991749570421146,
"loss": 0.2252,
"step": 16
},
{
"epoch": 0.10112581473434723,
"eval_loss": 0.25023654103279114,
"eval_runtime": 56.1603,
"eval_samples_per_second": 18.981,
"eval_steps_per_second": 18.981,
"step": 16
},
{
"epoch": 0.10744617815524393,
"grad_norm": 0.1298828125,
"learning_rate": 0.0001998877080632712,
"loss": 0.2512,
"step": 17
},
{
"epoch": 0.11376654157614063,
"grad_norm": 0.193359375,
"learning_rate": 0.00019985334138511237,
"loss": 0.2659,
"step": 18
},
{
"epoch": 0.12008690499703734,
"grad_norm": 0.142578125,
"learning_rate": 0.00019981439724516716,
"loss": 0.2415,
"step": 19
},
{
"epoch": 0.12640726841793404,
"grad_norm": 0.126953125,
"learning_rate": 0.0001997708774287068,
"loss": 0.2661,
"step": 20
},
{
"epoch": 0.13272763183883074,
"grad_norm": 0.1142578125,
"learning_rate": 0.00019972278393076023,
"loss": 0.2046,
"step": 21
},
{
"epoch": 0.13904799525972744,
"grad_norm": 0.11865234375,
"learning_rate": 0.0001996701189560223,
"loss": 0.2087,
"step": 22
},
{
"epoch": 0.14536835868062414,
"grad_norm": 0.1435546875,
"learning_rate": 0.00019961288491875278,
"loss": 0.2246,
"step": 23
},
{
"epoch": 0.15168872210152085,
"grad_norm": 0.119140625,
"learning_rate": 0.00019955108444266585,
"loss": 0.1831,
"step": 24
},
{
"epoch": 0.15800908552241755,
"grad_norm": 0.1376953125,
"learning_rate": 0.00019948472036080949,
"loss": 0.2398,
"step": 25
},
{
"epoch": 0.16432944894331425,
"grad_norm": 0.1201171875,
"learning_rate": 0.00019941379571543596,
"loss": 0.189,
"step": 26
},
{
"epoch": 0.17064981236421095,
"grad_norm": 0.12890625,
"learning_rate": 0.00019933831375786216,
"loss": 0.2156,
"step": 27
},
{
"epoch": 0.17697017578510765,
"grad_norm": 0.1259765625,
"learning_rate": 0.00019925827794832056,
"loss": 0.2012,
"step": 28
},
{
"epoch": 0.18329053920600435,
"grad_norm": 0.11572265625,
"learning_rate": 0.00019917369195580063,
"loss": 0.1602,
"step": 29
},
{
"epoch": 0.18961090262690106,
"grad_norm": 0.12890625,
"learning_rate": 0.00019908455965788067,
"loss": 0.1976,
"step": 30
},
{
"epoch": 0.19593126604779776,
"grad_norm": 0.11279296875,
"learning_rate": 0.00019899088514055004,
"loss": 0.1874,
"step": 31
},
{
"epoch": 0.20225162946869446,
"grad_norm": 0.1357421875,
"learning_rate": 0.00019889267269802176,
"loss": 0.2024,
"step": 32
},
{
"epoch": 0.20225162946869446,
"eval_loss": 0.2020280808210373,
"eval_runtime": 56.2737,
"eval_samples_per_second": 18.943,
"eval_steps_per_second": 18.943,
"step": 32
},
{
"epoch": 0.20857199288959116,
"grad_norm": 0.1123046875,
"learning_rate": 0.00019878992683253582,
"loss": 0.1819,
"step": 33
},
{
"epoch": 0.21489235631048786,
"grad_norm": 0.1123046875,
"learning_rate": 0.00019868265225415265,
"loss": 0.1794,
"step": 34
},
{
"epoch": 0.22121271973138457,
"grad_norm": 0.12060546875,
"learning_rate": 0.00019857085388053723,
"loss": 0.1943,
"step": 35
},
{
"epoch": 0.22753308315228127,
"grad_norm": 0.134765625,
"learning_rate": 0.00019845453683673368,
"loss": 0.2265,
"step": 36
},
{
"epoch": 0.23385344657317797,
"grad_norm": 0.115234375,
"learning_rate": 0.00019833370645493047,
"loss": 0.181,
"step": 37
},
{
"epoch": 0.24017380999407467,
"grad_norm": 0.12353515625,
"learning_rate": 0.0001982083682742156,
"loss": 0.1946,
"step": 38
},
{
"epoch": 0.24649417341497137,
"grad_norm": 0.107421875,
"learning_rate": 0.00019807852804032305,
"loss": 0.1418,
"step": 39
},
{
"epoch": 0.2528145368358681,
"grad_norm": 0.11572265625,
"learning_rate": 0.00019794419170536916,
"loss": 0.1651,
"step": 40
},
{
"epoch": 0.2591349002567648,
"grad_norm": 0.1337890625,
"learning_rate": 0.00019780536542758,
"loss": 0.1821,
"step": 41
},
{
"epoch": 0.2654552636776615,
"grad_norm": 0.12353515625,
"learning_rate": 0.00019766205557100868,
"loss": 0.1913,
"step": 42
},
{
"epoch": 0.2717756270985582,
"grad_norm": 0.12451171875,
"learning_rate": 0.00019751426870524407,
"loss": 0.194,
"step": 43
},
{
"epoch": 0.2780959905194549,
"grad_norm": 0.1162109375,
"learning_rate": 0.00019736201160510931,
"loss": 0.1832,
"step": 44
},
{
"epoch": 0.2844163539403516,
"grad_norm": 0.12353515625,
"learning_rate": 0.0001972052912503514,
"loss": 0.19,
"step": 45
},
{
"epoch": 0.2907367173612483,
"grad_norm": 0.11474609375,
"learning_rate": 0.00019704411482532116,
"loss": 0.1875,
"step": 46
},
{
"epoch": 0.297057080782145,
"grad_norm": 0.1123046875,
"learning_rate": 0.00019687848971864389,
"loss": 0.184,
"step": 47
},
{
"epoch": 0.3033774442030417,
"grad_norm": 0.107421875,
"learning_rate": 0.0001967084235228807,
"loss": 0.1581,
"step": 48
},
{
"epoch": 0.3033774442030417,
"eval_loss": 0.18037649989128113,
"eval_runtime": 56.5509,
"eval_samples_per_second": 18.85,
"eval_steps_per_second": 18.85,
"step": 48
},
{
"epoch": 0.3096978076239384,
"grad_norm": 0.10302734375,
"learning_rate": 0.00019653392403418043,
"loss": 0.1766,
"step": 49
},
{
"epoch": 0.3160181710448351,
"grad_norm": 0.11669921875,
"learning_rate": 0.0001963549992519223,
"loss": 0.1888,
"step": 50
},
{
"epoch": 0.3223385344657318,
"grad_norm": 0.10009765625,
"learning_rate": 0.00019617165737834916,
"loss": 0.139,
"step": 51
},
{
"epoch": 0.3286588978866285,
"grad_norm": 0.12451171875,
"learning_rate": 0.0001959839068181914,
"loss": 0.1845,
"step": 52
},
{
"epoch": 0.3349792613075252,
"grad_norm": 0.12060546875,
"learning_rate": 0.00019579175617828187,
"loss": 0.2043,
"step": 53
},
{
"epoch": 0.3412996247284219,
"grad_norm": 0.1142578125,
"learning_rate": 0.00019559521426716118,
"loss": 0.1678,
"step": 54
},
{
"epoch": 0.3476199881493186,
"grad_norm": 0.11669921875,
"learning_rate": 0.0001953942900946739,
"loss": 0.1671,
"step": 55
},
{
"epoch": 0.3539403515702153,
"grad_norm": 0.115234375,
"learning_rate": 0.00019518899287155556,
"loss": 0.1724,
"step": 56
},
{
"epoch": 0.360260714991112,
"grad_norm": 0.126953125,
"learning_rate": 0.0001949793320090105,
"loss": 0.177,
"step": 57
},
{
"epoch": 0.3665810784120087,
"grad_norm": 0.1259765625,
"learning_rate": 0.00019476531711828027,
"loss": 0.1644,
"step": 58
},
{
"epoch": 0.3729014418329054,
"grad_norm": 0.1142578125,
"learning_rate": 0.0001945469580102031,
"loss": 0.1564,
"step": 59
},
{
"epoch": 0.3792218052538021,
"grad_norm": 0.10302734375,
"learning_rate": 0.0001943242646947643,
"loss": 0.1353,
"step": 60
},
{
"epoch": 0.3855421686746988,
"grad_norm": 0.10791015625,
"learning_rate": 0.00019409724738063714,
"loss": 0.1622,
"step": 61
},
{
"epoch": 0.3918625320955955,
"grad_norm": 0.10791015625,
"learning_rate": 0.00019386591647471506,
"loss": 0.1564,
"step": 62
},
{
"epoch": 0.3981828955164922,
"grad_norm": 0.11376953125,
"learning_rate": 0.00019363028258163447,
"loss": 0.176,
"step": 63
},
{
"epoch": 0.4045032589373889,
"grad_norm": 0.1162109375,
"learning_rate": 0.00019339035650328869,
"loss": 0.1912,
"step": 64
},
{
"epoch": 0.4045032589373889,
"eval_loss": 0.1681559830904007,
"eval_runtime": 56.4912,
"eval_samples_per_second": 18.87,
"eval_steps_per_second": 18.87,
"step": 64
},
{
"epoch": 0.4108236223582856,
"grad_norm": 0.11572265625,
"learning_rate": 0.0001931461492383327,
"loss": 0.1959,
"step": 65
},
{
"epoch": 0.4171439857791823,
"grad_norm": 0.0966796875,
"learning_rate": 0.00019289767198167916,
"loss": 0.1379,
"step": 66
},
{
"epoch": 0.423464349200079,
"grad_norm": 0.1142578125,
"learning_rate": 0.00019264493612398481,
"loss": 0.1669,
"step": 67
},
{
"epoch": 0.42978471262097573,
"grad_norm": 0.09619140625,
"learning_rate": 0.0001923879532511287,
"loss": 0.1279,
"step": 68
},
{
"epoch": 0.43610507604187243,
"grad_norm": 0.10986328125,
"learning_rate": 0.0001921267351436808,
"loss": 0.1535,
"step": 69
},
{
"epoch": 0.44242543946276913,
"grad_norm": 0.11962890625,
"learning_rate": 0.0001918612937763622,
"loss": 0.1697,
"step": 70
},
{
"epoch": 0.44874580288366583,
"grad_norm": 0.107421875,
"learning_rate": 0.00019159164131749587,
"loss": 0.166,
"step": 71
},
{
"epoch": 0.45506616630456254,
"grad_norm": 0.10986328125,
"learning_rate": 0.00019131779012844912,
"loss": 0.1508,
"step": 72
},
{
"epoch": 0.46138652972545924,
"grad_norm": 0.1201171875,
"learning_rate": 0.00019103975276306678,
"loss": 0.1617,
"step": 73
},
{
"epoch": 0.46770689314635594,
"grad_norm": 0.1103515625,
"learning_rate": 0.00019075754196709572,
"loss": 0.1436,
"step": 74
},
{
"epoch": 0.47402725656725264,
"grad_norm": 0.1025390625,
"learning_rate": 0.0001904711706776006,
"loss": 0.1408,
"step": 75
},
{
"epoch": 0.48034761998814934,
"grad_norm": 0.1171875,
"learning_rate": 0.00019018065202237083,
"loss": 0.1594,
"step": 76
},
{
"epoch": 0.48666798340904605,
"grad_norm": 0.10546875,
"learning_rate": 0.00018988599931931866,
"loss": 0.1394,
"step": 77
},
{
"epoch": 0.49298834682994275,
"grad_norm": 0.111328125,
"learning_rate": 0.0001895872260758688,
"loss": 0.1448,
"step": 78
},
{
"epoch": 0.49930871025083945,
"grad_norm": 0.111328125,
"learning_rate": 0.00018928434598833912,
"loss": 0.156,
"step": 79
},
{
"epoch": 0.5056290736717362,
"grad_norm": 0.125,
"learning_rate": 0.00018897737294131284,
"loss": 0.1692,
"step": 80
},
{
"epoch": 0.5056290736717362,
"eval_loss": 0.15801414847373962,
"eval_runtime": 57.0109,
"eval_samples_per_second": 18.698,
"eval_steps_per_second": 18.698,
"step": 80
},
{
"epoch": 0.5119494370926329,
"grad_norm": 0.10400390625,
"learning_rate": 0.00018866632100700197,
"loss": 0.1318,
"step": 81
},
{
"epoch": 0.5182698005135296,
"grad_norm": 0.1357421875,
"learning_rate": 0.0001883512044446023,
"loss": 0.203,
"step": 82
},
{
"epoch": 0.5245901639344263,
"grad_norm": 0.126953125,
"learning_rate": 0.00018803203769963967,
"loss": 0.1968,
"step": 83
},
{
"epoch": 0.530910527355323,
"grad_norm": 0.1025390625,
"learning_rate": 0.0001877088354033077,
"loss": 0.139,
"step": 84
},
{
"epoch": 0.5372308907762197,
"grad_norm": 0.1044921875,
"learning_rate": 0.0001873816123717973,
"loss": 0.1301,
"step": 85
},
{
"epoch": 0.5435512541971164,
"grad_norm": 0.107421875,
"learning_rate": 0.0001870503836056172,
"loss": 0.1253,
"step": 86
},
{
"epoch": 0.5498716176180131,
"grad_norm": 0.11474609375,
"learning_rate": 0.00018671516428890648,
"loss": 0.1575,
"step": 87
},
{
"epoch": 0.5561919810389098,
"grad_norm": 0.12353515625,
"learning_rate": 0.00018637596978873835,
"loss": 0.1627,
"step": 88
},
{
"epoch": 0.5625123444598065,
"grad_norm": 0.1279296875,
"learning_rate": 0.00018603281565441585,
"loss": 0.1762,
"step": 89
},
{
"epoch": 0.5688327078807032,
"grad_norm": 0.1259765625,
"learning_rate": 0.00018568571761675893,
"loss": 0.1537,
"step": 90
},
{
"epoch": 0.5751530713015999,
"grad_norm": 0.1259765625,
"learning_rate": 0.00018533469158738344,
"loss": 0.1752,
"step": 91
},
{
"epoch": 0.5814734347224966,
"grad_norm": 0.11865234375,
"learning_rate": 0.0001849797536579715,
"loss": 0.1441,
"step": 92
},
{
"epoch": 0.5877937981433933,
"grad_norm": 0.1083984375,
"learning_rate": 0.00018462092009953408,
"loss": 0.1471,
"step": 93
},
{
"epoch": 0.59411416156429,
"grad_norm": 0.1201171875,
"learning_rate": 0.0001842582073616649,
"loss": 0.1584,
"step": 94
},
{
"epoch": 0.6004345249851867,
"grad_norm": 0.1005859375,
"learning_rate": 0.00018389163207178656,
"loss": 0.1255,
"step": 95
},
{
"epoch": 0.6067548884060834,
"grad_norm": 0.10595703125,
"learning_rate": 0.000183521211034388,
"loss": 0.1401,
"step": 96
},
{
"epoch": 0.6067548884060834,
"eval_loss": 0.1516103893518448,
"eval_runtime": 57.9856,
"eval_samples_per_second": 18.384,
"eval_steps_per_second": 18.384,
"step": 96
},
{
"epoch": 0.6130752518269801,
"grad_norm": 0.0966796875,
"learning_rate": 0.00018314696123025454,
"loss": 0.1359,
"step": 97
},
{
"epoch": 0.6193956152478768,
"grad_norm": 0.107421875,
"learning_rate": 0.00018276889981568906,
"loss": 0.1354,
"step": 98
},
{
"epoch": 0.6257159786687735,
"grad_norm": 0.10986328125,
"learning_rate": 0.00018238704412172586,
"loss": 0.1315,
"step": 99
},
{
"epoch": 0.6320363420896702,
"grad_norm": 0.1083984375,
"learning_rate": 0.0001820014116533359,
"loss": 0.1522,
"step": 100
},
{
"epoch": 0.6383567055105669,
"grad_norm": 0.11767578125,
"learning_rate": 0.00018161202008862458,
"loss": 0.1754,
"step": 101
},
{
"epoch": 0.6446770689314636,
"grad_norm": 0.1025390625,
"learning_rate": 0.00018121888727802113,
"loss": 0.1368,
"step": 102
},
{
"epoch": 0.6509974323523603,
"grad_norm": 0.09814453125,
"learning_rate": 0.00018082203124346045,
"loss": 0.14,
"step": 103
},
{
"epoch": 0.657317795773257,
"grad_norm": 0.115234375,
"learning_rate": 0.0001804214701775569,
"loss": 0.157,
"step": 104
},
{
"epoch": 0.6636381591941537,
"grad_norm": 0.11376953125,
"learning_rate": 0.00018001722244277035,
"loss": 0.1575,
"step": 105
},
{
"epoch": 0.6699585226150504,
"grad_norm": 0.09765625,
"learning_rate": 0.00017960930657056438,
"loss": 0.1229,
"step": 106
},
{
"epoch": 0.6762788860359471,
"grad_norm": 0.099609375,
"learning_rate": 0.00017919774126055673,
"loss": 0.1294,
"step": 107
},
{
"epoch": 0.6825992494568438,
"grad_norm": 0.09716796875,
"learning_rate": 0.00017878254537966216,
"loss": 0.1381,
"step": 108
},
{
"epoch": 0.6889196128777405,
"grad_norm": 0.11083984375,
"learning_rate": 0.0001783637379612275,
"loss": 0.1494,
"step": 109
},
{
"epoch": 0.6952399762986372,
"grad_norm": 0.107421875,
"learning_rate": 0.00017794133820415916,
"loss": 0.1527,
"step": 110
},
{
"epoch": 0.7015603397195339,
"grad_norm": 0.111328125,
"learning_rate": 0.00017751536547204295,
"loss": 0.1335,
"step": 111
},
{
"epoch": 0.7078807031404306,
"grad_norm": 0.10400390625,
"learning_rate": 0.0001770858392922565,
"loss": 0.1204,
"step": 112
},
{
"epoch": 0.7078807031404306,
"eval_loss": 0.14627417922019958,
"eval_runtime": 56.8973,
"eval_samples_per_second": 18.736,
"eval_steps_per_second": 18.736,
"step": 112
},
{
"epoch": 0.7142010665613273,
"grad_norm": 0.09814453125,
"learning_rate": 0.00017665277935507398,
"loss": 0.122,
"step": 113
},
{
"epoch": 0.720521429982224,
"grad_norm": 0.0966796875,
"learning_rate": 0.00017621620551276366,
"loss": 0.134,
"step": 114
},
{
"epoch": 0.7268417934031207,
"grad_norm": 0.09765625,
"learning_rate": 0.00017577613777867762,
"loss": 0.1185,
"step": 115
},
{
"epoch": 0.7331621568240174,
"grad_norm": 0.1064453125,
"learning_rate": 0.00017533259632633442,
"loss": 0.1337,
"step": 116
},
{
"epoch": 0.7394825202449141,
"grad_norm": 0.130859375,
"learning_rate": 0.00017488560148849427,
"loss": 0.1503,
"step": 117
},
{
"epoch": 0.7458028836658108,
"grad_norm": 0.10791015625,
"learning_rate": 0.00017443517375622704,
"loss": 0.1594,
"step": 118
},
{
"epoch": 0.7521232470867075,
"grad_norm": 0.1181640625,
"learning_rate": 0.0001739813337779727,
"loss": 0.1397,
"step": 119
},
{
"epoch": 0.7584436105076042,
"grad_norm": 0.10302734375,
"learning_rate": 0.00017352410235859503,
"loss": 0.1314,
"step": 120
},
{
"epoch": 0.7647639739285009,
"grad_norm": 0.115234375,
"learning_rate": 0.0001730635004584276,
"loss": 0.1466,
"step": 121
},
{
"epoch": 0.7710843373493976,
"grad_norm": 0.0986328125,
"learning_rate": 0.0001725995491923131,
"loss": 0.1147,
"step": 122
},
{
"epoch": 0.7774047007702943,
"grad_norm": 0.134765625,
"learning_rate": 0.0001721322698286354,
"loss": 0.1637,
"step": 123
},
{
"epoch": 0.783725064191191,
"grad_norm": 0.11279296875,
"learning_rate": 0.00017166168378834448,
"loss": 0.138,
"step": 124
},
{
"epoch": 0.7900454276120877,
"grad_norm": 0.11767578125,
"learning_rate": 0.00017118781264397446,
"loss": 0.1511,
"step": 125
},
{
"epoch": 0.7963657910329844,
"grad_norm": 0.115234375,
"learning_rate": 0.00017071067811865476,
"loss": 0.1407,
"step": 126
},
{
"epoch": 0.8026861544538811,
"grad_norm": 0.1064453125,
"learning_rate": 0.0001702303020851142,
"loss": 0.1342,
"step": 127
},
{
"epoch": 0.8090065178747778,
"grad_norm": 0.11865234375,
"learning_rate": 0.00016974670656467824,
"loss": 0.1336,
"step": 128
},
{
"epoch": 0.8090065178747778,
"eval_loss": 0.14203742146492004,
"eval_runtime": 58.4188,
"eval_samples_per_second": 18.248,
"eval_steps_per_second": 18.248,
"step": 128
},
{
"epoch": 0.8153268812956745,
"grad_norm": 0.1083984375,
"learning_rate": 0.0001692599137262597,
"loss": 0.1543,
"step": 129
},
{
"epoch": 0.8216472447165712,
"grad_norm": 0.0888671875,
"learning_rate": 0.00016876994588534234,
"loss": 0.1116,
"step": 130
},
{
"epoch": 0.827967608137468,
"grad_norm": 0.1044921875,
"learning_rate": 0.00016827682550295785,
"loss": 0.1267,
"step": 131
},
{
"epoch": 0.8342879715583646,
"grad_norm": 0.10400390625,
"learning_rate": 0.0001677805751846563,
"loss": 0.1361,
"step": 132
},
{
"epoch": 0.8406083349792614,
"grad_norm": 0.11376953125,
"learning_rate": 0.00016728121767946977,
"loss": 0.1372,
"step": 133
},
{
"epoch": 0.846928698400158,
"grad_norm": 0.10498046875,
"learning_rate": 0.00016677877587886956,
"loss": 0.1258,
"step": 134
},
{
"epoch": 0.8532490618210548,
"grad_norm": 0.11669921875,
"learning_rate": 0.00016627327281571678,
"loss": 0.1427,
"step": 135
},
{
"epoch": 0.8595694252419515,
"grad_norm": 0.1328125,
"learning_rate": 0.00016576473166320644,
"loss": 0.187,
"step": 136
},
{
"epoch": 0.8658897886628482,
"grad_norm": 0.1044921875,
"learning_rate": 0.00016525317573380525,
"loss": 0.1417,
"step": 137
},
{
"epoch": 0.8722101520837449,
"grad_norm": 0.10107421875,
"learning_rate": 0.00016473862847818277,
"loss": 0.1399,
"step": 138
},
{
"epoch": 0.8785305155046416,
"grad_norm": 0.1025390625,
"learning_rate": 0.00016422111348413657,
"loss": 0.141,
"step": 139
},
{
"epoch": 0.8848508789255383,
"grad_norm": 0.1005859375,
"learning_rate": 0.00016370065447551078,
"loss": 0.1236,
"step": 140
},
{
"epoch": 0.891171242346435,
"grad_norm": 0.10205078125,
"learning_rate": 0.0001631772753111086,
"loss": 0.1236,
"step": 141
},
{
"epoch": 0.8974916057673317,
"grad_norm": 0.10302734375,
"learning_rate": 0.00016265099998359866,
"loss": 0.128,
"step": 142
},
{
"epoch": 0.9038119691882284,
"grad_norm": 0.09814453125,
"learning_rate": 0.00016212185261841499,
"loss": 0.1227,
"step": 143
},
{
"epoch": 0.9101323326091251,
"grad_norm": 0.1064453125,
"learning_rate": 0.00016158985747265108,
"loss": 0.1339,
"step": 144
},
{
"epoch": 0.9101323326091251,
"eval_loss": 0.1379576325416565,
"eval_runtime": 61.2763,
"eval_samples_per_second": 17.397,
"eval_steps_per_second": 17.397,
"step": 144
},
{
"epoch": 0.9164526960300218,
"grad_norm": 0.10009765625,
"learning_rate": 0.00016105503893394806,
"loss": 0.1255,
"step": 145
},
{
"epoch": 0.9227730594509185,
"grad_norm": 0.103515625,
"learning_rate": 0.00016051742151937655,
"loss": 0.1237,
"step": 146
},
{
"epoch": 0.9290934228718152,
"grad_norm": 0.10009765625,
"learning_rate": 0.0001599770298743128,
"loss": 0.1212,
"step": 147
},
{
"epoch": 0.9354137862927119,
"grad_norm": 0.1123046875,
"learning_rate": 0.000159433888771309,
"loss": 0.1402,
"step": 148
},
{
"epoch": 0.9417341497136086,
"grad_norm": 0.11865234375,
"learning_rate": 0.00015888802310895742,
"loss": 0.1475,
"step": 149
},
{
"epoch": 0.9480545131345053,
"grad_norm": 0.1064453125,
"learning_rate": 0.00015833945791074943,
"loss": 0.1133,
"step": 150
},
{
"epoch": 0.954374876555402,
"grad_norm": 0.10986328125,
"learning_rate": 0.00015778821832392777,
"loss": 0.1336,
"step": 151
},
{
"epoch": 0.9606952399762987,
"grad_norm": 0.1162109375,
"learning_rate": 0.0001572343296183344,
"loss": 0.1479,
"step": 152
},
{
"epoch": 0.9670156033971954,
"grad_norm": 0.10546875,
"learning_rate": 0.00015667781718525157,
"loss": 0.1291,
"step": 153
},
{
"epoch": 0.9733359668180921,
"grad_norm": 0.11181640625,
"learning_rate": 0.00015611870653623825,
"loss": 0.1337,
"step": 154
},
{
"epoch": 0.9796563302389888,
"grad_norm": 0.09228515625,
"learning_rate": 0.00015555702330196023,
"loss": 0.1024,
"step": 155
},
{
"epoch": 0.9859766936598855,
"grad_norm": 0.10791015625,
"learning_rate": 0.0001549927932310155,
"loss": 0.1565,
"step": 156
},
{
"epoch": 0.9922970570807822,
"grad_norm": 0.10595703125,
"learning_rate": 0.0001544260421887537,
"loss": 0.1328,
"step": 157
},
{
"epoch": 0.9986174205016789,
"grad_norm": 0.1142578125,
"learning_rate": 0.00015385679615609042,
"loss": 0.164,
"step": 158
}
],
"logging_steps": 1,
"max_steps": 474,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 79,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.7720565660267315e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}