{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9956709956709957,
"eval_steps": 58,
"global_step": 115,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008658008658008658,
"grad_norm": 0.11093204468488693,
"learning_rate": 2e-05,
"loss": 1.5798,
"step": 1
},
{
"epoch": 0.008658008658008658,
"eval_loss": 1.570786714553833,
"eval_runtime": 25.8433,
"eval_samples_per_second": 246.718,
"eval_steps_per_second": 30.84,
"step": 1
},
{
"epoch": 0.017316017316017316,
"grad_norm": 0.13193683326244354,
"learning_rate": 4e-05,
"loss": 1.6109,
"step": 2
},
{
"epoch": 0.025974025974025976,
"grad_norm": 0.12040749937295914,
"learning_rate": 6e-05,
"loss": 1.5904,
"step": 3
},
{
"epoch": 0.03463203463203463,
"grad_norm": 0.1296212375164032,
"learning_rate": 8e-05,
"loss": 1.5825,
"step": 4
},
{
"epoch": 0.04329004329004329,
"grad_norm": 0.15250156819820404,
"learning_rate": 0.0001,
"loss": 1.6009,
"step": 5
},
{
"epoch": 0.05194805194805195,
"grad_norm": 0.19382008910179138,
"learning_rate": 0.00012,
"loss": 1.5556,
"step": 6
},
{
"epoch": 0.06060606060606061,
"grad_norm": 0.22872094810009003,
"learning_rate": 0.00014,
"loss": 1.5656,
"step": 7
},
{
"epoch": 0.06926406926406926,
"grad_norm": 0.23726485669612885,
"learning_rate": 0.00016,
"loss": 1.47,
"step": 8
},
{
"epoch": 0.07792207792207792,
"grad_norm": 0.22901910543441772,
"learning_rate": 0.00018,
"loss": 1.3961,
"step": 9
},
{
"epoch": 0.08658008658008658,
"grad_norm": 0.20660609006881714,
"learning_rate": 0.0002,
"loss": 1.3514,
"step": 10
},
{
"epoch": 0.09523809523809523,
"grad_norm": 0.16906796395778656,
"learning_rate": 0.00019995524322835034,
"loss": 1.254,
"step": 11
},
{
"epoch": 0.1038961038961039,
"grad_norm": 0.16580761969089508,
"learning_rate": 0.0001998210129767735,
"loss": 1.2026,
"step": 12
},
{
"epoch": 0.11255411255411256,
"grad_norm": 0.23203064501285553,
"learning_rate": 0.00019959742939952392,
"loss": 1.1739,
"step": 13
},
{
"epoch": 0.12121212121212122,
"grad_norm": 0.23922611773014069,
"learning_rate": 0.00019928469263418374,
"loss": 1.103,
"step": 14
},
{
"epoch": 0.12987012987012986,
"grad_norm": 0.19562934339046478,
"learning_rate": 0.00019888308262251285,
"loss": 1.0532,
"step": 15
},
{
"epoch": 0.13852813852813853,
"grad_norm": 0.1417841911315918,
"learning_rate": 0.00019839295885986296,
"loss": 1.034,
"step": 16
},
{
"epoch": 0.1471861471861472,
"grad_norm": 0.11438935995101929,
"learning_rate": 0.00019781476007338058,
"loss": 1.0186,
"step": 17
},
{
"epoch": 0.15584415584415584,
"grad_norm": 0.10540100187063217,
"learning_rate": 0.00019714900382928675,
"loss": 0.9839,
"step": 18
},
{
"epoch": 0.1645021645021645,
"grad_norm": 0.10455407947301865,
"learning_rate": 0.00019639628606958533,
"loss": 0.986,
"step": 19
},
{
"epoch": 0.17316017316017315,
"grad_norm": 0.10251545161008835,
"learning_rate": 0.0001955572805786141,
"loss": 0.9835,
"step": 20
},
{
"epoch": 0.18181818181818182,
"grad_norm": 0.09409354627132416,
"learning_rate": 0.00019463273837991643,
"loss": 0.9595,
"step": 21
},
{
"epoch": 0.19047619047619047,
"grad_norm": 0.08605455607175827,
"learning_rate": 0.00019362348706397373,
"loss": 0.9622,
"step": 22
},
{
"epoch": 0.19913419913419914,
"grad_norm": 0.08584555238485336,
"learning_rate": 0.00019253043004739968,
"loss": 0.9529,
"step": 23
},
{
"epoch": 0.2077922077922078,
"grad_norm": 0.07192489504814148,
"learning_rate": 0.0001913545457642601,
"loss": 0.9383,
"step": 24
},
{
"epoch": 0.21645021645021645,
"grad_norm": 0.06461908668279648,
"learning_rate": 0.0001900968867902419,
"loss": 0.9182,
"step": 25
},
{
"epoch": 0.22510822510822512,
"grad_norm": 0.06550411134958267,
"learning_rate": 0.00018875857890045543,
"loss": 0.8982,
"step": 26
},
{
"epoch": 0.23376623376623376,
"grad_norm": 0.07315943390130997,
"learning_rate": 0.00018734082006171299,
"loss": 0.9074,
"step": 27
},
{
"epoch": 0.24242424242424243,
"grad_norm": 0.07087890803813934,
"learning_rate": 0.00018584487936018661,
"loss": 0.9125,
"step": 28
},
{
"epoch": 0.2510822510822511,
"grad_norm": 0.06873635947704315,
"learning_rate": 0.0001842720958654039,
"loss": 0.8867,
"step": 29
},
{
"epoch": 0.2597402597402597,
"grad_norm": 0.06812155246734619,
"learning_rate": 0.0001826238774315995,
"loss": 0.8891,
"step": 30
},
{
"epoch": 0.2683982683982684,
"grad_norm": 0.08119972795248032,
"learning_rate": 0.00018090169943749476,
"loss": 0.8794,
"step": 31
},
{
"epoch": 0.27705627705627706,
"grad_norm": 0.060435950756073,
"learning_rate": 0.00017910710346563416,
"loss": 0.8807,
"step": 32
},
{
"epoch": 0.2857142857142857,
"grad_norm": 0.057483069598674774,
"learning_rate": 0.00017724169592245995,
"loss": 0.8789,
"step": 33
},
{
"epoch": 0.2943722943722944,
"grad_norm": 0.055934980511665344,
"learning_rate": 0.00017530714660036112,
"loss": 0.8631,
"step": 34
},
{
"epoch": 0.30303030303030304,
"grad_norm": 0.07267452776432037,
"learning_rate": 0.00017330518718298264,
"loss": 0.838,
"step": 35
},
{
"epoch": 0.3116883116883117,
"grad_norm": 0.05725516006350517,
"learning_rate": 0.0001712376096951345,
"loss": 0.8491,
"step": 36
},
{
"epoch": 0.3203463203463203,
"grad_norm": 0.05821891874074936,
"learning_rate": 0.00016910626489868649,
"loss": 0.8376,
"step": 37
},
{
"epoch": 0.329004329004329,
"grad_norm": 0.06176719069480896,
"learning_rate": 0.00016691306063588583,
"loss": 0.8397,
"step": 38
},
{
"epoch": 0.33766233766233766,
"grad_norm": 0.051375944167375565,
"learning_rate": 0.00016465996012157995,
"loss": 0.8449,
"step": 39
},
{
"epoch": 0.3463203463203463,
"grad_norm": 0.05730742588639259,
"learning_rate": 0.00016234898018587337,
"loss": 0.8705,
"step": 40
},
{
"epoch": 0.354978354978355,
"grad_norm": 0.053983114659786224,
"learning_rate": 0.00015998218946879138,
"loss": 0.8399,
"step": 41
},
{
"epoch": 0.36363636363636365,
"grad_norm": 0.05474100634455681,
"learning_rate": 0.00015756170656856737,
"loss": 0.8219,
"step": 42
},
{
"epoch": 0.3722943722943723,
"grad_norm": 0.0504613034427166,
"learning_rate": 0.00015508969814521025,
"loss": 0.8137,
"step": 43
},
{
"epoch": 0.38095238095238093,
"grad_norm": 0.04792892560362816,
"learning_rate": 0.00015256837698105047,
"loss": 0.8327,
"step": 44
},
{
"epoch": 0.38961038961038963,
"grad_norm": 0.04982732608914375,
"learning_rate": 0.00015000000000000001,
"loss": 0.8509,
"step": 45
},
{
"epoch": 0.39826839826839827,
"grad_norm": 0.04842912778258324,
"learning_rate": 0.00014738686624729986,
"loss": 0.8114,
"step": 46
},
{
"epoch": 0.4069264069264069,
"grad_norm": 0.04842810332775116,
"learning_rate": 0.00014473131483156327,
"loss": 0.8104,
"step": 47
},
{
"epoch": 0.4155844155844156,
"grad_norm": 0.052241936326026917,
"learning_rate": 0.00014203572283095657,
"loss": 0.8121,
"step": 48
},
{
"epoch": 0.42424242424242425,
"grad_norm": 0.0497763529419899,
"learning_rate": 0.00013930250316539238,
"loss": 0.8251,
"step": 49
},
{
"epoch": 0.4329004329004329,
"grad_norm": 0.050107311457395554,
"learning_rate": 0.00013653410243663952,
"loss": 0.8286,
"step": 50
},
{
"epoch": 0.44155844155844154,
"grad_norm": 0.04895767569541931,
"learning_rate": 0.00013373299873828303,
"loss": 0.8227,
"step": 51
},
{
"epoch": 0.45021645021645024,
"grad_norm": 0.04833944886922836,
"learning_rate": 0.00013090169943749476,
"loss": 0.7994,
"step": 52
},
{
"epoch": 0.4588744588744589,
"grad_norm": 0.050548214465379715,
"learning_rate": 0.00012804273893060028,
"loss": 0.8223,
"step": 53
},
{
"epoch": 0.4675324675324675,
"grad_norm": 0.05153653398156166,
"learning_rate": 0.00012515867637445086,
"loss": 0.8076,
"step": 54
},
{
"epoch": 0.47619047619047616,
"grad_norm": 0.05172652378678322,
"learning_rate": 0.00012225209339563145,
"loss": 0.8329,
"step": 55
},
{
"epoch": 0.48484848484848486,
"grad_norm": 0.04874265193939209,
"learning_rate": 0.00011932559177955533,
"loss": 0.8061,
"step": 56
},
{
"epoch": 0.4935064935064935,
"grad_norm": 0.05159593001008034,
"learning_rate": 0.00011638179114151377,
"loss": 0.7794,
"step": 57
},
{
"epoch": 0.5021645021645021,
"grad_norm": 0.054299090057611465,
"learning_rate": 0.00011342332658176555,
"loss": 0.7984,
"step": 58
},
{
"epoch": 0.5021645021645021,
"eval_loss": 0.788051426410675,
"eval_runtime": 28.9237,
"eval_samples_per_second": 220.442,
"eval_steps_per_second": 27.555,
"step": 58
},
{
"epoch": 0.5108225108225108,
"grad_norm": 0.04983401671051979,
"learning_rate": 0.00011045284632676536,
"loss": 0.794,
"step": 59
},
{
"epoch": 0.5194805194805194,
"grad_norm": 0.0501757487654686,
"learning_rate": 0.00010747300935864243,
"loss": 0.7891,
"step": 60
},
{
"epoch": 0.5281385281385281,
"grad_norm": 0.04997857287526131,
"learning_rate": 0.00010448648303505151,
"loss": 0.7903,
"step": 61
},
{
"epoch": 0.5367965367965368,
"grad_norm": 0.048633065074682236,
"learning_rate": 0.00010149594070152638,
"loss": 0.7792,
"step": 62
},
{
"epoch": 0.5454545454545454,
"grad_norm": 0.05213923007249832,
"learning_rate": 9.850405929847366e-05,
"loss": 0.7935,
"step": 63
},
{
"epoch": 0.5541125541125541,
"grad_norm": 0.04998105391860008,
"learning_rate": 9.551351696494854e-05,
"loss": 0.7744,
"step": 64
},
{
"epoch": 0.5627705627705628,
"grad_norm": 0.05255527049303055,
"learning_rate": 9.252699064135758e-05,
"loss": 0.7837,
"step": 65
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.05218357592821121,
"learning_rate": 8.954715367323468e-05,
"loss": 0.7857,
"step": 66
},
{
"epoch": 0.5800865800865801,
"grad_norm": 0.05224675312638283,
"learning_rate": 8.657667341823448e-05,
"loss": 0.7762,
"step": 67
},
{
"epoch": 0.5887445887445888,
"grad_norm": 0.050885338336229324,
"learning_rate": 8.361820885848624e-05,
"loss": 0.7722,
"step": 68
},
{
"epoch": 0.5974025974025974,
"grad_norm": 0.05296636000275612,
"learning_rate": 8.067440822044469e-05,
"loss": 0.7946,
"step": 69
},
{
"epoch": 0.6060606060606061,
"grad_norm": 0.051182664930820465,
"learning_rate": 7.774790660436858e-05,
"loss": 0.7684,
"step": 70
},
{
"epoch": 0.6147186147186147,
"grad_norm": 0.05467361956834793,
"learning_rate": 7.484132362554915e-05,
"loss": 0.8165,
"step": 71
},
{
"epoch": 0.6233766233766234,
"grad_norm": 0.05064595863223076,
"learning_rate": 7.195726106939974e-05,
"loss": 0.7784,
"step": 72
},
{
"epoch": 0.6320346320346321,
"grad_norm": 0.05015125870704651,
"learning_rate": 6.909830056250527e-05,
"loss": 0.7687,
"step": 73
},
{
"epoch": 0.6406926406926406,
"grad_norm": 0.0537785179913044,
"learning_rate": 6.626700126171702e-05,
"loss": 0.7766,
"step": 74
},
{
"epoch": 0.6493506493506493,
"grad_norm": 0.051856957376003265,
"learning_rate": 6.34658975633605e-05,
"loss": 0.7737,
"step": 75
},
{
"epoch": 0.658008658008658,
"grad_norm": 0.05247452110052109,
"learning_rate": 6.069749683460765e-05,
"loss": 0.7688,
"step": 76
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.05488152056932449,
"learning_rate": 5.796427716904347e-05,
"loss": 0.7825,
"step": 77
},
{
"epoch": 0.6753246753246753,
"grad_norm": 0.053116098046302795,
"learning_rate": 5.526868516843673e-05,
"loss": 0.7796,
"step": 78
},
{
"epoch": 0.683982683982684,
"grad_norm": 0.05379800498485565,
"learning_rate": 5.261313375270014e-05,
"loss": 0.7976,
"step": 79
},
{
"epoch": 0.6926406926406926,
"grad_norm": 0.0555279403924942,
"learning_rate": 5.000000000000002e-05,
"loss": 0.774,
"step": 80
},
{
"epoch": 0.7012987012987013,
"grad_norm": 0.05348188057541847,
"learning_rate": 4.743162301894952e-05,
"loss": 0.7782,
"step": 81
},
{
"epoch": 0.70995670995671,
"grad_norm": 0.05335194244980812,
"learning_rate": 4.491030185478976e-05,
"loss": 0.769,
"step": 82
},
{
"epoch": 0.7186147186147186,
"grad_norm": 0.05507886782288551,
"learning_rate": 4.2438293431432665e-05,
"loss": 0.7765,
"step": 83
},
{
"epoch": 0.7272727272727273,
"grad_norm": 0.05403213948011398,
"learning_rate": 4.001781053120863e-05,
"loss": 0.7684,
"step": 84
},
{
"epoch": 0.7359307359307359,
"grad_norm": 0.05368750914931297,
"learning_rate": 3.7651019814126654e-05,
"loss": 0.7693,
"step": 85
},
{
"epoch": 0.7445887445887446,
"grad_norm": 0.0559493824839592,
"learning_rate": 3.534003987842005e-05,
"loss": 0.7867,
"step": 86
},
{
"epoch": 0.7532467532467533,
"grad_norm": 0.05538685619831085,
"learning_rate": 3.308693936411421e-05,
"loss": 0.7789,
"step": 87
},
{
"epoch": 0.7619047619047619,
"grad_norm": 0.054037969559431076,
"learning_rate": 3.089373510131354e-05,
"loss": 0.7672,
"step": 88
},
{
"epoch": 0.7705627705627706,
"grad_norm": 0.054057423025369644,
"learning_rate": 2.876239030486554e-05,
"loss": 0.7759,
"step": 89
},
{
"epoch": 0.7792207792207793,
"grad_norm": 0.05448087304830551,
"learning_rate": 2.669481281701739e-05,
"loss": 0.7737,
"step": 90
},
{
"epoch": 0.7878787878787878,
"grad_norm": 0.05279451981186867,
"learning_rate": 2.4692853399638917e-05,
"loss": 0.7617,
"step": 91
},
{
"epoch": 0.7965367965367965,
"grad_norm": 0.055114444345235825,
"learning_rate": 2.275830407754006e-05,
"loss": 0.7658,
"step": 92
},
{
"epoch": 0.8051948051948052,
"grad_norm": 0.05524512380361557,
"learning_rate": 2.0892896534365904e-05,
"loss": 0.787,
"step": 93
},
{
"epoch": 0.8138528138528138,
"grad_norm": 0.05598621442914009,
"learning_rate": 1.9098300562505266e-05,
"loss": 0.7608,
"step": 94
},
{
"epoch": 0.8225108225108225,
"grad_norm": 0.055263664573431015,
"learning_rate": 1.7376122568400532e-05,
"loss": 0.7751,
"step": 95
},
{
"epoch": 0.8311688311688312,
"grad_norm": 0.053236011415719986,
"learning_rate": 1.5727904134596083e-05,
"loss": 0.7541,
"step": 96
},
{
"epoch": 0.8398268398268398,
"grad_norm": 0.05485166609287262,
"learning_rate": 1.415512063981339e-05,
"loss": 0.7619,
"step": 97
},
{
"epoch": 0.8484848484848485,
"grad_norm": 0.0545911081135273,
"learning_rate": 1.2659179938287035e-05,
"loss": 0.7705,
"step": 98
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.05586208403110504,
"learning_rate": 1.124142109954459e-05,
"loss": 0.7552,
"step": 99
},
{
"epoch": 0.8658008658008658,
"grad_norm": 0.05504390969872475,
"learning_rate": 9.903113209758096e-06,
"loss": 0.7639,
"step": 100
},
{
"epoch": 0.8744588744588745,
"grad_norm": 0.0530891939997673,
"learning_rate": 8.645454235739903e-06,
"loss": 0.7605,
"step": 101
},
{
"epoch": 0.8831168831168831,
"grad_norm": 0.05585161969065666,
"learning_rate": 7.46956995260033e-06,
"loss": 0.7837,
"step": 102
},
{
"epoch": 0.8917748917748918,
"grad_norm": 0.05504593625664711,
"learning_rate": 6.37651293602628e-06,
"loss": 0.7625,
"step": 103
},
{
"epoch": 0.9004329004329005,
"grad_norm": 0.05624198541045189,
"learning_rate": 5.367261620083575e-06,
"loss": 0.7876,
"step": 104
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.05238276347517967,
"learning_rate": 4.442719421385922e-06,
"loss": 0.7626,
"step": 105
},
{
"epoch": 0.9177489177489178,
"grad_norm": 0.05504492297768593,
"learning_rate": 3.6037139304146762e-06,
"loss": 0.776,
"step": 106
},
{
"epoch": 0.9264069264069265,
"grad_norm": 0.05449540913105011,
"learning_rate": 2.8509961707132494e-06,
"loss": 0.7873,
"step": 107
},
{
"epoch": 0.935064935064935,
"grad_norm": 0.054615579545497894,
"learning_rate": 2.1852399266194314e-06,
"loss": 0.7808,
"step": 108
},
{
"epoch": 0.9437229437229437,
"grad_norm": 0.05515173450112343,
"learning_rate": 1.6070411401370334e-06,
"loss": 0.7746,
"step": 109
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.05492587760090828,
"learning_rate": 1.1169173774871478e-06,
"loss": 0.7745,
"step": 110
},
{
"epoch": 0.961038961038961,
"grad_norm": 0.05713028460741043,
"learning_rate": 7.153073658162646e-07,
"loss": 0.7788,
"step": 111
},
{
"epoch": 0.9696969696969697,
"grad_norm": 0.054508037865161896,
"learning_rate": 4.025706004760932e-07,
"loss": 0.7714,
"step": 112
},
{
"epoch": 0.9783549783549783,
"grad_norm": 0.058265261352062225,
"learning_rate": 1.7898702322648453e-07,
"loss": 0.7821,
"step": 113
},
{
"epoch": 0.987012987012987,
"grad_norm": 0.055581748485565186,
"learning_rate": 4.475677164966774e-08,
"loss": 0.7815,
"step": 114
},
{
"epoch": 0.9956709956709957,
"grad_norm": 0.057229481637477875,
"learning_rate": 0.0,
"loss": 0.779,
"step": 115
}
],
"logging_steps": 1,
"max_steps": 115,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.04802976275497e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}