{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 22,
"global_step": 87,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 19.59183260405101,
"learning_rate": 2.0000000000000003e-06,
"loss": 1.3985,
"step": 1
},
{
"epoch": 0.01,
"eval_loss": 1.5676738023757935,
"eval_runtime": 3.9227,
"eval_samples_per_second": 4.844,
"eval_steps_per_second": 1.275,
"step": 1
},
{
"epoch": 0.02,
"grad_norm": 25.857640141956836,
"learning_rate": 4.000000000000001e-06,
"loss": 1.6073,
"step": 2
},
{
"epoch": 0.03,
"grad_norm": 19.53747244629775,
"learning_rate": 6e-06,
"loss": 1.5784,
"step": 3
},
{
"epoch": 0.05,
"grad_norm": 32.1611390841105,
"learning_rate": 8.000000000000001e-06,
"loss": 1.4924,
"step": 4
},
{
"epoch": 0.06,
"grad_norm": 25.68491589990429,
"learning_rate": 1e-05,
"loss": 1.5215,
"step": 5
},
{
"epoch": 0.07,
"grad_norm": 21.800722806654328,
"learning_rate": 1.2e-05,
"loss": 1.5508,
"step": 6
},
{
"epoch": 0.08,
"grad_norm": 17.19812526688047,
"learning_rate": 1.4000000000000001e-05,
"loss": 1.205,
"step": 7
},
{
"epoch": 0.09,
"grad_norm": 33.24510948526866,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.6376,
"step": 8
},
{
"epoch": 0.1,
"grad_norm": 14.986129451187,
"learning_rate": 1.8e-05,
"loss": 1.454,
"step": 9
},
{
"epoch": 0.11,
"grad_norm": 17.517194747344856,
"learning_rate": 2e-05,
"loss": 1.6559,
"step": 10
},
{
"epoch": 0.13,
"grad_norm": 18.22606604955052,
"learning_rate": 2.2000000000000003e-05,
"loss": 1.5816,
"step": 11
},
{
"epoch": 0.14,
"grad_norm": 26.577328122239592,
"learning_rate": 2.4e-05,
"loss": 1.7884,
"step": 12
},
{
"epoch": 0.15,
"grad_norm": 17.563463503652706,
"learning_rate": 2.6000000000000002e-05,
"loss": 1.4405,
"step": 13
},
{
"epoch": 0.16,
"grad_norm": 19.98739837372538,
"learning_rate": 2.8000000000000003e-05,
"loss": 1.7512,
"step": 14
},
{
"epoch": 0.17,
"grad_norm": 16.25364292099236,
"learning_rate": 3e-05,
"loss": 1.7037,
"step": 15
},
{
"epoch": 0.18,
"grad_norm": 22.328347783615648,
"learning_rate": 3.2000000000000005e-05,
"loss": 1.5476,
"step": 16
},
{
"epoch": 0.2,
"grad_norm": 20.71265405702363,
"learning_rate": 3.4000000000000007e-05,
"loss": 1.8108,
"step": 17
},
{
"epoch": 0.21,
"grad_norm": 22.425069186679085,
"learning_rate": 3.6e-05,
"loss": 1.6751,
"step": 18
},
{
"epoch": 0.22,
"grad_norm": 18.646553829771403,
"learning_rate": 3.8e-05,
"loss": 1.7696,
"step": 19
},
{
"epoch": 0.23,
"grad_norm": 16.74801305116847,
"learning_rate": 4e-05,
"loss": 1.7699,
"step": 20
},
{
"epoch": 0.24,
"grad_norm": 30.99364381661585,
"learning_rate": 4.2e-05,
"loss": 2.1827,
"step": 21
},
{
"epoch": 0.25,
"grad_norm": 19.761245037191404,
"learning_rate": 4.4000000000000006e-05,
"loss": 1.4776,
"step": 22
},
{
"epoch": 0.25,
"eval_loss": 1.856759786605835,
"eval_runtime": 1.4309,
"eval_samples_per_second": 13.279,
"eval_steps_per_second": 3.494,
"step": 22
},
{
"epoch": 0.26,
"grad_norm": 27.821861354666428,
"learning_rate": 4.600000000000001e-05,
"loss": 1.8677,
"step": 23
},
{
"epoch": 0.28,
"grad_norm": 22.80354998875929,
"learning_rate": 4.8e-05,
"loss": 1.7424,
"step": 24
},
{
"epoch": 0.29,
"grad_norm": 19.920321630615994,
"learning_rate": 5e-05,
"loss": 1.769,
"step": 25
},
{
"epoch": 0.3,
"grad_norm": 23.82621534355921,
"learning_rate": 5.2000000000000004e-05,
"loss": 1.8615,
"step": 26
},
{
"epoch": 0.31,
"grad_norm": 27.63826215875828,
"learning_rate": 5.4000000000000005e-05,
"loss": 1.7164,
"step": 27
},
{
"epoch": 0.32,
"grad_norm": 30.810269474286084,
"learning_rate": 5.6000000000000006e-05,
"loss": 1.8922,
"step": 28
},
{
"epoch": 0.33,
"grad_norm": 23.279443617002265,
"learning_rate": 5.8e-05,
"loss": 1.8507,
"step": 29
},
{
"epoch": 0.34,
"grad_norm": 18.091756285883704,
"learning_rate": 6e-05,
"loss": 1.6537,
"step": 30
},
{
"epoch": 0.36,
"grad_norm": 23.292568819411724,
"learning_rate": 6.2e-05,
"loss": 1.9125,
"step": 31
},
{
"epoch": 0.37,
"grad_norm": 52.21913805211888,
"learning_rate": 6.400000000000001e-05,
"loss": 1.8456,
"step": 32
},
{
"epoch": 0.38,
"grad_norm": 30.771918199992527,
"learning_rate": 6.6e-05,
"loss": 1.9398,
"step": 33
},
{
"epoch": 0.39,
"grad_norm": 40.19880102087157,
"learning_rate": 6.800000000000001e-05,
"loss": 2.3763,
"step": 34
},
{
"epoch": 0.4,
"grad_norm": 28.33779955799431,
"learning_rate": 7e-05,
"loss": 2.0815,
"step": 35
},
{
"epoch": 0.41,
"grad_norm": 54.379783823327905,
"learning_rate": 7.2e-05,
"loss": 2.1032,
"step": 36
},
{
"epoch": 0.43,
"grad_norm": 662.6041427579516,
"learning_rate": 7.4e-05,
"loss": 3.5917,
"step": 37
},
{
"epoch": 0.44,
"grad_norm": 4011.501090614335,
"learning_rate": 7.6e-05,
"loss": 7.3933,
"step": 38
},
{
"epoch": 0.45,
"grad_norm": 753.5816146908044,
"learning_rate": 7.800000000000001e-05,
"loss": 15.8371,
"step": 39
},
{
"epoch": 0.46,
"grad_norm": 291.86609997137384,
"learning_rate": 8e-05,
"loss": 9.0249,
"step": 40
},
{
"epoch": 0.47,
"grad_norm": 922.0326051718562,
"learning_rate": 8.2e-05,
"loss": 9.8922,
"step": 41
},
{
"epoch": 0.48,
"grad_norm": 115.68135873083658,
"learning_rate": 8.4e-05,
"loss": 8.2719,
"step": 42
},
{
"epoch": 0.49,
"grad_norm": 693.1702757980898,
"learning_rate": 8.6e-05,
"loss": 24.1699,
"step": 43
},
{
"epoch": 0.51,
"grad_norm": 219.78828591396768,
"learning_rate": 8.800000000000001e-05,
"loss": 10.1246,
"step": 44
},
{
"epoch": 0.51,
"eval_loss": 8.758976936340332,
"eval_runtime": 1.3819,
"eval_samples_per_second": 13.749,
"eval_steps_per_second": 3.618,
"step": 44
},
{
"epoch": 0.52,
"grad_norm": 101.32256273283853,
"learning_rate": 9e-05,
"loss": 9.1756,
"step": 45
},
{
"epoch": 0.53,
"grad_norm": 11.958039992364316,
"learning_rate": 9.200000000000001e-05,
"loss": 7.7273,
"step": 46
},
{
"epoch": 0.54,
"grad_norm": 3232.8188937829473,
"learning_rate": 9.4e-05,
"loss": 8.7486,
"step": 47
},
{
"epoch": 0.55,
"grad_norm": 34.97640980952823,
"learning_rate": 9.6e-05,
"loss": 8.0394,
"step": 48
},
{
"epoch": 0.56,
"grad_norm": 28.785233378323184,
"learning_rate": 9.8e-05,
"loss": 7.6241,
"step": 49
},
{
"epoch": 0.57,
"grad_norm": 734.1733446536996,
"learning_rate": 0.0001,
"loss": 7.7813,
"step": 50
},
{
"epoch": 0.59,
"grad_norm": 59.66290191764372,
"learning_rate": 0.00010200000000000001,
"loss": 7.6734,
"step": 51
},
{
"epoch": 0.6,
"grad_norm": 18.23606863385719,
"learning_rate": 0.00010400000000000001,
"loss": 7.8783,
"step": 52
},
{
"epoch": 0.61,
"grad_norm": 15.152500341579357,
"learning_rate": 0.00010600000000000002,
"loss": 7.6016,
"step": 53
},
{
"epoch": 0.62,
"grad_norm": 25.1062172290986,
"learning_rate": 0.00010800000000000001,
"loss": 7.7206,
"step": 54
},
{
"epoch": 0.63,
"grad_norm": 30.75760910048317,
"learning_rate": 0.00011000000000000002,
"loss": 7.7526,
"step": 55
},
{
"epoch": 0.64,
"grad_norm": 18.354297835205724,
"learning_rate": 0.00011200000000000001,
"loss": 7.5551,
"step": 56
},
{
"epoch": 0.66,
"grad_norm": 32.48061320811231,
"learning_rate": 0.00011399999999999999,
"loss": 7.6922,
"step": 57
},
{
"epoch": 0.67,
"grad_norm": 26.624723764338736,
"learning_rate": 0.000116,
"loss": 7.5791,
"step": 58
},
{
"epoch": 0.68,
"grad_norm": 13.92226298912218,
"learning_rate": 0.000118,
"loss": 7.3692,
"step": 59
},
{
"epoch": 0.69,
"grad_norm": 15.428105634893914,
"learning_rate": 0.00012,
"loss": 7.564,
"step": 60
},
{
"epoch": 0.7,
"grad_norm": 23.02235056392551,
"learning_rate": 0.000122,
"loss": 7.3948,
"step": 61
},
{
"epoch": 0.71,
"grad_norm": 16.470374126805776,
"learning_rate": 0.000124,
"loss": 7.4646,
"step": 62
},
{
"epoch": 0.72,
"grad_norm": 19.574553557429,
"learning_rate": 0.000126,
"loss": 7.3902,
"step": 63
},
{
"epoch": 0.74,
"grad_norm": 25.549182808604662,
"learning_rate": 0.00012800000000000002,
"loss": 7.5461,
"step": 64
},
{
"epoch": 0.75,
"grad_norm": 618.1147749407063,
"learning_rate": 0.00013000000000000002,
"loss": 8.157,
"step": 65
},
{
"epoch": 0.76,
"grad_norm": 66.0610660913446,
"learning_rate": 0.000132,
"loss": 8.1284,
"step": 66
},
{
"epoch": 0.76,
"eval_loss": 8.004860877990723,
"eval_runtime": 1.3748,
"eval_samples_per_second": 13.82,
"eval_steps_per_second": 3.637,
"step": 66
},
{
"epoch": 0.77,
"grad_norm": 41.635960690924975,
"learning_rate": 0.000134,
"loss": 8.0602,
"step": 67
},
{
"epoch": 0.78,
"grad_norm": 16.679869546723175,
"learning_rate": 0.00013600000000000003,
"loss": 7.8268,
"step": 68
},
{
"epoch": 0.79,
"grad_norm": 33.70142265056291,
"learning_rate": 0.000138,
"loss": 8.1206,
"step": 69
},
{
"epoch": 0.8,
"grad_norm": 43.29315748881759,
"learning_rate": 0.00014,
"loss": 7.7882,
"step": 70
},
{
"epoch": 0.82,
"grad_norm": 28.3799834727588,
"learning_rate": 0.000142,
"loss": 7.5454,
"step": 71
},
{
"epoch": 0.83,
"grad_norm": 30.911781055251613,
"learning_rate": 0.000144,
"loss": 7.7616,
"step": 72
},
{
"epoch": 0.84,
"grad_norm": 56.741836296758365,
"learning_rate": 0.000146,
"loss": 8.1145,
"step": 73
},
{
"epoch": 0.85,
"grad_norm": 13.967249574234195,
"learning_rate": 0.000148,
"loss": 7.6758,
"step": 74
},
{
"epoch": 0.86,
"grad_norm": 42.28071588262047,
"learning_rate": 0.00015000000000000001,
"loss": 7.5507,
"step": 75
},
{
"epoch": 0.87,
"grad_norm": 27.40577229308579,
"learning_rate": 0.000152,
"loss": 7.6852,
"step": 76
},
{
"epoch": 0.89,
"grad_norm": 21.422615778584905,
"learning_rate": 0.000154,
"loss": 7.6071,
"step": 77
},
{
"epoch": 0.9,
"grad_norm": 28.22569647336005,
"learning_rate": 0.00015600000000000002,
"loss": 7.5556,
"step": 78
},
{
"epoch": 0.91,
"grad_norm": 20.075502929468385,
"learning_rate": 0.00015800000000000002,
"loss": 7.529,
"step": 79
},
{
"epoch": 0.92,
"grad_norm": 14.896626626967638,
"learning_rate": 0.00016,
"loss": 7.5352,
"step": 80
},
{
"epoch": 0.93,
"grad_norm": 16.98565517398293,
"learning_rate": 0.000162,
"loss": 7.5734,
"step": 81
},
{
"epoch": 0.94,
"grad_norm": 17.76803489316823,
"learning_rate": 0.000164,
"loss": 7.591,
"step": 82
},
{
"epoch": 0.95,
"grad_norm": 15.601674972992182,
"learning_rate": 0.000166,
"loss": 7.4251,
"step": 83
},
{
"epoch": 0.97,
"grad_norm": 21.36580777081032,
"learning_rate": 0.000168,
"loss": 7.728,
"step": 84
},
{
"epoch": 0.98,
"grad_norm": 19.73380900316605,
"learning_rate": 0.00017,
"loss": 7.3548,
"step": 85
},
{
"epoch": 0.99,
"grad_norm": 16.40313028374367,
"learning_rate": 0.000172,
"loss": 7.3652,
"step": 86
},
{
"epoch": 1.0,
"grad_norm": 11.846265862818466,
"learning_rate": 0.000174,
"loss": 7.4363,
"step": 87
}
],
"logging_steps": 1,
"max_steps": 261,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 87,
"total_flos": 6.081355071671501e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}