poa-recognition-v.5 / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1261.8181818181818,
"eval_steps": 500,
"global_step": 3470,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 36.36363636363637,
"grad_norm": 3.4013822078704834,
"learning_rate": 0.00019994268257026118,
"loss": 3.996,
"step": 100
},
{
"epoch": 72.72727272727273,
"grad_norm": 5.365049362182617,
"learning_rate": 0.000199766047623841,
"loss": 1.3298,
"step": 200
},
{
"epoch": 109.0909090909091,
"grad_norm": 11.665435791015625,
"learning_rate": 0.00019946668751528744,
"loss": 0.727,
"step": 300
},
{
"epoch": 145.45454545454547,
"grad_norm": 4.6548357009887695,
"learning_rate": 0.0001990509463238309,
"loss": 0.523,
"step": 400
},
{
"epoch": 181.8181818181818,
"grad_norm": 5.07234525680542,
"learning_rate": 0.0001985109326154774,
"loss": 0.3655,
"step": 500
},
{
"epoch": 218.1818181818182,
"grad_norm": 7.260560035705566,
"learning_rate": 0.00019785089851017787,
"loss": 0.273,
"step": 600
},
{
"epoch": 254.54545454545453,
"grad_norm": 4.73112154006958,
"learning_rate": 0.00019707164815781908,
"loss": 0.2198,
"step": 700
},
{
"epoch": 290.90909090909093,
"grad_norm": 0.689785361289978,
"learning_rate": 0.00019617413095492114,
"loss": 0.1656,
"step": 800
},
{
"epoch": 327.27272727272725,
"grad_norm": 3.2497429847717285,
"learning_rate": 0.00019515944038794384,
"loss": 0.1227,
"step": 900
},
{
"epoch": 363.6363636363636,
"grad_norm": 8.12833309173584,
"learning_rate": 0.0001940288127010419,
"loss": 0.0802,
"step": 1000
},
{
"epoch": 400.0,
"grad_norm": 5.075193405151367,
"learning_rate": 0.000192783625389892,
"loss": 0.0617,
"step": 1100
},
{
"epoch": 436.3636363636364,
"grad_norm": 2.8493974208831787,
"learning_rate": 0.00019142539552342638,
"loss": 0.0404,
"step": 1200
},
{
"epoch": 472.72727272727275,
"grad_norm": 0.399513840675354,
"learning_rate": 0.00018995577789551803,
"loss": 0.0327,
"step": 1300
},
{
"epoch": 509.09090909090907,
"grad_norm": 1.5895440578460693,
"learning_rate": 0.00018837656300886937,
"loss": 0.0355,
"step": 1400
},
{
"epoch": 545.4545454545455,
"grad_norm": 0.0846463143825531,
"learning_rate": 0.00018668967489356028,
"loss": 0.0127,
"step": 1500
},
{
"epoch": 581.8181818181819,
"grad_norm": 0.04372192919254303,
"learning_rate": 0.00018489716876291415,
"loss": 0.0203,
"step": 1600
},
{
"epoch": 618.1818181818181,
"grad_norm": 0.09042660892009735,
"learning_rate": 0.00018300122850953675,
"loss": 0.0125,
"step": 1700
},
{
"epoch": 654.5454545454545,
"grad_norm": 0.06565279513597488,
"learning_rate": 0.00018100416404457961,
"loss": 0.0124,
"step": 1800
},
{
"epoch": 690.9090909090909,
"grad_norm": 0.4747157692909241,
"learning_rate": 0.00017890840848346908,
"loss": 0.0159,
"step": 1900
},
{
"epoch": 727.2727272727273,
"grad_norm": 0.011907286942005157,
"learning_rate": 0.00017671651518153,
"loss": 0.0092,
"step": 2000
},
{
"epoch": 763.6363636363636,
"grad_norm": 0.16101345419883728,
"learning_rate": 0.0001744311546231154,
"loss": 0.0085,
"step": 2100
},
{
"epoch": 800.0,
"grad_norm": 0.4659230411052704,
"learning_rate": 0.00017205511116803306,
"loss": 0.0084,
"step": 2200
},
{
"epoch": 836.3636363636364,
"grad_norm": 0.026558930054306984,
"learning_rate": 0.00016959127965923142,
"loss": 0.011,
"step": 2300
},
{
"epoch": 872.7272727272727,
"grad_norm": 0.07431953400373459,
"learning_rate": 0.0001670426618958799,
"loss": 0.012,
"step": 2400
},
{
"epoch": 909.0909090909091,
"grad_norm": 0.17360587418079376,
"learning_rate": 0.00016441236297613866,
"loss": 0.0102,
"step": 2500
},
{
"epoch": 945.4545454545455,
"grad_norm": 0.031083501875400543,
"learning_rate": 0.00016173105296858452,
"loss": 0.0068,
"step": 2600
},
{
"epoch": 981.8181818181819,
"grad_norm": 0.3104077875614166,
"learning_rate": 0.00015894783631276567,
"loss": 0.0053,
"step": 2700
},
{
"epoch": 1018.1818181818181,
"grad_norm": 0.002124856458976865,
"learning_rate": 0.0001560928007986178,
"loss": 0.0062,
"step": 2800
},
{
"epoch": 1054.5454545454545,
"grad_norm": 0.005405977368354797,
"learning_rate": 0.0001531694248471304,
"loss": 0.0033,
"step": 2900
},
{
"epoch": 1090.909090909091,
"grad_norm": 0.008714040741324425,
"learning_rate": 0.00015018127014158886,
"loss": 0.0071,
"step": 3000
},
{
"epoch": 1127.2727272727273,
"grad_norm": 0.04300049692392349,
"learning_rate": 0.00014713197728821183,
"loss": 0.0072,
"step": 3100
},
{
"epoch": 1163.6363636363637,
"grad_norm": 0.00944207701832056,
"learning_rate": 0.00014402526138063373,
"loss": 0.006,
"step": 3200
},
{
"epoch": 1200.0,
"grad_norm": 0.004597234074026346,
"learning_rate": 0.0001408967639298663,
"loss": 0.0138,
"step": 3300
},
{
"epoch": 1236.3636363636363,
"grad_norm": 0.07871837168931961,
"learning_rate": 0.00013768710104121627,
"loss": 0.0069,
"step": 3400
}
],
"logging_steps": 100,
"max_steps": 9000,
"num_input_tokens_seen": 0,
"num_train_epochs": 4500,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.0175353452509184e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
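
The JSON above is the checkpoint metadata that the Hugging Face transformers Trainer writes alongside model weights; the "log_history" array holds one record per logging step. A minimal sketch for inspecting it, assuming the file is saved locally as trainer_state.json (the path is an assumption; only the Python standard library is used):

# sketch: load trainer_state.json and print the logged training curve
import json

with open("trainer_state.json") as f:  # assumed local filename
    state = json.load(f)

print(f"global_step={state['global_step']}  epoch={state['epoch']:.2f}")
for entry in state["log_history"]:
    # every record in this file carries step, epoch, loss, learning_rate, grad_norm
    print(
        f"step {entry['step']:>5}  "
        f"epoch {entry['epoch']:9.2f}  "
        f"loss {entry['loss']:.4f}  "
        f"lr {entry['learning_rate']:.3e}  "
        f"grad_norm {entry['grad_norm']:.3f}"
    )

Running it against this state file would list the 34 logged steps (100 through 3400), showing the loss falling from roughly 4.0 to below 0.01 while the learning rate decays from about 2.0e-4.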