vit-base-images / trainer_state.json
{
"best_metric": 0.09177211672067642,
"best_model_checkpoint": "./vit-base-images/checkpoint-1000",
"epoch": 4.0,
"eval_steps": 100,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04,
"grad_norm": 2.1739912033081055,
"learning_rate": 0.00019800000000000002,
"loss": 1.2716,
"step": 10
},
{
"epoch": 0.08,
"grad_norm": 2.4669973850250244,
"learning_rate": 0.000196,
"loss": 1.0636,
"step": 20
},
{
"epoch": 0.12,
"grad_norm": 1.3152661323547363,
"learning_rate": 0.000194,
"loss": 0.8686,
"step": 30
},
{
"epoch": 0.16,
"grad_norm": 1.30520761013031,
"learning_rate": 0.000192,
"loss": 0.8709,
"step": 40
},
{
"epoch": 0.2,
"grad_norm": 3.462522506713867,
"learning_rate": 0.00019,
"loss": 0.8241,
"step": 50
},
{
"epoch": 0.24,
"grad_norm": 2.773179531097412,
"learning_rate": 0.000188,
"loss": 0.743,
"step": 60
},
{
"epoch": 0.28,
"grad_norm": 0.8740523457527161,
"learning_rate": 0.00018600000000000002,
"loss": 0.7111,
"step": 70
},
{
"epoch": 0.32,
"grad_norm": 3.179422378540039,
"learning_rate": 0.00018400000000000003,
"loss": 0.7327,
"step": 80
},
{
"epoch": 0.36,
"grad_norm": 2.8877387046813965,
"learning_rate": 0.000182,
"loss": 0.8438,
"step": 90
},
{
"epoch": 0.4,
"grad_norm": 2.021406888961792,
"learning_rate": 0.00018,
"loss": 0.8785,
"step": 100
},
{
"epoch": 0.4,
"eval_accuracy": 0.711,
"eval_loss": 0.779495358467102,
"eval_runtime": 14.6516,
"eval_samples_per_second": 68.252,
"eval_steps_per_second": 8.531,
"step": 100
},
{
"epoch": 0.44,
"grad_norm": 1.4613615274429321,
"learning_rate": 0.00017800000000000002,
"loss": 0.767,
"step": 110
},
{
"epoch": 0.48,
"grad_norm": 1.6331909894943237,
"learning_rate": 0.00017600000000000002,
"loss": 0.9023,
"step": 120
},
{
"epoch": 0.52,
"grad_norm": 1.2065010070800781,
"learning_rate": 0.000174,
"loss": 0.7347,
"step": 130
},
{
"epoch": 0.56,
"grad_norm": 1.609735369682312,
"learning_rate": 0.000172,
"loss": 0.5344,
"step": 140
},
{
"epoch": 0.6,
"grad_norm": 3.425642251968384,
"learning_rate": 0.00017,
"loss": 0.5897,
"step": 150
},
{
"epoch": 0.64,
"grad_norm": 0.7095292210578918,
"learning_rate": 0.000168,
"loss": 0.6487,
"step": 160
},
{
"epoch": 0.68,
"grad_norm": 1.6857857704162598,
"learning_rate": 0.000166,
"loss": 0.479,
"step": 170
},
{
"epoch": 0.72,
"grad_norm": 3.3737733364105225,
"learning_rate": 0.000164,
"loss": 0.6547,
"step": 180
},
{
"epoch": 0.76,
"grad_norm": 2.8827691078186035,
"learning_rate": 0.000162,
"loss": 0.667,
"step": 190
},
{
"epoch": 0.8,
"grad_norm": 0.998505711555481,
"learning_rate": 0.00016,
"loss": 0.7076,
"step": 200
},
{
"epoch": 0.8,
"eval_accuracy": 0.818,
"eval_loss": 0.5420999526977539,
"eval_runtime": 14.8716,
"eval_samples_per_second": 67.242,
"eval_steps_per_second": 8.405,
"step": 200
},
{
"epoch": 0.84,
"grad_norm": 1.6601933240890503,
"learning_rate": 0.00015800000000000002,
"loss": 0.5418,
"step": 210
},
{
"epoch": 0.88,
"grad_norm": 2.71398663520813,
"learning_rate": 0.00015600000000000002,
"loss": 0.6696,
"step": 220
},
{
"epoch": 0.92,
"grad_norm": 3.1173503398895264,
"learning_rate": 0.000154,
"loss": 0.5191,
"step": 230
},
{
"epoch": 0.96,
"grad_norm": 1.7604912519454956,
"learning_rate": 0.000152,
"loss": 0.5596,
"step": 240
},
{
"epoch": 1.0,
"grad_norm": 1.8370306491851807,
"learning_rate": 0.00015000000000000001,
"loss": 0.4941,
"step": 250
},
{
"epoch": 1.04,
"grad_norm": 3.0072226524353027,
"learning_rate": 0.000148,
"loss": 0.417,
"step": 260
},
{
"epoch": 1.08,
"grad_norm": 2.4395639896392822,
"learning_rate": 0.000146,
"loss": 0.5287,
"step": 270
},
{
"epoch": 1.12,
"grad_norm": 1.1612118482589722,
"learning_rate": 0.000144,
"loss": 0.4771,
"step": 280
},
{
"epoch": 1.16,
"grad_norm": 1.5060698986053467,
"learning_rate": 0.000142,
"loss": 0.4211,
"step": 290
},
{
"epoch": 1.2,
"grad_norm": 2.4940316677093506,
"learning_rate": 0.00014,
"loss": 0.4283,
"step": 300
},
{
"epoch": 1.2,
"eval_accuracy": 0.876,
"eval_loss": 0.3951060175895691,
"eval_runtime": 14.0948,
"eval_samples_per_second": 70.948,
"eval_steps_per_second": 8.869,
"step": 300
},
{
"epoch": 1.24,
"grad_norm": 3.643969774246216,
"learning_rate": 0.000138,
"loss": 0.4377,
"step": 310
},
{
"epoch": 1.28,
"grad_norm": 1.954455852508545,
"learning_rate": 0.00013600000000000003,
"loss": 0.4311,
"step": 320
},
{
"epoch": 1.32,
"grad_norm": 1.4906481504440308,
"learning_rate": 0.000134,
"loss": 0.3393,
"step": 330
},
{
"epoch": 1.3599999999999999,
"grad_norm": 2.0885210037231445,
"learning_rate": 0.000132,
"loss": 0.4909,
"step": 340
},
{
"epoch": 1.4,
"grad_norm": 4.9808173179626465,
"learning_rate": 0.00013000000000000002,
"loss": 0.4071,
"step": 350
},
{
"epoch": 1.44,
"grad_norm": 2.143996477127075,
"learning_rate": 0.00012800000000000002,
"loss": 0.2979,
"step": 360
},
{
"epoch": 1.48,
"grad_norm": 5.164979457855225,
"learning_rate": 0.000126,
"loss": 0.4576,
"step": 370
},
{
"epoch": 1.52,
"grad_norm": 6.777462482452393,
"learning_rate": 0.000124,
"loss": 0.446,
"step": 380
},
{
"epoch": 1.56,
"grad_norm": 1.1243666410446167,
"learning_rate": 0.000122,
"loss": 0.3795,
"step": 390
},
{
"epoch": 1.6,
"grad_norm": 3.663288116455078,
"learning_rate": 0.00012,
"loss": 0.4251,
"step": 400
},
{
"epoch": 1.6,
"eval_accuracy": 0.864,
"eval_loss": 0.38176068663597107,
"eval_runtime": 14.993,
"eval_samples_per_second": 66.698,
"eval_steps_per_second": 8.337,
"step": 400
},
{
"epoch": 1.6400000000000001,
"grad_norm": 2.137402296066284,
"learning_rate": 0.000118,
"loss": 0.4443,
"step": 410
},
{
"epoch": 1.6800000000000002,
"grad_norm": 3.767395257949829,
"learning_rate": 0.000116,
"loss": 0.5193,
"step": 420
},
{
"epoch": 1.72,
"grad_norm": 1.152035117149353,
"learning_rate": 0.00011399999999999999,
"loss": 0.3036,
"step": 430
},
{
"epoch": 1.76,
"grad_norm": 5.046035289764404,
"learning_rate": 0.00011200000000000001,
"loss": 0.2667,
"step": 440
},
{
"epoch": 1.8,
"grad_norm": 1.6602838039398193,
"learning_rate": 0.00011000000000000002,
"loss": 0.3035,
"step": 450
},
{
"epoch": 1.8399999999999999,
"grad_norm": 2.508718729019165,
"learning_rate": 0.00010800000000000001,
"loss": 0.4932,
"step": 460
},
{
"epoch": 1.88,
"grad_norm": 2.9392433166503906,
"learning_rate": 0.00010600000000000002,
"loss": 0.4191,
"step": 470
},
{
"epoch": 1.92,
"grad_norm": 2.791663885116577,
"learning_rate": 0.00010400000000000001,
"loss": 0.3038,
"step": 480
},
{
"epoch": 1.96,
"grad_norm": 2.3044326305389404,
"learning_rate": 0.00010200000000000001,
"loss": 0.4298,
"step": 490
},
{
"epoch": 2.0,
"grad_norm": 3.1489250659942627,
"learning_rate": 0.0001,
"loss": 0.335,
"step": 500
},
{
"epoch": 2.0,
"eval_accuracy": 0.924,
"eval_loss": 0.24736037850379944,
"eval_runtime": 15.0667,
"eval_samples_per_second": 66.372,
"eval_steps_per_second": 8.296,
"step": 500
},
{
"epoch": 2.04,
"grad_norm": 3.3036346435546875,
"learning_rate": 9.8e-05,
"loss": 0.1649,
"step": 510
},
{
"epoch": 2.08,
"grad_norm": 2.8792152404785156,
"learning_rate": 9.6e-05,
"loss": 0.1627,
"step": 520
},
{
"epoch": 2.12,
"grad_norm": 0.28887757658958435,
"learning_rate": 9.4e-05,
"loss": 0.3599,
"step": 530
},
{
"epoch": 2.16,
"grad_norm": 5.17996883392334,
"learning_rate": 9.200000000000001e-05,
"loss": 0.3233,
"step": 540
},
{
"epoch": 2.2,
"grad_norm": 2.1433322429656982,
"learning_rate": 9e-05,
"loss": 0.222,
"step": 550
},
{
"epoch": 2.24,
"grad_norm": 3.143852949142456,
"learning_rate": 8.800000000000001e-05,
"loss": 0.2713,
"step": 560
},
{
"epoch": 2.2800000000000002,
"grad_norm": 0.4215773344039917,
"learning_rate": 8.6e-05,
"loss": 0.1708,
"step": 570
},
{
"epoch": 2.32,
"grad_norm": 1.9217822551727295,
"learning_rate": 8.4e-05,
"loss": 0.198,
"step": 580
},
{
"epoch": 2.36,
"grad_norm": 2.4554295539855957,
"learning_rate": 8.2e-05,
"loss": 0.1617,
"step": 590
},
{
"epoch": 2.4,
"grad_norm": 1.2291343212127686,
"learning_rate": 8e-05,
"loss": 0.2286,
"step": 600
},
{
"epoch": 2.4,
"eval_accuracy": 0.952,
"eval_loss": 0.16752035915851593,
"eval_runtime": 13.8882,
"eval_samples_per_second": 72.003,
"eval_steps_per_second": 9.0,
"step": 600
},
{
"epoch": 2.44,
"grad_norm": 0.3957996964454651,
"learning_rate": 7.800000000000001e-05,
"loss": 0.1758,
"step": 610
},
{
"epoch": 2.48,
"grad_norm": 2.093458414077759,
"learning_rate": 7.6e-05,
"loss": 0.138,
"step": 620
},
{
"epoch": 2.52,
"grad_norm": 1.130835771560669,
"learning_rate": 7.4e-05,
"loss": 0.1194,
"step": 630
},
{
"epoch": 2.56,
"grad_norm": 1.3829611539840698,
"learning_rate": 7.2e-05,
"loss": 0.1629,
"step": 640
},
{
"epoch": 2.6,
"grad_norm": 0.17628225684165955,
"learning_rate": 7e-05,
"loss": 0.0954,
"step": 650
},
{
"epoch": 2.64,
"grad_norm": 1.7156352996826172,
"learning_rate": 6.800000000000001e-05,
"loss": 0.172,
"step": 660
},
{
"epoch": 2.68,
"grad_norm": 3.950498342514038,
"learning_rate": 6.6e-05,
"loss": 0.1414,
"step": 670
},
{
"epoch": 2.7199999999999998,
"grad_norm": 3.134085178375244,
"learning_rate": 6.400000000000001e-05,
"loss": 0.1734,
"step": 680
},
{
"epoch": 2.76,
"grad_norm": 4.362244129180908,
"learning_rate": 6.2e-05,
"loss": 0.1806,
"step": 690
},
{
"epoch": 2.8,
"grad_norm": 5.472875118255615,
"learning_rate": 6e-05,
"loss": 0.1523,
"step": 700
},
{
"epoch": 2.8,
"eval_accuracy": 0.954,
"eval_loss": 0.1640758216381073,
"eval_runtime": 14.3922,
"eval_samples_per_second": 69.482,
"eval_steps_per_second": 8.685,
"step": 700
},
{
"epoch": 2.84,
"grad_norm": 1.1265697479248047,
"learning_rate": 5.8e-05,
"loss": 0.1939,
"step": 710
},
{
"epoch": 2.88,
"grad_norm": 3.1513569355010986,
"learning_rate": 5.6000000000000006e-05,
"loss": 0.1369,
"step": 720
},
{
"epoch": 2.92,
"grad_norm": 0.14964208006858826,
"learning_rate": 5.4000000000000005e-05,
"loss": 0.1372,
"step": 730
},
{
"epoch": 2.96,
"grad_norm": 0.1409606784582138,
"learning_rate": 5.2000000000000004e-05,
"loss": 0.1503,
"step": 740
},
{
"epoch": 3.0,
"grad_norm": 0.1397903859615326,
"learning_rate": 5e-05,
"loss": 0.1655,
"step": 750
},
{
"epoch": 3.04,
"grad_norm": 0.6654576063156128,
"learning_rate": 4.8e-05,
"loss": 0.0662,
"step": 760
},
{
"epoch": 3.08,
"grad_norm": 0.11212094128131866,
"learning_rate": 4.600000000000001e-05,
"loss": 0.0362,
"step": 770
},
{
"epoch": 3.12,
"grad_norm": 0.8599291443824768,
"learning_rate": 4.4000000000000006e-05,
"loss": 0.0545,
"step": 780
},
{
"epoch": 3.16,
"grad_norm": 0.0967254787683487,
"learning_rate": 4.2e-05,
"loss": 0.0428,
"step": 790
},
{
"epoch": 3.2,
"grad_norm": 1.8969135284423828,
"learning_rate": 4e-05,
"loss": 0.1346,
"step": 800
},
{
"epoch": 3.2,
"eval_accuracy": 0.969,
"eval_loss": 0.11203579604625702,
"eval_runtime": 14.1593,
"eval_samples_per_second": 70.625,
"eval_steps_per_second": 8.828,
"step": 800
},
{
"epoch": 3.24,
"grad_norm": 0.32526883482933044,
"learning_rate": 3.8e-05,
"loss": 0.0576,
"step": 810
},
{
"epoch": 3.2800000000000002,
"grad_norm": 3.220393419265747,
"learning_rate": 3.6e-05,
"loss": 0.0532,
"step": 820
},
{
"epoch": 3.32,
"grad_norm": 0.09663262218236923,
"learning_rate": 3.4000000000000007e-05,
"loss": 0.0832,
"step": 830
},
{
"epoch": 3.36,
"grad_norm": 0.07815458625555038,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.0653,
"step": 840
},
{
"epoch": 3.4,
"grad_norm": 0.09688442200422287,
"learning_rate": 3e-05,
"loss": 0.0763,
"step": 850
},
{
"epoch": 3.44,
"grad_norm": 0.07274580001831055,
"learning_rate": 2.8000000000000003e-05,
"loss": 0.04,
"step": 860
},
{
"epoch": 3.48,
"grad_norm": 0.06821909546852112,
"learning_rate": 2.6000000000000002e-05,
"loss": 0.0778,
"step": 870
},
{
"epoch": 3.52,
"grad_norm": 1.8587623834609985,
"learning_rate": 2.4e-05,
"loss": 0.0563,
"step": 880
},
{
"epoch": 3.56,
"grad_norm": 0.09389644116163254,
"learning_rate": 2.2000000000000003e-05,
"loss": 0.1213,
"step": 890
},
{
"epoch": 3.6,
"grad_norm": 6.0940117835998535,
"learning_rate": 2e-05,
"loss": 0.0638,
"step": 900
},
{
"epoch": 3.6,
"eval_accuracy": 0.978,
"eval_loss": 0.10251828283071518,
"eval_runtime": 14.4118,
"eval_samples_per_second": 69.388,
"eval_steps_per_second": 8.673,
"step": 900
},
{
"epoch": 3.64,
"grad_norm": 0.1492830514907837,
"learning_rate": 1.8e-05,
"loss": 0.0821,
"step": 910
},
{
"epoch": 3.68,
"grad_norm": 0.07156217098236084,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.023,
"step": 920
},
{
"epoch": 3.7199999999999998,
"grad_norm": 0.07530596107244492,
"learning_rate": 1.4000000000000001e-05,
"loss": 0.0821,
"step": 930
},
{
"epoch": 3.76,
"grad_norm": 0.061225246638059616,
"learning_rate": 1.2e-05,
"loss": 0.0518,
"step": 940
},
{
"epoch": 3.8,
"grad_norm": 0.15823562443256378,
"learning_rate": 1e-05,
"loss": 0.0286,
"step": 950
},
{
"epoch": 3.84,
"grad_norm": 0.09221441298723221,
"learning_rate": 8.000000000000001e-06,
"loss": 0.0364,
"step": 960
},
{
"epoch": 3.88,
"grad_norm": 0.05617209151387215,
"learning_rate": 6e-06,
"loss": 0.0498,
"step": 970
},
{
"epoch": 3.92,
"grad_norm": 0.08119833469390869,
"learning_rate": 4.000000000000001e-06,
"loss": 0.049,
"step": 980
},
{
"epoch": 3.96,
"grad_norm": 0.08950098603963852,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.0642,
"step": 990
},
{
"epoch": 4.0,
"grad_norm": 0.07529381662607193,
"learning_rate": 0.0,
"loss": 0.0574,
"step": 1000
},
{
"epoch": 4.0,
"eval_accuracy": 0.981,
"eval_loss": 0.09177211672067642,
"eval_runtime": 15.0707,
"eval_samples_per_second": 66.354,
"eval_steps_per_second": 8.294,
"step": 1000
},
{
"epoch": 4.0,
"step": 1000,
"total_flos": 1.239905171570688e+18,
"train_loss": 0.3459155881404877,
"train_runtime": 562.8746,
"train_samples_per_second": 28.426,
"train_steps_per_second": 1.777
}
],
"logging_steps": 10,
"max_steps": 1000,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.239905171570688e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
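
The log_history array above interleaves training records (written every 10 steps, carrying "loss", "grad_norm", and "learning_rate") with evaluation records (written every 100 steps, carrying "eval_accuracy" and "eval_loss"). The sketch below shows one way the two kinds of entries could be separated and plotted; it is not part of the original file. The local path trainer_state.json, the matplotlib dependency, and the output filename are assumptions for illustration, while the keys ("log_history", "loss", "eval_accuracy", "step") come from the state file itself.

import json

import matplotlib.pyplot as plt

# Load the trainer state shown above (local path is an assumption).
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries carry a "loss" key; evaluation entries carry "eval_accuracy".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_accuracy" in e]

fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(10, 4))

ax_loss.plot([e["step"] for e in train_logs], [e["loss"] for e in train_logs])
ax_loss.set_xlabel("step")
ax_loss.set_ylabel("training loss")

ax_acc.plot([e["step"] for e in eval_logs], [e["eval_accuracy"] for e in eval_logs])
ax_acc.set_xlabel("step")
ax_acc.set_ylabel("eval accuracy")

fig.tight_layout()
fig.savefig("training_curves.png")  # illustrative output name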