{
  "best_metric": 0.9886363636363636,
  "best_model_checkpoint": "swin-large-patch4-window12-384-in22k-finetuned-batch8/checkpoint-98",
  "epoch": 2.984771573604061,
  "eval_steps": 500,
  "global_step": 147,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.20304568527918782,
      "grad_norm": 6.88034200668335,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.643,
      "step": 10
    },
    {
      "epoch": 0.40609137055837563,
      "grad_norm": 19.021066665649414,
      "learning_rate": 4.810606060606061e-05,
      "loss": 0.3831,
      "step": 20
    },
    {
      "epoch": 0.6091370558375635,
      "grad_norm": 9.288772583007812,
      "learning_rate": 4.431818181818182e-05,
      "loss": 0.1721,
      "step": 30
    },
    {
      "epoch": 0.8121827411167513,
      "grad_norm": 17.481571197509766,
      "learning_rate": 4.053030303030303e-05,
      "loss": 0.2123,
      "step": 40
    },
    {
      "epoch": 0.9949238578680203,
      "eval_accuracy": 0.9659090909090909,
      "eval_loss": 0.09163513779640198,
      "eval_runtime": 15.1203,
      "eval_samples_per_second": 11.64,
      "eval_steps_per_second": 1.455,
      "step": 49
    },
    {
      "epoch": 1.015228426395939,
      "grad_norm": 8.028657913208008,
      "learning_rate": 3.6742424242424246e-05,
      "loss": 0.2193,
      "step": 50
    },
    {
      "epoch": 1.218274111675127,
      "grad_norm": 14.979452133178711,
      "learning_rate": 3.295454545454545e-05,
      "loss": 0.2239,
      "step": 60
    },
    {
      "epoch": 1.4213197969543148,
      "grad_norm": 8.288687705993652,
      "learning_rate": 2.916666666666667e-05,
      "loss": 0.1636,
      "step": 70
    },
    {
      "epoch": 1.6243654822335025,
      "grad_norm": 9.035058975219727,
      "learning_rate": 2.537878787878788e-05,
      "loss": 0.1848,
      "step": 80
    },
    {
      "epoch": 1.8274111675126905,
      "grad_norm": 18.96419906616211,
      "learning_rate": 2.1590909090909093e-05,
      "loss": 0.1613,
      "step": 90
    },
    {
      "epoch": 1.9898477157360406,
      "eval_accuracy": 0.9886363636363636,
      "eval_loss": 0.04295675456523895,
      "eval_runtime": 15.0722,
      "eval_samples_per_second": 11.677,
      "eval_steps_per_second": 1.46,
      "step": 98
    },
    {
      "epoch": 2.030456852791878,
      "grad_norm": 5.229619979858398,
      "learning_rate": 1.7803030303030303e-05,
      "loss": 0.1375,
      "step": 100
    },
    {
      "epoch": 2.233502538071066,
      "grad_norm": 6.713926792144775,
      "learning_rate": 1.4015151515151515e-05,
      "loss": 0.1352,
      "step": 110
    },
    {
      "epoch": 2.436548223350254,
      "grad_norm": 14.589942932128906,
      "learning_rate": 1.0227272727272729e-05,
      "loss": 0.1208,
      "step": 120
    },
    {
      "epoch": 2.6395939086294415,
      "grad_norm": 1.9984550476074219,
      "learning_rate": 6.43939393939394e-06,
      "loss": 0.107,
      "step": 130
    },
    {
      "epoch": 2.8426395939086295,
      "grad_norm": 10.917881965637207,
      "learning_rate": 2.651515151515152e-06,
      "loss": 0.116,
      "step": 140
    },
    {
      "epoch": 2.984771573604061,
      "eval_accuracy": 0.9886363636363636,
      "eval_loss": 0.034594591706991196,
      "eval_runtime": 15.0628,
      "eval_samples_per_second": 11.684,
      "eval_steps_per_second": 1.461,
      "step": 147
    },
    {
      "epoch": 2.984771573604061,
      "step": 147,
      "total_flos": 2.437168844903547e+18,
      "train_loss": 0.20754414107523808,
      "train_runtime": 1195.5632,
      "train_samples_per_second": 3.955,
      "train_steps_per_second": 0.123
    }
  ],
  "logging_steps": 10,
  "max_steps": 147,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 2.437168844903547e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}