{
  "best_metric": 1.5942937135696411,
  "best_model_checkpoint": "detr-r101-cd45rb-8ah-6l-256d-4096ffn-correcetd\\checkpoint-92120",
  "epoch": 20.0,
  "global_step": 92120,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "learning_rate": 9.501085540599219e-06,
      "loss": 2.3194,
      "step": 4606
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.7874349355697632,
      "eval_runtime": 255.9695,
      "eval_samples_per_second": 6.958,
      "eval_steps_per_second": 0.871,
      "step": 4606
    },
    {
      "epoch": 2.0,
      "learning_rate": 9.001085540599219e-06,
      "loss": 2.3461,
      "step": 9212
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.8375755548477173,
      "eval_runtime": 255.8256,
      "eval_samples_per_second": 6.962,
      "eval_steps_per_second": 0.872,
      "step": 9212
    },
    {
      "epoch": 3.0,
      "learning_rate": 8.501302648719063e-06,
      "loss": 2.3028,
      "step": 13818
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.9087122678756714,
      "eval_runtime": 255.4494,
      "eval_samples_per_second": 6.972,
      "eval_steps_per_second": 0.873,
      "step": 13818
    },
    {
      "epoch": 4.0,
      "learning_rate": 8.001628310898828e-06,
      "loss": 2.2979,
      "step": 18424
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.906615972518921,
      "eval_runtime": 255.8122,
      "eval_samples_per_second": 6.962,
      "eval_steps_per_second": 0.872,
      "step": 18424
    },
    {
      "epoch": 5.0,
      "learning_rate": 7.501736864958749e-06,
      "loss": 2.273,
      "step": 23030
    },
    {
      "epoch": 5.0,
      "eval_loss": 1.7688573598861694,
      "eval_runtime": 223.3286,
      "eval_samples_per_second": 7.975,
      "eval_steps_per_second": 0.999,
      "step": 23030
    },
    {
      "epoch": 6.0,
      "learning_rate": 7.001845419018671e-06,
      "loss": 2.2372,
      "step": 27636
    },
    {
      "epoch": 6.0,
      "eval_loss": 1.850079894065857,
      "eval_runtime": 222.161,
      "eval_samples_per_second": 8.017,
      "eval_steps_per_second": 1.004,
      "step": 27636
    },
    {
      "epoch": 7.0,
      "learning_rate": 6.501953973078594e-06,
      "loss": 2.2429,
      "step": 32242
    },
    {
      "epoch": 7.0,
      "eval_loss": 1.7625480890274048,
      "eval_runtime": 221.7461,
      "eval_samples_per_second": 8.032,
      "eval_steps_per_second": 1.006,
      "step": 32242
    },
    {
      "epoch": 8.0,
      "learning_rate": 6.0021710811984364e-06,
      "loss": 2.2066,
      "step": 36848
    },
    {
      "epoch": 8.0,
      "eval_loss": 1.7588337659835815,
      "eval_runtime": 221.9488,
      "eval_samples_per_second": 8.024,
      "eval_steps_per_second": 1.005,
      "step": 36848
    },
    {
      "epoch": 9.0,
      "learning_rate": 5.5023881893182805e-06,
      "loss": 2.1914,
      "step": 41454
    },
    {
      "epoch": 9.0,
      "eval_loss": 1.7061983346939087,
      "eval_runtime": 260.572,
      "eval_samples_per_second": 6.835,
      "eval_steps_per_second": 0.856,
      "step": 41454
    },
    {
      "epoch": 10.0,
      "learning_rate": 5.002605297438125e-06,
      "loss": 2.1553,
      "step": 46060
    },
    {
      "epoch": 10.0,
      "eval_loss": 1.706916093826294,
      "eval_runtime": 254.0051,
      "eval_samples_per_second": 7.012,
      "eval_steps_per_second": 0.878,
      "step": 46060
    },
    {
      "epoch": 11.0,
      "learning_rate": 4.502822405557969e-06,
      "loss": 2.1302,
      "step": 50666
    },
    {
      "epoch": 11.0,
      "eval_loss": 1.6863188743591309,
      "eval_runtime": 253.207,
      "eval_samples_per_second": 7.034,
      "eval_steps_per_second": 0.881,
      "step": 50666
    },
    {
      "epoch": 12.0,
      "learning_rate": 4.003039513677812e-06,
      "loss": 2.1321,
      "step": 55272
    },
    {
      "epoch": 12.0,
      "eval_loss": 1.7721848487854004,
      "eval_runtime": 253.9074,
      "eval_samples_per_second": 7.014,
      "eval_steps_per_second": 0.878,
      "step": 55272
    },
    {
      "epoch": 13.0,
      "learning_rate": 3.503148067737734e-06,
      "loss": 2.1227,
      "step": 59878
    },
    {
      "epoch": 13.0,
      "eval_loss": 1.6639100313186646,
      "eval_runtime": 254.2906,
      "eval_samples_per_second": 7.004,
      "eval_steps_per_second": 0.877,
      "step": 59878
    },
    {
      "epoch": 14.0,
      "learning_rate": 3.003365175857577e-06,
      "loss": 2.087,
      "step": 64484
    },
    {
      "epoch": 14.0,
      "eval_loss": 1.6486269235610962,
      "eval_runtime": 254.2198,
      "eval_samples_per_second": 7.006,
      "eval_steps_per_second": 0.877,
      "step": 64484
    },
    {
      "epoch": 15.0,
      "learning_rate": 2.5036908380373426e-06,
      "loss": 2.0676,
      "step": 69090
    },
    {
      "epoch": 15.0,
      "eval_loss": 1.6632195711135864,
      "eval_runtime": 253.8344,
      "eval_samples_per_second": 7.016,
      "eval_steps_per_second": 0.879,
      "step": 69090
    },
    {
      "epoch": 16.0,
      "learning_rate": 2.0036908380373427e-06,
      "loss": 2.0604,
      "step": 73696
    },
    {
      "epoch": 16.0,
      "eval_loss": 1.6428617238998413,
      "eval_runtime": 252.8055,
      "eval_samples_per_second": 7.045,
      "eval_steps_per_second": 0.882,
      "step": 73696
    },
    {
      "epoch": 17.0,
      "learning_rate": 1.5039079461571866e-06,
      "loss": 2.0435,
      "step": 78302
    },
    {
      "epoch": 17.0,
      "eval_loss": 1.6223891973495483,
      "eval_runtime": 252.2863,
      "eval_samples_per_second": 7.059,
      "eval_steps_per_second": 0.884,
      "step": 78302
    },
    {
      "epoch": 18.0,
      "learning_rate": 1.004233608336952e-06,
      "loss": 2.0241,
      "step": 82908
    },
    {
      "epoch": 18.0,
      "eval_loss": 1.632068157196045,
      "eval_runtime": 255.263,
      "eval_samples_per_second": 6.977,
      "eval_steps_per_second": 0.874,
      "step": 82908
    },
    {
      "epoch": 19.0,
      "learning_rate": 5.044507164567955e-07,
      "loss": 2.0041,
      "step": 87514
    },
    {
      "epoch": 19.0,
      "eval_loss": 1.5970158576965332,
      "eval_runtime": 252.5106,
      "eval_samples_per_second": 7.053,
      "eval_steps_per_second": 0.883,
      "step": 87514
    },
    {
      "epoch": 20.0,
      "learning_rate": 4.559270516717325e-09,
      "loss": 1.9966,
      "step": 92120
    },
    {
      "epoch": 20.0,
      "eval_loss": 1.5942937135696411,
      "eval_runtime": 253.5474,
      "eval_samples_per_second": 7.024,
      "eval_steps_per_second": 0.88,
      "step": 92120
    },
    {
      "epoch": 20.0,
      "step": 92120,
      "total_flos": 3.09958520212224e+20,
      "train_loss": 2.1620497868778497,
      "train_runtime": 79274.0231,
      "train_samples_per_second": 4.647,
      "train_steps_per_second": 1.162
    }
  ],
  "max_steps": 92120,
  "num_train_epochs": 20,
  "total_flos": 3.09958520212224e+20,
  "trial_name": null,
  "trial_params": null
}