{
  "best_metric": 0.9696969696969697,
  "best_model_checkpoint": "vit-large-patch16-224-in21k-dungeon-geo-morphs-denoised-04Dec24-001/checkpoint-60",
  "epoch": 32.0,
  "eval_steps": 10,
  "global_step": 80,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 4.0,
      "grad_norm": 16.180925369262695,
      "learning_rate": 1.9444444444444445e-05,
      "loss": 1.3958,
      "step": 10
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.8383838383838383,
      "eval_loss": 0.9618854522705078,
      "eval_runtime": 6.2572,
      "eval_samples_per_second": 79.109,
      "eval_steps_per_second": 9.909,
      "step": 10
    },
    {
      "epoch": 8.0,
      "grad_norm": 6.994846820831299,
      "learning_rate": 1.6666666666666667e-05,
      "loss": 0.4596,
      "step": 20
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.9333333333333333,
      "eval_loss": 0.3876033425331116,
      "eval_runtime": 6.6276,
      "eval_samples_per_second": 74.687,
      "eval_steps_per_second": 9.355,
      "step": 20
    },
    {
      "epoch": 12.0,
      "grad_norm": 2.357105016708374,
      "learning_rate": 1.388888888888889e-05,
      "loss": 0.1012,
      "step": 30
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9616161616161616,
      "eval_loss": 0.1927802562713623,
      "eval_runtime": 6.8814,
      "eval_samples_per_second": 71.933,
      "eval_steps_per_second": 9.01,
      "step": 30
    },
    {
      "epoch": 16.0,
      "grad_norm": 0.5583979487419128,
      "learning_rate": 1.1111111111111113e-05,
      "loss": 0.022,
      "step": 40
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.9636363636363636,
      "eval_loss": 0.11810477077960968,
      "eval_runtime": 6.7616,
      "eval_samples_per_second": 73.207,
      "eval_steps_per_second": 9.169,
      "step": 40
    },
    {
      "epoch": 20.0,
      "grad_norm": 0.18477365374565125,
      "learning_rate": 8.333333333333334e-06,
      "loss": 0.0066,
      "step": 50
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.9676767676767677,
      "eval_loss": 0.09357820451259613,
      "eval_runtime": 6.4967,
      "eval_samples_per_second": 76.192,
      "eval_steps_per_second": 9.543,
      "step": 50
    },
    {
      "epoch": 24.0,
      "grad_norm": 0.11407925933599472,
      "learning_rate": 5.555555555555557e-06,
      "loss": 0.0036,
      "step": 60
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.9696969696969697,
      "eval_loss": 0.0862530842423439,
      "eval_runtime": 6.5373,
      "eval_samples_per_second": 75.72,
      "eval_steps_per_second": 9.484,
      "step": 60
    },
    {
      "epoch": 28.0,
      "grad_norm": 0.09305387735366821,
      "learning_rate": 2.7777777777777783e-06,
      "loss": 0.0028,
      "step": 70
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.9696969696969697,
      "eval_loss": 0.08481135219335556,
      "eval_runtime": 7.1926,
      "eval_samples_per_second": 68.821,
      "eval_steps_per_second": 8.62,
      "step": 70
    },
    {
      "epoch": 32.0,
      "grad_norm": 0.08653579652309418,
      "learning_rate": 0.0,
      "loss": 0.0025,
      "step": 80
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.9696969696969697,
      "eval_loss": 0.08255013823509216,
      "eval_runtime": 6.77,
      "eval_samples_per_second": 73.117,
      "eval_steps_per_second": 9.158,
      "step": 80
    },
    {
      "epoch": 32.0,
      "step": 80,
      "total_flos": 7.012786101918106e+17,
      "train_loss": 0.24926918111741542,
      "train_runtime": 387.0012,
      "train_samples_per_second": 8.269,
      "train_steps_per_second": 0.207
    }
  ],
  "logging_steps": 10,
  "max_steps": 80,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.012786101918106e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}