{
  "best_metric": 0.8074946999549866,
  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e3l56-l/checkpoint-10500",
  "epoch": 2.026529108327192,
  "eval_steps": 500,
  "global_step": 11000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09211495946941783,
      "grad_norm": 18.028329849243164,
      "learning_rate": 4.846475067550971e-06,
      "loss": 0.2253,
      "step": 500
    },
    {
      "epoch": 0.09211495946941783,
      "eval_loss": 1.4693936109542847,
      "eval_runtime": 74.2802,
      "eval_samples_per_second": 16.249,
      "eval_steps_per_second": 2.033,
      "step": 500
    },
    {
      "epoch": 0.18422991893883567,
      "grad_norm": 0.4609026312828064,
      "learning_rate": 4.692950135101941e-06,
      "loss": 0.2544,
      "step": 1000
    },
    {
      "epoch": 0.18422991893883567,
      "eval_loss": 1.5420819520950317,
      "eval_runtime": 74.0766,
      "eval_samples_per_second": 16.294,
      "eval_steps_per_second": 2.038,
      "step": 1000
    },
    {
      "epoch": 0.2763448784082535,
      "grad_norm": 71.96737670898438,
      "learning_rate": 4.5394252026529115e-06,
      "loss": 0.3236,
      "step": 1500
    },
    {
      "epoch": 0.2763448784082535,
      "eval_loss": 1.4642689228057861,
      "eval_runtime": 74.2336,
      "eval_samples_per_second": 16.259,
      "eval_steps_per_second": 2.034,
      "step": 1500
    },
    {
      "epoch": 0.36845983787767134,
      "grad_norm": 6.366127069945549e-10,
      "learning_rate": 4.385900270203882e-06,
      "loss": 0.1888,
      "step": 2000
    },
    {
      "epoch": 0.36845983787767134,
      "eval_loss": 1.3193777799606323,
      "eval_runtime": 76.1214,
      "eval_samples_per_second": 15.856,
      "eval_steps_per_second": 1.984,
      "step": 2000
    },
    {
      "epoch": 0.46057479734708917,
      "grad_norm": 452.8532409667969,
      "learning_rate": 4.232375337754851e-06,
      "loss": 0.2563,
      "step": 2500
    },
    {
      "epoch": 0.46057479734708917,
      "eval_loss": 1.37644362449646,
      "eval_runtime": 76.3265,
      "eval_samples_per_second": 15.814,
      "eval_steps_per_second": 1.978,
      "step": 2500
    },
    {
      "epoch": 0.552689756816507,
      "grad_norm": 0.0002858435327652842,
      "learning_rate": 4.078850405305822e-06,
      "loss": 0.2794,
      "step": 3000
    },
    {
      "epoch": 0.552689756816507,
      "eval_loss": 1.3007307052612305,
      "eval_runtime": 76.3512,
      "eval_samples_per_second": 15.809,
      "eval_steps_per_second": 1.978,
      "step": 3000
    },
    {
      "epoch": 0.6448047162859248,
      "grad_norm": 0.324462890625,
      "learning_rate": 3.925325472856792e-06,
      "loss": 0.1749,
      "step": 3500
    },
    {
      "epoch": 0.6448047162859248,
      "eval_loss": 1.3360542058944702,
      "eval_runtime": 74.5379,
      "eval_samples_per_second": 16.193,
      "eval_steps_per_second": 2.026,
      "step": 3500
    },
    {
      "epoch": 0.7369196757553427,
      "grad_norm": 76.63452911376953,
      "learning_rate": 3.7718005404077624e-06,
      "loss": 0.2672,
      "step": 4000
    },
    {
      "epoch": 0.7369196757553427,
      "eval_loss": 1.268362283706665,
      "eval_runtime": 76.6253,
      "eval_samples_per_second": 15.752,
      "eval_steps_per_second": 1.971,
      "step": 4000
    },
    {
      "epoch": 0.8290346352247605,
      "grad_norm": 1.9390699863433838,
      "learning_rate": 3.6182756079587327e-06,
      "loss": 0.218,
      "step": 4500
    },
    {
      "epoch": 0.8290346352247605,
      "eval_loss": 1.1064503192901611,
      "eval_runtime": 75.0503,
      "eval_samples_per_second": 16.083,
      "eval_steps_per_second": 2.012,
      "step": 4500
    },
    {
      "epoch": 0.9211495946941783,
      "grad_norm": 3.37814665130054e-10,
      "learning_rate": 3.464750675509703e-06,
      "loss": 0.1665,
      "step": 5000
    },
    {
      "epoch": 0.9211495946941783,
      "eval_loss": 1.0620027780532837,
      "eval_runtime": 76.9535,
      "eval_samples_per_second": 15.685,
      "eval_steps_per_second": 1.962,
      "step": 5000
    },
    {
      "epoch": 1.013264554163596,
      "grad_norm": 8.782347293845305e-09,
      "learning_rate": 3.311225743060673e-06,
      "loss": 0.1842,
      "step": 5500
    },
    {
      "epoch": 1.013264554163596,
      "eval_loss": 0.9443314075469971,
      "eval_runtime": 75.2095,
      "eval_samples_per_second": 16.049,
      "eval_steps_per_second": 2.008,
      "step": 5500
    },
    {
      "epoch": 1.105379513633014,
      "grad_norm": 0.002741970354691148,
      "learning_rate": 3.1577008106116434e-06,
      "loss": 0.1183,
      "step": 6000
    },
    {
      "epoch": 1.105379513633014,
      "eval_loss": 0.943097710609436,
      "eval_runtime": 74.832,
      "eval_samples_per_second": 16.129,
      "eval_steps_per_second": 2.018,
      "step": 6000
    },
    {
      "epoch": 1.1974944731024317,
      "grad_norm": 9.241516818292439e-05,
      "learning_rate": 3.004175878162614e-06,
      "loss": 0.1066,
      "step": 6500
    },
    {
      "epoch": 1.1974944731024317,
      "eval_loss": 1.005082607269287,
      "eval_runtime": 74.7497,
      "eval_samples_per_second": 16.147,
      "eval_steps_per_second": 2.02,
      "step": 6500
    },
    {
      "epoch": 1.2896094325718497,
      "grad_norm": 3.972076889624532e-09,
      "learning_rate": 2.850650945713584e-06,
      "loss": 0.0901,
      "step": 7000
    },
    {
      "epoch": 1.2896094325718497,
      "eval_loss": 1.001724362373352,
      "eval_runtime": 74.9393,
      "eval_samples_per_second": 16.106,
      "eval_steps_per_second": 2.015,
      "step": 7000
    },
    {
      "epoch": 1.3817243920412676,
      "grad_norm": 0.6726647615432739,
      "learning_rate": 2.6971260132645544e-06,
      "loss": 0.112,
      "step": 7500
    },
    {
      "epoch": 1.3817243920412676,
      "eval_loss": 1.0029873847961426,
      "eval_runtime": 76.2263,
      "eval_samples_per_second": 15.834,
      "eval_steps_per_second": 1.981,
      "step": 7500
    },
    {
      "epoch": 1.4738393515106853,
      "grad_norm": 0.0007436455925926566,
      "learning_rate": 2.543601080815525e-06,
      "loss": 0.1265,
      "step": 8000
    },
    {
      "epoch": 1.4738393515106853,
      "eval_loss": 0.9462646842002869,
      "eval_runtime": 74.6539,
      "eval_samples_per_second": 16.168,
      "eval_steps_per_second": 2.023,
      "step": 8000
    },
    {
      "epoch": 1.565954310980103,
      "grad_norm": 3.9008838825793646e-08,
      "learning_rate": 2.390076148366495e-06,
      "loss": 0.1193,
      "step": 8500
    },
    {
      "epoch": 1.565954310980103,
      "eval_loss": 1.0377594232559204,
      "eval_runtime": 74.635,
      "eval_samples_per_second": 16.172,
      "eval_steps_per_second": 2.023,
      "step": 8500
    },
    {
      "epoch": 1.658069270449521,
      "grad_norm": 1.2957306353200693e-05,
      "learning_rate": 2.236551215917465e-06,
      "loss": 0.1318,
      "step": 9000
    },
    {
      "epoch": 1.658069270449521,
      "eval_loss": 0.9298674464225769,
      "eval_runtime": 74.5158,
      "eval_samples_per_second": 16.198,
      "eval_steps_per_second": 2.026,
      "step": 9000
    },
    {
      "epoch": 1.750184229918939,
      "grad_norm": 2.964475670150235e-12,
      "learning_rate": 2.0830262834684354e-06,
      "loss": 0.1262,
      "step": 9500
    },
    {
      "epoch": 1.750184229918939,
      "eval_loss": 0.9714127779006958,
      "eval_runtime": 74.8339,
      "eval_samples_per_second": 16.129,
      "eval_steps_per_second": 2.018,
      "step": 9500
    },
    {
      "epoch": 1.8422991893883567,
      "grad_norm": 1.1816314327006694e-06,
      "learning_rate": 1.9295013510194058e-06,
      "loss": 0.101,
      "step": 10000
    },
    {
      "epoch": 1.8422991893883567,
      "eval_loss": 0.8753853440284729,
      "eval_runtime": 74.7629,
      "eval_samples_per_second": 16.144,
      "eval_steps_per_second": 2.02,
      "step": 10000
    },
    {
      "epoch": 1.9344141488577744,
      "grad_norm": 1.1530125298975236e-08,
      "learning_rate": 1.775976418570376e-06,
      "loss": 0.1158,
      "step": 10500
    },
    {
      "epoch": 1.9344141488577744,
      "eval_loss": 0.8074946999549866,
      "eval_runtime": 74.5296,
      "eval_samples_per_second": 16.195,
      "eval_steps_per_second": 2.026,
      "step": 10500
    },
    {
      "epoch": 2.026529108327192,
      "grad_norm": 2.0235971232018812e-10,
      "learning_rate": 1.622451486121346e-06,
      "loss": 0.0656,
      "step": 11000
    },
    {
      "epoch": 2.026529108327192,
      "eval_loss": 0.8281168937683105,
      "eval_runtime": 76.1895,
      "eval_samples_per_second": 15.842,
      "eval_steps_per_second": 1.982,
      "step": 11000
    }
  ],
  "logging_steps": 500,
  "max_steps": 16284,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3958061380562520.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}