{
  "best_metric": 0.6862578988075256,
  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e3l56-l/checkpoint-15500",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 16284,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.09211495946941783,
      "grad_norm": 18.028329849243164,
      "learning_rate": 4.846475067550971e-06,
      "loss": 0.2253,
      "step": 500
    },
    {
      "epoch": 0.09211495946941783,
      "eval_loss": 1.4693936109542847,
      "eval_runtime": 74.2802,
      "eval_samples_per_second": 16.249,
      "eval_steps_per_second": 2.033,
      "step": 500
    },
    {
      "epoch": 0.18422991893883567,
      "grad_norm": 0.4609026312828064,
      "learning_rate": 4.692950135101941e-06,
      "loss": 0.2544,
      "step": 1000
    },
    {
      "epoch": 0.18422991893883567,
      "eval_loss": 1.5420819520950317,
      "eval_runtime": 74.0766,
      "eval_samples_per_second": 16.294,
      "eval_steps_per_second": 2.038,
      "step": 1000
    },
    {
      "epoch": 0.2763448784082535,
      "grad_norm": 71.96737670898438,
      "learning_rate": 4.5394252026529115e-06,
      "loss": 0.3236,
      "step": 1500
    },
    {
      "epoch": 0.2763448784082535,
      "eval_loss": 1.4642689228057861,
      "eval_runtime": 74.2336,
      "eval_samples_per_second": 16.259,
      "eval_steps_per_second": 2.034,
      "step": 1500
    },
    {
      "epoch": 0.36845983787767134,
      "grad_norm": 6.366127069945549e-10,
      "learning_rate": 4.385900270203882e-06,
      "loss": 0.1888,
      "step": 2000
    },
    {
      "epoch": 0.36845983787767134,
      "eval_loss": 1.3193777799606323,
      "eval_runtime": 76.1214,
      "eval_samples_per_second": 15.856,
      "eval_steps_per_second": 1.984,
      "step": 2000
    },
    {
      "epoch": 0.46057479734708917,
      "grad_norm": 452.8532409667969,
      "learning_rate": 4.232375337754851e-06,
      "loss": 0.2563,
      "step": 2500
    },
    {
      "epoch": 0.46057479734708917,
      "eval_loss": 1.37644362449646,
      "eval_runtime": 76.3265,
      "eval_samples_per_second": 15.814,
      "eval_steps_per_second": 1.978,
      "step": 2500
    },
    {
      "epoch": 0.552689756816507,
      "grad_norm": 0.0002858435327652842,
      "learning_rate": 4.078850405305822e-06,
      "loss": 0.2794,
      "step": 3000
    },
    {
      "epoch": 0.552689756816507,
      "eval_loss": 1.3007307052612305,
      "eval_runtime": 76.3512,
      "eval_samples_per_second": 15.809,
      "eval_steps_per_second": 1.978,
      "step": 3000
    },
    {
      "epoch": 0.6448047162859248,
      "grad_norm": 0.324462890625,
      "learning_rate": 3.925325472856792e-06,
      "loss": 0.1749,
      "step": 3500
    },
    {
      "epoch": 0.6448047162859248,
      "eval_loss": 1.3360542058944702,
      "eval_runtime": 74.5379,
      "eval_samples_per_second": 16.193,
      "eval_steps_per_second": 2.026,
      "step": 3500
    },
    {
      "epoch": 0.7369196757553427,
      "grad_norm": 76.63452911376953,
      "learning_rate": 3.7718005404077624e-06,
      "loss": 0.2672,
      "step": 4000
    },
    {
      "epoch": 0.7369196757553427,
      "eval_loss": 1.268362283706665,
      "eval_runtime": 76.6253,
      "eval_samples_per_second": 15.752,
      "eval_steps_per_second": 1.971,
      "step": 4000
    },
    {
      "epoch": 0.8290346352247605,
      "grad_norm": 1.9390699863433838,
      "learning_rate": 3.6182756079587327e-06,
      "loss": 0.218,
      "step": 4500
    },
    {
      "epoch": 0.8290346352247605,
      "eval_loss": 1.1064503192901611,
      "eval_runtime": 75.0503,
      "eval_samples_per_second": 16.083,
      "eval_steps_per_second": 2.012,
      "step": 4500
    },
    {
      "epoch": 0.9211495946941783,
      "grad_norm": 3.37814665130054e-10,
      "learning_rate": 3.464750675509703e-06,
      "loss": 0.1665,
      "step": 5000
    },
    {
      "epoch": 0.9211495946941783,
      "eval_loss": 1.0620027780532837,
      "eval_runtime": 76.9535,
      "eval_samples_per_second": 15.685,
      "eval_steps_per_second": 1.962,
      "step": 5000
    },
    {
      "epoch": 1.013264554163596,
      "grad_norm": 8.782347293845305e-09,
      "learning_rate": 3.311225743060673e-06,
      "loss": 0.1842,
      "step": 5500
    },
    {
      "epoch": 1.013264554163596,
      "eval_loss": 0.9443314075469971,
      "eval_runtime": 75.2095,
      "eval_samples_per_second": 16.049,
      "eval_steps_per_second": 2.008,
      "step": 5500
    },
    {
      "epoch": 1.105379513633014,
      "grad_norm": 0.002741970354691148,
      "learning_rate": 3.1577008106116434e-06,
      "loss": 0.1183,
      "step": 6000
    },
    {
      "epoch": 1.105379513633014,
      "eval_loss": 0.943097710609436,
      "eval_runtime": 74.832,
      "eval_samples_per_second": 16.129,
      "eval_steps_per_second": 2.018,
      "step": 6000
    },
    {
      "epoch": 1.1974944731024317,
      "grad_norm": 9.241516818292439e-05,
      "learning_rate": 3.004175878162614e-06,
      "loss": 0.1066,
      "step": 6500
    },
    {
      "epoch": 1.1974944731024317,
      "eval_loss": 1.005082607269287,
      "eval_runtime": 74.7497,
      "eval_samples_per_second": 16.147,
      "eval_steps_per_second": 2.02,
      "step": 6500
    },
    {
      "epoch": 1.2896094325718497,
      "grad_norm": 3.972076889624532e-09,
      "learning_rate": 2.850650945713584e-06,
      "loss": 0.0901,
      "step": 7000
    },
    {
      "epoch": 1.2896094325718497,
      "eval_loss": 1.001724362373352,
      "eval_runtime": 74.9393,
      "eval_samples_per_second": 16.106,
      "eval_steps_per_second": 2.015,
      "step": 7000
    },
    {
      "epoch": 1.3817243920412676,
      "grad_norm": 0.6726647615432739,
      "learning_rate": 2.6971260132645544e-06,
      "loss": 0.112,
      "step": 7500
    },
    {
      "epoch": 1.3817243920412676,
      "eval_loss": 1.0029873847961426,
      "eval_runtime": 76.2263,
      "eval_samples_per_second": 15.834,
      "eval_steps_per_second": 1.981,
      "step": 7500
    },
    {
      "epoch": 1.4738393515106853,
      "grad_norm": 0.0007436455925926566,
      "learning_rate": 2.543601080815525e-06,
      "loss": 0.1265,
      "step": 8000
    },
    {
      "epoch": 1.4738393515106853,
      "eval_loss": 0.9462646842002869,
      "eval_runtime": 74.6539,
      "eval_samples_per_second": 16.168,
      "eval_steps_per_second": 2.023,
      "step": 8000
    },
    {
      "epoch": 1.565954310980103,
      "grad_norm": 3.9008838825793646e-08,
      "learning_rate": 2.390076148366495e-06,
      "loss": 0.1193,
      "step": 8500
    },
    {
      "epoch": 1.565954310980103,
      "eval_loss": 1.0377594232559204,
      "eval_runtime": 74.635,
      "eval_samples_per_second": 16.172,
      "eval_steps_per_second": 2.023,
      "step": 8500
    },
    {
      "epoch": 1.658069270449521,
      "grad_norm": 1.2957306353200693e-05,
      "learning_rate": 2.236551215917465e-06,
      "loss": 0.1318,
      "step": 9000
    },
    {
      "epoch": 1.658069270449521,
      "eval_loss": 0.9298674464225769,
      "eval_runtime": 74.5158,
      "eval_samples_per_second": 16.198,
      "eval_steps_per_second": 2.026,
      "step": 9000
    },
    {
      "epoch": 1.750184229918939,
      "grad_norm": 2.964475670150235e-12,
      "learning_rate": 2.0830262834684354e-06,
      "loss": 0.1262,
      "step": 9500
    },
    {
      "epoch": 1.750184229918939,
      "eval_loss": 0.9714127779006958,
      "eval_runtime": 74.8339,
      "eval_samples_per_second": 16.129,
      "eval_steps_per_second": 2.018,
      "step": 9500
    },
    {
      "epoch": 1.8422991893883567,
      "grad_norm": 1.1816314327006694e-06,
      "learning_rate": 1.9295013510194058e-06,
      "loss": 0.101,
      "step": 10000
    },
    {
      "epoch": 1.8422991893883567,
      "eval_loss": 0.8753853440284729,
      "eval_runtime": 74.7629,
      "eval_samples_per_second": 16.144,
      "eval_steps_per_second": 2.02,
      "step": 10000
    },
    {
      "epoch": 1.9344141488577744,
      "grad_norm": 1.1530125298975236e-08,
      "learning_rate": 1.775976418570376e-06,
      "loss": 0.1158,
      "step": 10500
    },
    {
      "epoch": 1.9344141488577744,
      "eval_loss": 0.8074946999549866,
      "eval_runtime": 74.5296,
      "eval_samples_per_second": 16.195,
      "eval_steps_per_second": 2.026,
      "step": 10500
    },
    {
      "epoch": 2.026529108327192,
      "grad_norm": 2.0235971232018812e-10,
      "learning_rate": 1.622451486121346e-06,
      "loss": 0.0656,
      "step": 11000
    },
    {
      "epoch": 2.026529108327192,
      "eval_loss": 0.8281168937683105,
      "eval_runtime": 76.1895,
      "eval_samples_per_second": 15.842,
      "eval_steps_per_second": 1.982,
      "step": 11000
    },
    {
      "epoch": 2.1186440677966103,
      "grad_norm": 27.123056411743164,
      "learning_rate": 1.4689265536723166e-06,
      "loss": 0.0854,
      "step": 11500
    },
    {
      "epoch": 2.1186440677966103,
      "eval_loss": 0.7755604982376099,
      "eval_runtime": 74.506,
      "eval_samples_per_second": 16.2,
      "eval_steps_per_second": 2.027,
      "step": 11500
    },
    {
      "epoch": 2.210759027266028,
      "grad_norm": 0.017793377861380577,
      "learning_rate": 1.3154016212232868e-06,
      "loss": 0.0574,
      "step": 12000
    },
    {
      "epoch": 2.210759027266028,
      "eval_loss": 0.7431201934814453,
      "eval_runtime": 74.44,
      "eval_samples_per_second": 16.214,
      "eval_steps_per_second": 2.028,
      "step": 12000
    },
    {
      "epoch": 2.3028739867354457,
      "grad_norm": 5.614988327026367,
      "learning_rate": 1.1618766887742571e-06,
      "loss": 0.0643,
      "step": 12500
    },
    {
      "epoch": 2.3028739867354457,
      "eval_loss": 0.7556394338607788,
      "eval_runtime": 76.2998,
      "eval_samples_per_second": 15.819,
      "eval_steps_per_second": 1.979,
      "step": 12500
    },
    {
      "epoch": 2.3949889462048635,
      "grad_norm": 0.1345616579055786,
      "learning_rate": 1.0083517563252273e-06,
      "loss": 0.0657,
      "step": 13000
    },
    {
      "epoch": 2.3949889462048635,
      "eval_loss": 0.7818958759307861,
      "eval_runtime": 74.4509,
      "eval_samples_per_second": 16.212,
      "eval_steps_per_second": 2.028,
      "step": 13000
    },
    {
      "epoch": 2.4871039056742816,
      "grad_norm": 2.2698461634718115e-06,
      "learning_rate": 8.548268238761975e-07,
      "loss": 0.0372,
      "step": 13500
    },
    {
      "epoch": 2.4871039056742816,
      "eval_loss": 0.768934428691864,
      "eval_runtime": 74.5573,
      "eval_samples_per_second": 16.189,
      "eval_steps_per_second": 2.025,
      "step": 13500
    },
    {
      "epoch": 2.5792188651436994,
      "grad_norm": 3.2547378540039062,
      "learning_rate": 7.013018914271679e-07,
      "loss": 0.0286,
      "step": 14000
    },
    {
      "epoch": 2.5792188651436994,
      "eval_loss": 0.7623304724693298,
      "eval_runtime": 74.5031,
      "eval_samples_per_second": 16.201,
      "eval_steps_per_second": 2.027,
      "step": 14000
    },
    {
      "epoch": 2.671333824613117,
      "grad_norm": 5.614094394446392e-14,
      "learning_rate": 5.477769589781381e-07,
      "loss": 0.0581,
      "step": 14500
    },
    {
      "epoch": 2.671333824613117,
      "eval_loss": 0.7251203060150146,
      "eval_runtime": 74.3803,
      "eval_samples_per_second": 16.227,
      "eval_steps_per_second": 2.03,
      "step": 14500
    },
    {
      "epoch": 2.7634487840825352,
      "grad_norm": 9.663383426783412e-10,
      "learning_rate": 3.942520265291084e-07,
      "loss": 0.0578,
      "step": 15000
    },
    {
      "epoch": 2.7634487840825352,
      "eval_loss": 0.6912627220153809,
      "eval_runtime": 74.5101,
      "eval_samples_per_second": 16.199,
      "eval_steps_per_second": 2.027,
      "step": 15000
    },
    {
      "epoch": 2.855563743551953,
      "grad_norm": 3.149430582993773e-08,
      "learning_rate": 2.407270940800786e-07,
      "loss": 0.0442,
      "step": 15500
    },
    {
      "epoch": 2.855563743551953,
      "eval_loss": 0.6862578988075256,
      "eval_runtime": 76.24,
      "eval_samples_per_second": 15.832,
      "eval_steps_per_second": 1.981,
      "step": 15500
    },
    {
      "epoch": 2.9476787030213707,
      "grad_norm": 0.040168143808841705,
      "learning_rate": 8.72021616310489e-08,
      "loss": 0.0304,
      "step": 16000
    },
    {
      "epoch": 2.9476787030213707,
      "eval_loss": 0.6905970573425293,
      "eval_runtime": 74.4868,
      "eval_samples_per_second": 16.204,
      "eval_steps_per_second": 2.027,
      "step": 16000
    },
    {
      "epoch": 3.0,
      "step": 16284,
      "total_flos": 5859024508620360.0,
      "train_loss": 0.13378913939599468,
      "train_runtime": 12342.3724,
      "train_samples_per_second": 2.639,
      "train_steps_per_second": 1.319
    }
  ],
  "logging_steps": 500,
  "max_steps": 16284,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5859024508620360.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}