{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.07222824124232576,
  "eval_steps": 5,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001444564824846515,
      "eval_loss": 12.318426132202148,
      "eval_runtime": 452.8191,
      "eval_samples_per_second": 2.575,
      "eval_steps_per_second": 0.322,
      "step": 1
    },
    {
      "epoch": 0.004333694474539545,
      "grad_norm": 25.538225173950195,
      "learning_rate": 3e-05,
      "loss": 49.8181,
      "step": 3
    },
    {
      "epoch": 0.007222824124232575,
      "eval_loss": 10.13512134552002,
      "eval_runtime": 454.3185,
      "eval_samples_per_second": 2.566,
      "eval_steps_per_second": 0.321,
      "step": 5
    },
    {
      "epoch": 0.00866738894907909,
      "grad_norm": 48.99589920043945,
      "learning_rate": 6e-05,
      "loss": 44.5425,
      "step": 6
    },
    {
      "epoch": 0.013001083423618635,
      "grad_norm": 41.58720779418945,
      "learning_rate": 9e-05,
      "loss": 23.7904,
      "step": 9
    },
    {
      "epoch": 0.01444564824846515,
      "eval_loss": 0.9781672954559326,
      "eval_runtime": 453.9838,
      "eval_samples_per_second": 2.568,
      "eval_steps_per_second": 0.322,
      "step": 10
    },
    {
      "epoch": 0.01733477789815818,
      "grad_norm": 18.518259048461914,
      "learning_rate": 9.938441702975689e-05,
      "loss": 4.2303,
      "step": 12
    },
    {
      "epoch": 0.021668472372697724,
      "grad_norm": 0.1313701719045639,
      "learning_rate": 9.619397662556435e-05,
      "loss": 0.0983,
      "step": 15
    },
    {
      "epoch": 0.021668472372697724,
      "eval_loss": 0.022199353203177452,
      "eval_runtime": 454.0297,
      "eval_samples_per_second": 2.568,
      "eval_steps_per_second": 0.322,
      "step": 15
    },
    {
      "epoch": 0.02600216684723727,
      "grad_norm": 0.12492942065000534,
      "learning_rate": 9.045084971874738e-05,
      "loss": 0.0015,
      "step": 18
    },
    {
      "epoch": 0.0288912964969303,
      "eval_loss": 0.07847809791564941,
      "eval_runtime": 454.0899,
      "eval_samples_per_second": 2.568,
      "eval_steps_per_second": 0.322,
      "step": 20
    },
    {
      "epoch": 0.030335861321776816,
      "grad_norm": 18.6055908203125,
      "learning_rate": 8.247240241650918e-05,
      "loss": 0.2287,
      "step": 21
    },
    {
      "epoch": 0.03466955579631636,
      "grad_norm": 3.176828145980835,
      "learning_rate": 7.269952498697734e-05,
      "loss": 0.007,
      "step": 24
    },
    {
      "epoch": 0.03611412062116288,
      "eval_loss": 0.0683816447854042,
      "eval_runtime": 453.9578,
      "eval_samples_per_second": 2.569,
      "eval_steps_per_second": 0.322,
      "step": 25
    },
    {
      "epoch": 0.0390032502708559,
      "grad_norm": 0.03669508546590805,
      "learning_rate": 6.167226819279528e-05,
      "loss": 0.0005,
      "step": 27
    },
    {
      "epoch": 0.04333694474539545,
      "grad_norm": 0.5376744270324707,
      "learning_rate": 5e-05,
      "loss": 0.6832,
      "step": 30
    },
    {
      "epoch": 0.04333694474539545,
      "eval_loss": 0.024126915261149406,
      "eval_runtime": 454.0942,
      "eval_samples_per_second": 2.568,
      "eval_steps_per_second": 0.322,
      "step": 30
    },
    {
      "epoch": 0.047670639219934995,
      "grad_norm": 0.025675935670733452,
      "learning_rate": 3.832773180720475e-05,
      "loss": 0.0004,
      "step": 33
    },
    {
      "epoch": 0.05055976886962803,
      "eval_loss": 0.018978465348482132,
      "eval_runtime": 454.2548,
      "eval_samples_per_second": 2.567,
      "eval_steps_per_second": 0.321,
      "step": 35
    },
    {
      "epoch": 0.05200433369447454,
      "grad_norm": 9.978776931762695,
      "learning_rate": 2.7300475013022663e-05,
      "loss": 0.0128,
      "step": 36
    },
    {
      "epoch": 0.056338028169014086,
      "grad_norm": 0.01156551018357277,
      "learning_rate": 1.7527597583490822e-05,
      "loss": 0.1589,
      "step": 39
    },
    {
      "epoch": 0.0577825929938606,
      "eval_loss": 0.0023998336400836706,
      "eval_runtime": 453.9056,
      "eval_samples_per_second": 2.569,
      "eval_steps_per_second": 0.322,
      "step": 40
    },
    {
      "epoch": 0.06067172264355363,
      "grad_norm": 1.0478241443634033,
      "learning_rate": 9.549150281252633e-06,
      "loss": 0.003,
      "step": 42
    },
    {
      "epoch": 0.06500541711809317,
      "grad_norm": 0.04418126866221428,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 0.001,
      "step": 45
    },
    {
      "epoch": 0.06500541711809317,
      "eval_loss": 0.0003434412938077003,
      "eval_runtime": 454.0191,
      "eval_samples_per_second": 2.568,
      "eval_steps_per_second": 0.322,
      "step": 45
    },
    {
      "epoch": 0.06933911159263272,
      "grad_norm": 0.06964542716741562,
      "learning_rate": 6.15582970243117e-07,
      "loss": 0.0012,
      "step": 48
    },
    {
      "epoch": 0.07222824124232576,
      "eval_loss": 0.0003464781621005386,
      "eval_runtime": 453.9994,
      "eval_samples_per_second": 2.568,
      "eval_steps_per_second": 0.322,
      "step": 50
    }
  ],
  "logging_steps": 3,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.7562264756224e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}