{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.10332713370531102,
  "eval_steps": 63,
  "global_step": 250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0004133085348212441,
      "eval_loss": 1.3292961120605469,
      "eval_runtime": 58.898,
      "eval_samples_per_second": 17.301,
      "eval_steps_per_second": 8.659,
      "step": 1
    },
    {
      "epoch": 0.0041330853482124405,
      "grad_norm": 3.00954270362854,
      "learning_rate": 0.00019979453927503364,
      "loss": 4.8311,
      "step": 10
    },
    {
      "epoch": 0.008266170696424881,
      "grad_norm": 3.6272060871124268,
      "learning_rate": 0.00019815591569910654,
      "loss": 3.0185,
      "step": 20
    },
    {
      "epoch": 0.012399256044637322,
      "grad_norm": 4.03973913192749,
      "learning_rate": 0.00019490557470106686,
      "loss": 2.6936,
      "step": 30
    },
    {
      "epoch": 0.016532341392849762,
      "grad_norm": 2.1286351680755615,
      "learning_rate": 0.0001900968867902419,
      "loss": 2.2795,
      "step": 40
    },
    {
      "epoch": 0.020665426741062202,
      "grad_norm": 3.672022819519043,
      "learning_rate": 0.00018380881048918405,
      "loss": 2.5414,
      "step": 50
    },
    {
      "epoch": 0.024798512089274645,
      "grad_norm": 2.8622655868530273,
      "learning_rate": 0.00017614459583691346,
      "loss": 2.4407,
      "step": 60
    },
    {
      "epoch": 0.026038437693738377,
      "eval_loss": 0.5793694257736206,
      "eval_runtime": 58.9316,
      "eval_samples_per_second": 17.291,
      "eval_steps_per_second": 8.654,
      "step": 63
    },
    {
      "epoch": 0.028931597437487085,
      "grad_norm": 3.937514543533325,
      "learning_rate": 0.0001672300890261317,
      "loss": 2.5857,
      "step": 70
    },
    {
      "epoch": 0.033064682785699524,
      "grad_norm": 3.3631467819213867,
      "learning_rate": 0.00015721166601221698,
      "loss": 2.6145,
      "step": 80
    },
    {
      "epoch": 0.037197768133911964,
      "grad_norm": 2.0942676067352295,
      "learning_rate": 0.00014625382902408356,
      "loss": 2.3387,
      "step": 90
    },
    {
      "epoch": 0.041330853482124404,
      "grad_norm": 3.093599319458008,
      "learning_rate": 0.00013453650544213076,
      "loss": 2.2256,
      "step": 100
    },
    {
      "epoch": 0.04546393883033684,
      "grad_norm": 2.044689893722534,
      "learning_rate": 0.00012225209339563145,
      "loss": 2.5785,
      "step": 110
    },
    {
      "epoch": 0.04959702417854929,
      "grad_norm": 3.6700918674468994,
      "learning_rate": 0.00010960230259076818,
      "loss": 2.2067,
      "step": 120
    },
    {
      "epoch": 0.052076875387476754,
      "eval_loss": 0.5599912405014038,
      "eval_runtime": 59.0303,
      "eval_samples_per_second": 17.262,
      "eval_steps_per_second": 8.64,
      "step": 126
    },
    {
      "epoch": 0.05373010952676173,
      "grad_norm": 2.340982437133789,
      "learning_rate": 9.679484224283449e-05,
      "loss": 2.312,
      "step": 130
    },
    {
      "epoch": 0.05786319487497417,
      "grad_norm": 3.4717183113098145,
      "learning_rate": 8.404001049666211e-05,
      "loss": 2.3654,
      "step": 140
    },
    {
      "epoch": 0.06199628022318661,
      "grad_norm": 3.637983798980713,
      "learning_rate": 7.154724133689677e-05,
      "loss": 2.5008,
      "step": 150
    },
    {
      "epoch": 0.06612936557139905,
      "grad_norm": 2.1561226844787598,
      "learning_rate": 5.952166568776062e-05,
      "loss": 2.1666,
      "step": 160
    },
    {
      "epoch": 0.07026245091961149,
      "grad_norm": 1.9643546342849731,
      "learning_rate": 4.8160743168947496e-05,
      "loss": 2.1154,
      "step": 170
    },
    {
      "epoch": 0.07439553626782393,
      "grad_norm": 2.2433414459228516,
      "learning_rate": 3.7651019814126654e-05,
      "loss": 2.0846,
      "step": 180
    },
    {
      "epoch": 0.07811531308121512,
      "eval_loss": 0.5466768741607666,
      "eval_runtime": 58.8836,
      "eval_samples_per_second": 17.305,
      "eval_steps_per_second": 8.661,
      "step": 189
    },
    {
      "epoch": 0.07852862161603637,
      "grad_norm": 3.939530611038208,
      "learning_rate": 2.8165064990227252e-05,
      "loss": 2.1509,
      "step": 190
    },
    {
      "epoch": 0.08266170696424881,
      "grad_norm": 2.519536256790161,
      "learning_rate": 1.985863781320435e-05,
      "loss": 2.2889,
      "step": 200
    },
    {
      "epoch": 0.08679479231246125,
      "grad_norm": 2.479623317718506,
      "learning_rate": 1.286812958766106e-05,
      "loss": 2.3048,
      "step": 210
    },
    {
      "epoch": 0.09092787766067369,
      "grad_norm": 1.8329150676727295,
      "learning_rate": 7.308324265397836e-06,
      "loss": 1.8351,
      "step": 220
    },
    {
      "epoch": 0.09506096300888614,
      "grad_norm": 2.6483101844787598,
      "learning_rate": 3.270513696097055e-06,
      "loss": 1.9886,
      "step": 230
    },
    {
      "epoch": 0.09919404835709858,
      "grad_norm": 2.6098995208740234,
      "learning_rate": 8.209986176753948e-07,
      "loss": 2.1835,
      "step": 240
    },
    {
      "epoch": 0.10332713370531102,
      "grad_norm": 2.4620838165283203,
      "learning_rate": 0.0,
      "loss": 2.02,
      "step": 250
    }
  ],
  "logging_steps": 10,
  "max_steps": 250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 63,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.49341841408e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}