{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.4708097928436911,
  "eval_steps": 63,
  "global_step": 250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0018832391713747645,
      "eval_loss": 0.7905556559562683,
      "eval_runtime": 6.9905,
      "eval_samples_per_second": 16.022,
      "eval_steps_per_second": 8.011,
      "step": 1
    },
    {
      "epoch": 0.018832391713747645,
      "grad_norm": 0.7901492118835449,
      "learning_rate": 0.00019979453927503364,
      "loss": 0.6816,
      "step": 10
    },
    {
      "epoch": 0.03766478342749529,
      "grad_norm": 0.40178900957107544,
      "learning_rate": 0.00019815591569910654,
      "loss": 0.4805,
      "step": 20
    },
    {
      "epoch": 0.05649717514124294,
      "grad_norm": 0.5106184482574463,
      "learning_rate": 0.00019490557470106686,
      "loss": 0.4685,
      "step": 30
    },
    {
      "epoch": 0.07532956685499058,
      "grad_norm": 0.49268558621406555,
      "learning_rate": 0.0001900968867902419,
      "loss": 0.4709,
      "step": 40
    },
    {
      "epoch": 0.09416195856873823,
      "grad_norm": 0.4141705632209778,
      "learning_rate": 0.00018380881048918405,
      "loss": 0.4638,
      "step": 50
    },
    {
      "epoch": 0.11299435028248588,
      "grad_norm": 0.410208135843277,
      "learning_rate": 0.00017614459583691346,
      "loss": 0.3985,
      "step": 60
    },
    {
      "epoch": 0.11864406779661017,
      "eval_loss": 0.4390982985496521,
      "eval_runtime": 6.0034,
      "eval_samples_per_second": 18.656,
      "eval_steps_per_second": 9.328,
      "step": 63
    },
    {
      "epoch": 0.1318267419962335,
      "grad_norm": 0.7335411310195923,
      "learning_rate": 0.0001672300890261317,
      "loss": 0.4612,
      "step": 70
    },
    {
      "epoch": 0.15065913370998116,
      "grad_norm": 0.48060673475265503,
      "learning_rate": 0.00015721166601221698,
      "loss": 0.4036,
      "step": 80
    },
    {
      "epoch": 0.1694915254237288,
      "grad_norm": 0.3535134196281433,
      "learning_rate": 0.00014625382902408356,
      "loss": 0.4005,
      "step": 90
    },
    {
      "epoch": 0.18832391713747645,
      "grad_norm": 0.3984243869781494,
      "learning_rate": 0.00013453650544213076,
      "loss": 0.4554,
      "step": 100
    },
    {
      "epoch": 0.2071563088512241,
      "grad_norm": 0.4779428541660309,
      "learning_rate": 0.00012225209339563145,
      "loss": 0.4137,
      "step": 110
    },
    {
      "epoch": 0.22598870056497175,
      "grad_norm": 0.4857834577560425,
      "learning_rate": 0.00010960230259076818,
      "loss": 0.4406,
      "step": 120
    },
    {
      "epoch": 0.23728813559322035,
      "eval_loss": 0.4128398299217224,
      "eval_runtime": 5.9848,
      "eval_samples_per_second": 18.714,
      "eval_steps_per_second": 9.357,
      "step": 126
    },
    {
      "epoch": 0.2448210922787194,
      "grad_norm": 0.40318259596824646,
      "learning_rate": 9.679484224283449e-05,
      "loss": 0.4051,
      "step": 130
    },
    {
      "epoch": 0.263653483992467,
      "grad_norm": 0.37790176272392273,
      "learning_rate": 8.404001049666211e-05,
      "loss": 0.4071,
      "step": 140
    },
    {
      "epoch": 0.2824858757062147,
      "grad_norm": 0.5717161893844604,
      "learning_rate": 7.154724133689677e-05,
      "loss": 0.4225,
      "step": 150
    },
    {
      "epoch": 0.3013182674199623,
      "grad_norm": 0.4312959313392639,
      "learning_rate": 5.952166568776062e-05,
      "loss": 0.3938,
      "step": 160
    },
    {
      "epoch": 0.32015065913371,
      "grad_norm": 0.44728565216064453,
      "learning_rate": 4.8160743168947496e-05,
      "loss": 0.3668,
      "step": 170
    },
    {
      "epoch": 0.3389830508474576,
      "grad_norm": 0.36283445358276367,
      "learning_rate": 3.7651019814126654e-05,
      "loss": 0.4178,
      "step": 180
    },
    {
      "epoch": 0.3559322033898305,
      "eval_loss": 0.3976288139820099,
      "eval_runtime": 5.9895,
      "eval_samples_per_second": 18.7,
      "eval_steps_per_second": 9.35,
      "step": 189
    },
    {
      "epoch": 0.3578154425612053,
      "grad_norm": 0.40222638845443726,
      "learning_rate": 2.8165064990227252e-05,
      "loss": 0.3894,
      "step": 190
    },
    {
      "epoch": 0.3766478342749529,
      "grad_norm": 0.3540307283401489,
      "learning_rate": 1.985863781320435e-05,
      "loss": 0.4443,
      "step": 200
    },
    {
      "epoch": 0.3954802259887006,
      "grad_norm": 0.4166598320007324,
      "learning_rate": 1.286812958766106e-05,
      "loss": 0.3663,
      "step": 210
    },
    {
      "epoch": 0.4143126177024482,
      "grad_norm": 0.5152053236961365,
      "learning_rate": 7.308324265397836e-06,
      "loss": 0.3393,
      "step": 220
    },
    {
      "epoch": 0.4331450094161959,
      "grad_norm": 0.3762021064758301,
      "learning_rate": 3.270513696097055e-06,
      "loss": 0.3951,
      "step": 230
    },
    {
      "epoch": 0.4519774011299435,
      "grad_norm": 0.39529839158058167,
      "learning_rate": 8.209986176753948e-07,
      "loss": 0.3999,
      "step": 240
    },
    {
      "epoch": 0.4708097928436911,
      "grad_norm": 0.5087373852729797,
      "learning_rate": 0.0,
      "loss": 0.4061,
      "step": 250
    }
  ],
  "logging_steps": 10,
  "max_steps": 250,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 63,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.8219503632384e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}