{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.04595517838268409,
  "eval_steps": 500,
  "global_step": 1500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003063678558845606,
      "grad_norm": 2.935556411743164,
      "learning_rate": 4.000000000000001e-06,
      "loss": 3.0,
      "step": 100
    },
    {
      "epoch": 0.006127357117691212,
      "grad_norm": 2.975250005722046,
      "learning_rate": 8.000000000000001e-06,
      "loss": 2.9241,
      "step": 200
    },
    {
      "epoch": 0.009191035676536818,
      "grad_norm": 2.8458304405212402,
      "learning_rate": 9.987820251299121e-06,
      "loss": 2.9023,
      "step": 300
    },
    {
      "epoch": 0.012254714235382424,
      "grad_norm": 2.9931607246398926,
      "learning_rate": 9.890738003669029e-06,
      "loss": 2.8897,
      "step": 400
    },
    {
      "epoch": 0.015318392794228029,
      "grad_norm": 2.8005623817443848,
      "learning_rate": 9.698463103929542e-06,
      "loss": 2.8751,
      "step": 500
    },
    {
      "epoch": 0.018382071353073636,
      "grad_norm": 2.8539531230926514,
      "learning_rate": 9.414737964294636e-06,
      "loss": 2.8567,
      "step": 600
    },
    {
      "epoch": 0.02144574991191924,
      "grad_norm": 2.6978955268859863,
      "learning_rate": 9.045084971874738e-06,
      "loss": 2.8676,
      "step": 700
    },
    {
      "epoch": 0.024509428470764847,
      "grad_norm": 2.8115577697753906,
      "learning_rate": 8.596699001693257e-06,
      "loss": 2.8524,
      "step": 800
    },
    {
      "epoch": 0.027573107029610452,
      "grad_norm": 2.6445884704589844,
      "learning_rate": 8.078307376628292e-06,
      "loss": 2.8612,
      "step": 900
    },
    {
      "epoch": 0.030636785588456058,
      "grad_norm": 2.5841586589813232,
      "learning_rate": 7.500000000000001e-06,
      "loss": 2.8564,
      "step": 1000
    },
    {
      "epoch": 0.03370046414730166,
      "grad_norm": 2.6466314792633057,
      "learning_rate": 6.873032967079562e-06,
      "loss": 2.8298,
      "step": 1100
    },
    {
      "epoch": 0.03676414270614727,
      "grad_norm": 2.583447217941284,
      "learning_rate": 6.209609477998339e-06,
      "loss": 2.8302,
      "step": 1200
    },
    {
      "epoch": 0.039827821264992874,
      "grad_norm": 2.628443956375122,
      "learning_rate": 5.522642316338268e-06,
      "loss": 2.8323,
      "step": 1300
    },
    {
      "epoch": 0.04289149982383848,
      "grad_norm": 2.5776467323303223,
      "learning_rate": 4.825502516487497e-06,
      "loss": 2.8347,
      "step": 1400
    },
    {
      "epoch": 0.04595517838268409,
      "grad_norm": 2.7446603775024414,
      "learning_rate": 4.131759111665349e-06,
      "loss": 2.8188,
      "step": 1500
    }
  ],
  "logging_steps": 100,
  "max_steps": 2500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.43496233091072e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}