{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 50,
"global_step": 8676,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11526048870447211,
"grad_norm": 20.16172218322754,
"learning_rate": 9.960000000000001e-06,
"loss": 2.3764,
"step": 500
},
{
"epoch": 0.23052097740894423,
"grad_norm": 16.483428955078125,
"learning_rate": 9.393346379647749e-06,
"loss": 0.5479,
"step": 1000
},
{
"epoch": 0.3457814661134163,
"grad_norm": 13.152728080749512,
"learning_rate": 8.781800391389433e-06,
"loss": 0.4338,
"step": 1500
},
{
"epoch": 0.46104195481788846,
"grad_norm": 8.478280067443848,
"learning_rate": 8.170254403131116e-06,
"loss": 0.3804,
"step": 2000
},
{
"epoch": 0.5763024435223605,
"grad_norm": 7.399623870849609,
"learning_rate": 7.558708414872799e-06,
"loss": 0.3876,
"step": 2500
},
{
"epoch": 0.6915629322268326,
"grad_norm": 15.365309715270996,
"learning_rate": 6.947162426614482e-06,
"loss": 0.3612,
"step": 3000
},
{
"epoch": 0.8068234209313048,
"grad_norm": 5.001895427703857,
"learning_rate": 6.335616438356164e-06,
"loss": 0.3709,
"step": 3500
},
{
"epoch": 0.9220839096357769,
"grad_norm": 20.685277938842773,
"learning_rate": 5.7240704500978485e-06,
"loss": 0.3376,
"step": 4000
},
{
"epoch": 1.037344398340249,
"grad_norm": 12.555818557739258,
"learning_rate": 5.112524461839531e-06,
"loss": 0.2983,
"step": 4500
},
{
"epoch": 1.152604887044721,
"grad_norm": 7.005858898162842,
"learning_rate": 4.502201565557731e-06,
"loss": 0.221,
"step": 5000
},
{
"epoch": 1.2678653757491931,
"grad_norm": 4.69966459274292,
"learning_rate": 3.890655577299413e-06,
"loss": 0.2095,
"step": 5500
},
{
"epoch": 1.3831258644536653,
"grad_norm": 24.06895637512207,
"learning_rate": 3.2791095890410964e-06,
"loss": 0.2292,
"step": 6000
},
{
"epoch": 1.4983863531581374,
"grad_norm": 9.91574764251709,
"learning_rate": 2.6675636007827793e-06,
"loss": 0.2304,
"step": 6500
},
{
"epoch": 1.6136468418626095,
"grad_norm": 3.978304862976074,
"learning_rate": 2.0560176125244623e-06,
"loss": 0.2102,
"step": 7000
},
{
"epoch": 1.7289073305670817,
"grad_norm": 8.447595596313477,
"learning_rate": 1.4469178082191781e-06,
"loss": 0.2171,
"step": 7500
},
{
"epoch": 1.8441678192715538,
"grad_norm": 3.0918211936950684,
"learning_rate": 8.353718199608611e-07,
"loss": 0.2166,
"step": 8000
},
{
"epoch": 1.959428307976026,
"grad_norm": 1.6782150268554688,
"learning_rate": 2.2382583170254403e-07,
"loss": 0.2146,
"step": 8500
},
{
"epoch": 2.0,
"step": 8676,
"total_flos": 9.602043799869562e+16,
"train_loss": 0.42129573760465744,
"train_runtime": 3759.5481,
"train_samples_per_second": 36.919,
"train_steps_per_second": 2.308
}
],
"logging_steps": 500,
"max_steps": 8676,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 9.602043799869562e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}