{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.010091365400282068,
"eval_steps": 9,
"global_step": 72,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00014015785278169538,
"eval_loss": 4.543891429901123,
"eval_runtime": 2673.8664,
"eval_samples_per_second": 8.988,
"eval_steps_per_second": 4.494,
"step": 1
},
{
"epoch": 0.00042047355834508613,
"grad_norm": 4.4303879737854,
"learning_rate": 3e-05,
"loss": 4.2509,
"step": 3
},
{
"epoch": 0.0008409471166901723,
"grad_norm": 4.1902384757995605,
"learning_rate": 6e-05,
"loss": 4.0204,
"step": 6
},
{
"epoch": 0.0012614206750352585,
"grad_norm": 2.7684712409973145,
"learning_rate": 9e-05,
"loss": 2.7809,
"step": 9
},
{
"epoch": 0.0012614206750352585,
"eval_loss": 2.1958885192871094,
"eval_runtime": 2674.8711,
"eval_samples_per_second": 8.985,
"eval_steps_per_second": 4.493,
"step": 9
},
{
"epoch": 0.0016818942333803445,
"grad_norm": 2.783182382583618,
"learning_rate": 0.00012,
"loss": 1.9193,
"step": 12
},
{
"epoch": 0.002102367791725431,
"grad_norm": 2.446138381958008,
"learning_rate": 0.00015000000000000001,
"loss": 1.6654,
"step": 15
},
{
"epoch": 0.002522841350070517,
"grad_norm": 1.4197582006454468,
"learning_rate": 0.00018,
"loss": 1.4818,
"step": 18
},
{
"epoch": 0.002522841350070517,
"eval_loss": 1.463165283203125,
"eval_runtime": 2674.6792,
"eval_samples_per_second": 8.986,
"eval_steps_per_second": 4.493,
"step": 18
},
{
"epoch": 0.002943314908415603,
"grad_norm": 1.5759952068328857,
"learning_rate": 0.0001999229036240723,
"loss": 1.4326,
"step": 21
},
{
"epoch": 0.003363788466760689,
"grad_norm": 1.0148948431015015,
"learning_rate": 0.00019876883405951377,
"loss": 1.5209,
"step": 24
},
{
"epoch": 0.0037842620251057756,
"grad_norm": 1.0281277894973755,
"learning_rate": 0.00019624552364536473,
"loss": 1.3521,
"step": 27
},
{
"epoch": 0.0037842620251057756,
"eval_loss": 1.3808079957962036,
"eval_runtime": 2674.464,
"eval_samples_per_second": 8.986,
"eval_steps_per_second": 4.493,
"step": 27
},
{
"epoch": 0.004204735583450862,
"grad_norm": 1.0885648727416992,
"learning_rate": 0.0001923879532511287,
"loss": 1.4687,
"step": 30
},
{
"epoch": 0.004625209141795947,
"grad_norm": 1.1229428052902222,
"learning_rate": 0.00018724960070727972,
"loss": 1.336,
"step": 33
},
{
"epoch": 0.005045682700141034,
"grad_norm": 1.0429704189300537,
"learning_rate": 0.00018090169943749476,
"loss": 1.3521,
"step": 36
},
{
"epoch": 0.005045682700141034,
"eval_loss": 1.3701305389404297,
"eval_runtime": 3089.1504,
"eval_samples_per_second": 7.78,
"eval_steps_per_second": 3.89,
"step": 36
},
{
"epoch": 0.00546615625848612,
"grad_norm": 0.9580327868461609,
"learning_rate": 0.00017343225094356855,
"loss": 1.3615,
"step": 39
},
{
"epoch": 0.005886629816831206,
"grad_norm": 0.9180206656455994,
"learning_rate": 0.00016494480483301836,
"loss": 1.2992,
"step": 42
},
{
"epoch": 0.0063071033751762925,
"grad_norm": 0.8730207681655884,
"learning_rate": 0.00015555702330196023,
"loss": 1.2218,
"step": 45
},
{
"epoch": 0.0063071033751762925,
"eval_loss": 1.368905782699585,
"eval_runtime": 2719.9889,
"eval_samples_per_second": 8.836,
"eval_steps_per_second": 4.418,
"step": 45
},
{
"epoch": 0.006727576933521378,
"grad_norm": 0.9200890064239502,
"learning_rate": 0.00014539904997395468,
"loss": 1.3726,
"step": 48
},
{
"epoch": 0.007148050491866465,
"grad_norm": 0.962693989276886,
"learning_rate": 0.0001346117057077493,
"loss": 1.3319,
"step": 51
},
{
"epoch": 0.007568524050211551,
"grad_norm": 0.935663640499115,
"learning_rate": 0.00012334453638559057,
"loss": 1.3547,
"step": 54
},
{
"epoch": 0.007568524050211551,
"eval_loss": 1.3415731191635132,
"eval_runtime": 2673.8225,
"eval_samples_per_second": 8.989,
"eval_steps_per_second": 4.494,
"step": 54
},
{
"epoch": 0.007988997608556637,
"grad_norm": 0.9458617568016052,
"learning_rate": 0.00011175373974578378,
"loss": 1.3576,
"step": 57
},
{
"epoch": 0.008409471166901723,
"grad_norm": 0.8497123122215271,
"learning_rate": 0.0001,
"loss": 1.2875,
"step": 60
},
{
"epoch": 0.00882994472524681,
"grad_norm": 0.7052412033081055,
"learning_rate": 8.824626025421626e-05,
"loss": 1.3197,
"step": 63
},
{
"epoch": 0.00882994472524681,
"eval_loss": 1.3330885171890259,
"eval_runtime": 2673.7616,
"eval_samples_per_second": 8.989,
"eval_steps_per_second": 4.494,
"step": 63
},
{
"epoch": 0.009250418283591895,
"grad_norm": 0.8267295360565186,
"learning_rate": 7.66554636144095e-05,
"loss": 1.2522,
"step": 66
},
{
"epoch": 0.009670891841936981,
"grad_norm": 0.8061164021492004,
"learning_rate": 6.538829429225069e-05,
"loss": 1.2313,
"step": 69
},
{
"epoch": 0.010091365400282068,
"grad_norm": 0.8370426893234253,
"learning_rate": 5.4600950026045326e-05,
"loss": 1.2828,
"step": 72
},
{
"epoch": 0.010091365400282068,
"eval_loss": 1.3291566371917725,
"eval_runtime": 2673.7837,
"eval_samples_per_second": 8.989,
"eval_steps_per_second": 4.494,
"step": 72
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.297020204468142e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}