{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.013875627425387843,
  "eval_steps": 9,
  "global_step": 99,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00014015785278169538,
      "eval_loss": 4.543891429901123,
      "eval_runtime": 2673.8664,
      "eval_samples_per_second": 8.988,
      "eval_steps_per_second": 4.494,
      "step": 1
    },
    {
      "epoch": 0.00042047355834508613,
      "grad_norm": 4.4303879737854,
      "learning_rate": 3e-05,
      "loss": 4.2509,
      "step": 3
    },
    {
      "epoch": 0.0008409471166901723,
      "grad_norm": 4.1902384757995605,
      "learning_rate": 6e-05,
      "loss": 4.0204,
      "step": 6
    },
    {
      "epoch": 0.0012614206750352585,
      "grad_norm": 2.7684712409973145,
      "learning_rate": 9e-05,
      "loss": 2.7809,
      "step": 9
    },
    {
      "epoch": 0.0012614206750352585,
      "eval_loss": 2.1958885192871094,
      "eval_runtime": 2674.8711,
      "eval_samples_per_second": 8.985,
      "eval_steps_per_second": 4.493,
      "step": 9
    },
    {
      "epoch": 0.0016818942333803445,
      "grad_norm": 2.783182382583618,
      "learning_rate": 0.00012,
      "loss": 1.9193,
      "step": 12
    },
    {
      "epoch": 0.002102367791725431,
      "grad_norm": 2.446138381958008,
      "learning_rate": 0.00015000000000000001,
      "loss": 1.6654,
      "step": 15
    },
    {
      "epoch": 0.002522841350070517,
      "grad_norm": 1.4197582006454468,
      "learning_rate": 0.00018,
      "loss": 1.4818,
      "step": 18
    },
    {
      "epoch": 0.002522841350070517,
      "eval_loss": 1.463165283203125,
      "eval_runtime": 2674.6792,
      "eval_samples_per_second": 8.986,
      "eval_steps_per_second": 4.493,
      "step": 18
    },
    {
      "epoch": 0.002943314908415603,
      "grad_norm": 1.5759952068328857,
      "learning_rate": 0.0001999229036240723,
      "loss": 1.4326,
      "step": 21
    },
    {
      "epoch": 0.003363788466760689,
      "grad_norm": 1.0148948431015015,
      "learning_rate": 0.00019876883405951377,
      "loss": 1.5209,
      "step": 24
    },
    {
      "epoch": 0.0037842620251057756,
      "grad_norm": 1.0281277894973755,
      "learning_rate": 0.00019624552364536473,
      "loss": 1.3521,
      "step": 27
    },
    {
      "epoch": 0.0037842620251057756,
      "eval_loss": 1.3808079957962036,
      "eval_runtime": 2674.464,
      "eval_samples_per_second": 8.986,
      "eval_steps_per_second": 4.493,
      "step": 27
    },
    {
      "epoch": 0.004204735583450862,
      "grad_norm": 1.0885648727416992,
      "learning_rate": 0.0001923879532511287,
      "loss": 1.4687,
      "step": 30
    },
    {
      "epoch": 0.004625209141795947,
      "grad_norm": 1.1229428052902222,
      "learning_rate": 0.00018724960070727972,
      "loss": 1.336,
      "step": 33
    },
    {
      "epoch": 0.005045682700141034,
      "grad_norm": 1.0429704189300537,
      "learning_rate": 0.00018090169943749476,
      "loss": 1.3521,
      "step": 36
    },
    {
      "epoch": 0.005045682700141034,
      "eval_loss": 1.3701305389404297,
      "eval_runtime": 3089.1504,
      "eval_samples_per_second": 7.78,
      "eval_steps_per_second": 3.89,
      "step": 36
    },
    {
      "epoch": 0.00546615625848612,
      "grad_norm": 0.9580327868461609,
      "learning_rate": 0.00017343225094356855,
      "loss": 1.3615,
      "step": 39
    },
    {
      "epoch": 0.005886629816831206,
      "grad_norm": 0.9180206656455994,
      "learning_rate": 0.00016494480483301836,
      "loss": 1.2992,
      "step": 42
    },
    {
      "epoch": 0.0063071033751762925,
      "grad_norm": 0.8730207681655884,
      "learning_rate": 0.00015555702330196023,
      "loss": 1.2218,
      "step": 45
    },
    {
      "epoch": 0.0063071033751762925,
      "eval_loss": 1.368905782699585,
      "eval_runtime": 2719.9889,
      "eval_samples_per_second": 8.836,
      "eval_steps_per_second": 4.418,
      "step": 45
    },
    {
      "epoch": 0.006727576933521378,
      "grad_norm": 0.9200890064239502,
      "learning_rate": 0.00014539904997395468,
      "loss": 1.3726,
      "step": 48
    },
    {
      "epoch": 0.007148050491866465,
      "grad_norm": 0.962693989276886,
      "learning_rate": 0.0001346117057077493,
      "loss": 1.3319,
      "step": 51
    },
    {
      "epoch": 0.007568524050211551,
      "grad_norm": 0.935663640499115,
      "learning_rate": 0.00012334453638559057,
      "loss": 1.3547,
      "step": 54
    },
    {
      "epoch": 0.007568524050211551,
      "eval_loss": 1.3415731191635132,
      "eval_runtime": 2673.8225,
      "eval_samples_per_second": 8.989,
      "eval_steps_per_second": 4.494,
      "step": 54
    },
    {
      "epoch": 0.007988997608556637,
      "grad_norm": 0.9458617568016052,
      "learning_rate": 0.00011175373974578378,
      "loss": 1.3576,
      "step": 57
    },
    {
      "epoch": 0.008409471166901723,
      "grad_norm": 0.8497123122215271,
      "learning_rate": 0.0001,
      "loss": 1.2875,
      "step": 60
    },
    {
      "epoch": 0.00882994472524681,
      "grad_norm": 0.7052412033081055,
      "learning_rate": 8.824626025421626e-05,
      "loss": 1.3197,
      "step": 63
    },
    {
      "epoch": 0.00882994472524681,
      "eval_loss": 1.3330885171890259,
      "eval_runtime": 2673.7616,
      "eval_samples_per_second": 8.989,
      "eval_steps_per_second": 4.494,
      "step": 63
    },
    {
      "epoch": 0.009250418283591895,
      "grad_norm": 0.8267295360565186,
      "learning_rate": 7.66554636144095e-05,
      "loss": 1.2522,
      "step": 66
    },
    {
      "epoch": 0.009670891841936981,
      "grad_norm": 0.8061164021492004,
      "learning_rate": 6.538829429225069e-05,
      "loss": 1.2313,
      "step": 69
    },
    {
      "epoch": 0.010091365400282068,
      "grad_norm": 0.8370426893234253,
      "learning_rate": 5.4600950026045326e-05,
      "loss": 1.2828,
      "step": 72
    },
    {
      "epoch": 0.010091365400282068,
      "eval_loss": 1.3291566371917725,
      "eval_runtime": 2673.7837,
      "eval_samples_per_second": 8.989,
      "eval_steps_per_second": 4.494,
      "step": 72
    },
    {
      "epoch": 0.010511838958627154,
      "grad_norm": 0.9212751984596252,
      "learning_rate": 4.444297669803981e-05,
      "loss": 1.3108,
      "step": 75
    },
    {
      "epoch": 0.01093231251697224,
      "grad_norm": 0.8381961584091187,
      "learning_rate": 3.5055195166981645e-05,
      "loss": 1.3383,
      "step": 78
    },
    {
      "epoch": 0.011352786075317325,
      "grad_norm": 0.8427872657775879,
      "learning_rate": 2.6567749056431467e-05,
      "loss": 1.3653,
      "step": 81
    },
    {
      "epoch": 0.011352786075317325,
      "eval_loss": 1.324036717414856,
      "eval_runtime": 2673.9575,
      "eval_samples_per_second": 8.988,
      "eval_steps_per_second": 4.494,
      "step": 81
    },
    {
      "epoch": 0.011773259633662412,
      "grad_norm": 0.9498192667961121,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 1.331,
      "step": 84
    },
    {
      "epoch": 0.012193733192007498,
      "grad_norm": 0.8051778078079224,
      "learning_rate": 1.2750399292720283e-05,
      "loss": 1.2632,
      "step": 87
    },
    {
      "epoch": 0.012614206750352585,
      "grad_norm": 0.9142070412635803,
      "learning_rate": 7.612046748871327e-06,
      "loss": 1.3475,
      "step": 90
    },
    {
      "epoch": 0.012614206750352585,
      "eval_loss": 1.3223891258239746,
      "eval_runtime": 2674.1901,
      "eval_samples_per_second": 8.987,
      "eval_steps_per_second": 4.494,
      "step": 90
    },
    {
      "epoch": 0.013034680308697671,
      "grad_norm": 0.7538347840309143,
      "learning_rate": 3.7544763546352834e-06,
      "loss": 1.2384,
      "step": 93
    },
    {
      "epoch": 0.013455153867042756,
      "grad_norm": 0.7631012201309204,
      "learning_rate": 1.231165940486234e-06,
      "loss": 1.2526,
      "step": 96
    },
    {
      "epoch": 0.013875627425387843,
      "grad_norm": 1.0042481422424316,
      "learning_rate": 7.709637592770991e-08,
      "loss": 1.3432,
      "step": 99
    },
    {
      "epoch": 0.013875627425387843,
      "eval_loss": 1.321218729019165,
      "eval_runtime": 2673.6321,
      "eval_samples_per_second": 8.989,
      "eval_steps_per_second": 4.495,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.908402781143695e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}