{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6284367635506677,
  "eval_steps": 50,
  "global_step": 75,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.008379156847342237,
      "grad_norm": 0.5054947733879089,
      "learning_rate": 1e-05,
      "loss": 2.9505,
      "step": 1
    },
    {
      "epoch": 0.008379156847342237,
      "eval_loss": 3.054915428161621,
      "eval_runtime": 43.2673,
      "eval_samples_per_second": 4.669,
      "eval_steps_per_second": 4.669,
      "step": 1
    },
    {
      "epoch": 0.016758313694684474,
      "grad_norm": 0.3709588348865509,
      "learning_rate": 2e-05,
      "loss": 2.8603,
      "step": 2
    },
    {
      "epoch": 0.02513747054202671,
      "grad_norm": 0.5003033876419067,
      "learning_rate": 3e-05,
      "loss": 2.8325,
      "step": 3
    },
    {
      "epoch": 0.03351662738936895,
      "grad_norm": 0.3898451328277588,
      "learning_rate": 4e-05,
      "loss": 2.7888,
      "step": 4
    },
    {
      "epoch": 0.041895784236711184,
      "grad_norm": 0.4698517918586731,
      "learning_rate": 5e-05,
      "loss": 2.8257,
      "step": 5
    },
    {
      "epoch": 0.05027494108405342,
      "grad_norm": 0.4781450927257538,
      "learning_rate": 6e-05,
      "loss": 2.9094,
      "step": 6
    },
    {
      "epoch": 0.05865409793139565,
      "grad_norm": 0.6419183015823364,
      "learning_rate": 7e-05,
      "loss": 2.8045,
      "step": 7
    },
    {
      "epoch": 0.0670332547787379,
      "grad_norm": 0.7329452633857727,
      "learning_rate": 8e-05,
      "loss": 2.7656,
      "step": 8
    },
    {
      "epoch": 0.07541241162608013,
      "grad_norm": 0.8872014284133911,
      "learning_rate": 9e-05,
      "loss": 2.9754,
      "step": 9
    },
    {
      "epoch": 0.08379156847342237,
      "grad_norm": 1.0049771070480347,
      "learning_rate": 0.0001,
      "loss": 2.7885,
      "step": 10
    },
    {
      "epoch": 0.0921707253207646,
      "grad_norm": 1.288948655128479,
      "learning_rate": 9.99695413509548e-05,
      "loss": 2.862,
      "step": 11
    },
    {
      "epoch": 0.10054988216810684,
      "grad_norm": 1.6090466976165771,
      "learning_rate": 9.987820251299122e-05,
      "loss": 2.9898,
      "step": 12
    },
    {
      "epoch": 0.10892903901544906,
      "grad_norm": 1.5656819343566895,
      "learning_rate": 9.972609476841367e-05,
      "loss": 2.8883,
      "step": 13
    },
    {
      "epoch": 0.1173081958627913,
      "grad_norm": 1.6467982530593872,
      "learning_rate": 9.951340343707852e-05,
      "loss": 2.7058,
      "step": 14
    },
    {
      "epoch": 0.12568735271013354,
      "grad_norm": 2.0958547592163086,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.7129,
      "step": 15
    },
    {
      "epoch": 0.1340665095574758,
      "grad_norm": 1.0730005502700806,
      "learning_rate": 9.890738003669029e-05,
      "loss": 2.7274,
      "step": 16
    },
    {
      "epoch": 0.142445666404818,
      "grad_norm": 1.0618668794631958,
      "learning_rate": 9.851478631379982e-05,
      "loss": 2.5979,
      "step": 17
    },
    {
      "epoch": 0.15082482325216026,
      "grad_norm": 0.4424041211605072,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.5467,
      "step": 18
    },
    {
      "epoch": 0.15920398009950248,
      "grad_norm": 0.5938422679901123,
      "learning_rate": 9.755282581475769e-05,
      "loss": 2.6563,
      "step": 19
    },
    {
      "epoch": 0.16758313694684474,
      "grad_norm": 0.5060924291610718,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.6239,
      "step": 20
    },
    {
      "epoch": 0.17596229379418696,
      "grad_norm": 0.5067716836929321,
      "learning_rate": 9.635919272833938e-05,
      "loss": 2.5558,
      "step": 21
    },
    {
      "epoch": 0.1843414506415292,
      "grad_norm": 0.5283418297767639,
      "learning_rate": 9.567727288213005e-05,
      "loss": 2.6283,
      "step": 22
    },
    {
      "epoch": 0.19272060748887143,
      "grad_norm": 0.5521147847175598,
      "learning_rate": 9.493970231495835e-05,
      "loss": 2.5582,
      "step": 23
    },
    {
      "epoch": 0.20109976433621368,
      "grad_norm": 0.6335211396217346,
      "learning_rate": 9.414737964294636e-05,
      "loss": 2.6768,
      "step": 24
    },
    {
      "epoch": 0.2094789211835559,
      "grad_norm": 1.2375539541244507,
      "learning_rate": 9.330127018922194e-05,
      "loss": 2.6038,
      "step": 25
    },
    {
      "epoch": 0.21785807803089813,
      "grad_norm": 1.3271223306655884,
      "learning_rate": 9.24024048078213e-05,
      "loss": 2.5917,
      "step": 26
    },
    {
      "epoch": 0.22623723487824038,
      "grad_norm": 0.4055769145488739,
      "learning_rate": 9.145187862775209e-05,
      "loss": 2.3628,
      "step": 27
    },
    {
      "epoch": 0.2346163917255826,
      "grad_norm": 0.964474081993103,
      "learning_rate": 9.045084971874738e-05,
      "loss": 2.5157,
      "step": 28
    },
    {
      "epoch": 0.24299554857292485,
      "grad_norm": 0.6849140524864197,
      "learning_rate": 8.940053768033609e-05,
      "loss": 2.6839,
      "step": 29
    },
    {
      "epoch": 0.2513747054202671,
      "grad_norm": 0.4390021562576294,
      "learning_rate": 8.83022221559489e-05,
      "loss": 2.5509,
      "step": 30
    },
    {
      "epoch": 0.2597538622676093,
      "grad_norm": 0.7394634485244751,
      "learning_rate": 8.715724127386972e-05,
      "loss": 2.5447,
      "step": 31
    },
    {
      "epoch": 0.2681330191149516,
      "grad_norm": 0.45920076966285706,
      "learning_rate": 8.596699001693255e-05,
      "loss": 2.5397,
      "step": 32
    },
    {
      "epoch": 0.27651217596229377,
      "grad_norm": 0.43246570229530334,
      "learning_rate": 8.473291852294987e-05,
      "loss": 2.6423,
      "step": 33
    },
    {
      "epoch": 0.284891332809636,
      "grad_norm": 0.38203856348991394,
      "learning_rate": 8.345653031794292e-05,
      "loss": 2.4129,
      "step": 34
    },
    {
      "epoch": 0.2932704896569783,
      "grad_norm": 0.37578803300857544,
      "learning_rate": 8.213938048432697e-05,
      "loss": 2.5367,
      "step": 35
    },
    {
      "epoch": 0.3016496465043205,
      "grad_norm": 0.6231028437614441,
      "learning_rate": 8.07830737662829e-05,
      "loss": 2.504,
      "step": 36
    },
    {
      "epoch": 0.3100288033516627,
      "grad_norm": 0.48215481638908386,
      "learning_rate": 7.938926261462366e-05,
      "loss": 2.5128,
      "step": 37
    },
    {
      "epoch": 0.31840796019900497,
      "grad_norm": 0.8526724576950073,
      "learning_rate": 7.795964517353735e-05,
      "loss": 2.4417,
      "step": 38
    },
    {
      "epoch": 0.3267871170463472,
      "grad_norm": 0.42325401306152344,
      "learning_rate": 7.649596321166024e-05,
      "loss": 2.484,
      "step": 39
    },
    {
      "epoch": 0.33516627389368947,
      "grad_norm": 0.45637720823287964,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.4911,
      "step": 40
    },
    {
      "epoch": 0.34354543074103167,
      "grad_norm": 0.5629859566688538,
      "learning_rate": 7.347357813929454e-05,
      "loss": 2.4716,
      "step": 41
    },
    {
      "epoch": 0.3519245875883739,
      "grad_norm": 0.6218668222427368,
      "learning_rate": 7.191855733945387e-05,
      "loss": 2.3725,
      "step": 42
    },
    {
      "epoch": 0.36030374443571617,
      "grad_norm": 0.5280422568321228,
      "learning_rate": 7.033683215379002e-05,
      "loss": 2.6807,
      "step": 43
    },
    {
      "epoch": 0.3686829012830584,
      "grad_norm": 0.4046926200389862,
      "learning_rate": 6.873032967079561e-05,
      "loss": 2.4806,
      "step": 44
    },
    {
      "epoch": 0.3770620581304006,
      "grad_norm": 0.3766598701477051,
      "learning_rate": 6.710100716628344e-05,
      "loss": 2.2953,
      "step": 45
    },
    {
      "epoch": 0.38544121497774286,
      "grad_norm": 0.5563957691192627,
      "learning_rate": 6.545084971874738e-05,
      "loss": 2.5368,
      "step": 46
    },
    {
      "epoch": 0.3938203718250851,
      "grad_norm": 0.403656542301178,
      "learning_rate": 6.378186779084995e-05,
      "loss": 2.4207,
      "step": 47
    },
    {
      "epoch": 0.40219952867242736,
      "grad_norm": 0.4429378807544708,
      "learning_rate": 6.209609477998338e-05,
      "loss": 2.637,
      "step": 48
    },
    {
      "epoch": 0.41057868551976956,
      "grad_norm": 0.46513986587524414,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 2.3988,
      "step": 49
    },
    {
      "epoch": 0.4189578423671118,
      "grad_norm": 0.4939236640930176,
      "learning_rate": 5.868240888334653e-05,
      "loss": 2.6023,
      "step": 50
    },
    {
      "epoch": 0.4189578423671118,
      "eval_loss": 2.578394889831543,
      "eval_runtime": 42.4269,
      "eval_samples_per_second": 4.761,
      "eval_steps_per_second": 4.761,
      "step": 50
    },
    {
      "epoch": 0.42733699921445406,
      "grad_norm": 0.4209323823451996,
      "learning_rate": 5.695865504800327e-05,
      "loss": 2.4749,
      "step": 51
    },
    {
      "epoch": 0.43571615606179626,
      "grad_norm": 0.4266431927680969,
      "learning_rate": 5.522642316338268e-05,
      "loss": 2.4692,
      "step": 52
    },
    {
      "epoch": 0.4440953129091385,
      "grad_norm": 0.3953860402107239,
      "learning_rate": 5.348782368720626e-05,
      "loss": 2.3527,
      "step": 53
    },
    {
      "epoch": 0.45247446975648076,
      "grad_norm": 0.4726410210132599,
      "learning_rate": 5.174497483512506e-05,
      "loss": 2.4229,
      "step": 54
    },
    {
      "epoch": 0.460853626603823,
      "grad_norm": 0.4370991289615631,
      "learning_rate": 5e-05,
      "loss": 2.5421,
      "step": 55
    },
    {
      "epoch": 0.4692327834511652,
      "grad_norm": 0.4647808074951172,
      "learning_rate": 4.825502516487497e-05,
      "loss": 2.5684,
      "step": 56
    },
    {
      "epoch": 0.47761194029850745,
      "grad_norm": 0.4097454249858856,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 2.4871,
      "step": 57
    },
    {
      "epoch": 0.4859910971458497,
      "grad_norm": 0.4504952132701874,
      "learning_rate": 4.477357683661734e-05,
      "loss": 2.4055,
      "step": 58
    },
    {
      "epoch": 0.49437025399319195,
      "grad_norm": 0.461972177028656,
      "learning_rate": 4.3041344951996746e-05,
      "loss": 2.5632,
      "step": 59
    },
    {
      "epoch": 0.5027494108405341,
      "grad_norm": 0.42533305287361145,
      "learning_rate": 4.131759111665349e-05,
      "loss": 2.4914,
      "step": 60
    },
    {
      "epoch": 0.5111285676878764,
      "grad_norm": 0.4510684311389923,
      "learning_rate": 3.960441545911204e-05,
      "loss": 2.5091,
      "step": 61
    },
    {
      "epoch": 0.5195077245352187,
      "grad_norm": 0.42407482862472534,
      "learning_rate": 3.790390522001662e-05,
      "loss": 2.3668,
      "step": 62
    },
    {
      "epoch": 0.5278868813825609,
      "grad_norm": 0.431485116481781,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 2.4541,
      "step": 63
    },
    {
      "epoch": 0.5362660382299032,
      "grad_norm": 0.4215572774410248,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 2.3595,
      "step": 64
    },
    {
      "epoch": 0.5446451950772454,
      "grad_norm": 0.5403610467910767,
      "learning_rate": 3.289899283371657e-05,
      "loss": 2.7011,
      "step": 65
    },
    {
      "epoch": 0.5530243519245875,
      "grad_norm": 0.43399372696876526,
      "learning_rate": 3.12696703292044e-05,
      "loss": 2.367,
      "step": 66
    },
    {
      "epoch": 0.5614035087719298,
      "grad_norm": 0.49265211820602417,
      "learning_rate": 2.9663167846209998e-05,
      "loss": 2.3943,
      "step": 67
    },
    {
      "epoch": 0.569782665619272,
      "grad_norm": 0.4823525547981262,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 2.3512,
      "step": 68
    },
    {
      "epoch": 0.5781618224666143,
      "grad_norm": 0.4148399829864502,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 2.4461,
      "step": 69
    },
    {
      "epoch": 0.5865409793139565,
      "grad_norm": 0.45690131187438965,
      "learning_rate": 2.500000000000001e-05,
      "loss": 2.3753,
      "step": 70
    },
    {
      "epoch": 0.5949201361612988,
      "grad_norm": 0.3864487111568451,
      "learning_rate": 2.350403678833976e-05,
      "loss": 2.3544,
      "step": 71
    },
    {
      "epoch": 0.603299293008641,
      "grad_norm": 0.4677547812461853,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 2.5031,
      "step": 72
    },
    {
      "epoch": 0.6116784498559832,
      "grad_norm": 0.37837573885917664,
      "learning_rate": 2.061073738537635e-05,
      "loss": 2.3215,
      "step": 73
    },
    {
      "epoch": 0.6200576067033254,
      "grad_norm": 0.47641924023628235,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 2.4183,
      "step": 74
    },
    {
      "epoch": 0.6284367635506677,
      "grad_norm": 0.5523366928100586,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 2.5932,
      "step": 75
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 4.88624772612096e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}