{ "best_metric": 1.653587818145752, "best_model_checkpoint": "./pippa-sharegpt-13b-qlora/checkpoint-50", "epoch": 0.5277044854881267, "eval_steps": 50, "global_step": 50, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.01, "learning_rate": 2e-05, "loss": 1.9036, "step": 1 }, { "epoch": 0.02, "learning_rate": 4e-05, "loss": 1.9209, "step": 2 }, { "epoch": 0.03, "learning_rate": 6e-05, "loss": 1.9161, "step": 3 }, { "epoch": 0.04, "learning_rate": 8e-05, "loss": 1.8722, "step": 4 }, { "epoch": 0.05, "learning_rate": 0.0001, "loss": 1.866, "step": 5 }, { "epoch": 0.06, "learning_rate": 0.00012, "loss": 1.872, "step": 6 }, { "epoch": 0.07, "learning_rate": 0.00014, "loss": 1.9138, "step": 7 }, { "epoch": 0.08, "learning_rate": 0.00016, "loss": 1.8785, "step": 8 }, { "epoch": 0.09, "learning_rate": 0.00018, "loss": 1.8013, "step": 9 }, { "epoch": 0.11, "learning_rate": 0.0002, "loss": 1.7859, "step": 10 }, { "epoch": 0.12, "learning_rate": 0.00019999332998034514, "loss": 1.7805, "step": 11 }, { "epoch": 0.13, "learning_rate": 0.00019997332081116373, "loss": 1.7716, "step": 12 }, { "epoch": 0.14, "learning_rate": 0.00019993997516168689, "loss": 1.7453, "step": 13 }, { "epoch": 0.15, "learning_rate": 0.00019989329748023725, "loss": 1.7306, "step": 14 }, { "epoch": 0.16, "learning_rate": 0.00019983329399363598, "loss": 1.7535, "step": 15 }, { "epoch": 0.17, "learning_rate": 0.0001997599727063717, "loss": 1.7811, "step": 16 }, { "epoch": 0.18, "learning_rate": 0.000199673343399533, "loss": 1.7455, "step": 17 }, { "epoch": 0.19, "learning_rate": 0.00019957341762950344, "loss": 1.748, "step": 18 }, { "epoch": 0.2, "learning_rate": 0.0001994602087264201, "loss": 1.723, "step": 19 }, { "epoch": 0.21, "learning_rate": 0.00019933373179239502, "loss": 1.7539, "step": 20 }, { "epoch": 0.22, "learning_rate": 0.000199194003699501, "loss": 1.7293, "step": 21 }, { "epoch": 0.23, "learning_rate": 0.0001990410430875205, "loss": 1.6857, "step": 22 }, { "epoch": 0.24, "learning_rate": 0.0001988748703614594, "loss": 1.7209, "step": 23 }, { "epoch": 0.25, "learning_rate": 0.00019869550768882455, "loss": 1.7127, "step": 24 }, { "epoch": 0.26, "learning_rate": 0.0001985029789966671, "loss": 1.7024, "step": 25 }, { "epoch": 0.27, "learning_rate": 0.0001982973099683902, "loss": 1.6927, "step": 26 }, { "epoch": 0.28, "learning_rate": 0.00019807852804032305, "loss": 1.6584, "step": 27 }, { "epoch": 0.3, "learning_rate": 0.0001978466623980609, "loss": 1.7149, "step": 28 }, { "epoch": 0.31, "learning_rate": 0.00019760174397257156, "loss": 1.6815, "step": 29 }, { "epoch": 0.32, "learning_rate": 0.0001973438054360693, "loss": 1.7067, "step": 30 }, { "epoch": 0.33, "learning_rate": 0.00019707288119765623, "loss": 1.6799, "step": 31 }, { "epoch": 0.34, "learning_rate": 0.00019678900739873226, "loss": 1.7206, "step": 32 }, { "epoch": 0.35, "learning_rate": 0.0001964922219081738, "loss": 1.6458, "step": 33 }, { "epoch": 0.36, "learning_rate": 0.00019618256431728194, "loss": 1.6649, "step": 34 }, { "epoch": 0.37, "learning_rate": 0.00019586007593450097, "loss": 1.6684, "step": 35 }, { "epoch": 0.38, "learning_rate": 0.000195524799779908, "loss": 1.7411, "step": 36 }, { "epoch": 0.39, "learning_rate": 0.00019517678057947384, "loss": 1.6678, "step": 37 }, { "epoch": 0.4, "learning_rate": 0.0001948160647590966, "loss": 1.6738, "step": 38 }, { "epoch": 0.41, "learning_rate": 0.00019444270043840852, "loss": 1.6804, "step": 39 }, { "epoch": 0.42, 
"learning_rate": 0.00019405673742435678, "loss": 1.6728, "step": 40 }, { "epoch": 0.43, "learning_rate": 0.00019365822720455916, "loss": 1.6941, "step": 41 }, { "epoch": 0.44, "learning_rate": 0.00019324722294043558, "loss": 1.6861, "step": 42 }, { "epoch": 0.45, "learning_rate": 0.00019282377946011652, "loss": 1.6751, "step": 43 }, { "epoch": 0.46, "learning_rate": 0.0001923879532511287, "loss": 1.7175, "step": 44 }, { "epoch": 0.47, "learning_rate": 0.00019193980245285966, "loss": 1.6454, "step": 45 }, { "epoch": 0.49, "learning_rate": 0.0001914793868488021, "loss": 1.6934, "step": 46 }, { "epoch": 0.5, "learning_rate": 0.0001910067678585786, "loss": 1.6534, "step": 47 }, { "epoch": 0.51, "learning_rate": 0.00019052200852974819, "loss": 1.6797, "step": 48 }, { "epoch": 0.52, "learning_rate": 0.00019002517352939598, "loss": 1.6566, "step": 49 }, { "epoch": 0.53, "learning_rate": 0.00018951632913550626, "loss": 1.657, "step": 50 }, { "epoch": 0.53, "eval_loss": 1.653587818145752, "eval_runtime": 200.2473, "eval_samples_per_second": 6.672, "eval_steps_per_second": 0.21, "step": 50 } ], "logging_steps": 1, "max_steps": 282, "num_train_epochs": 3, "save_steps": 50, "total_flos": 5.1673281053956506e+17, "trial_name": null, "trial_params": null }