{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.5710206995003569,
"eval_steps": 9,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.005710206995003569,
"eval_loss": 4.834342956542969,
"eval_runtime": 65.5166,
"eval_samples_per_second": 9.005,
"eval_steps_per_second": 4.503,
"step": 1
},
{
"epoch": 0.017130620985010708,
"grad_norm": 3.7000343799591064,
"learning_rate": 3e-05,
"loss": 4.5765,
"step": 3
},
{
"epoch": 0.034261241970021415,
"grad_norm": 4.092134475708008,
"learning_rate": 6e-05,
"loss": 4.5819,
"step": 6
},
{
"epoch": 0.05139186295503212,
"grad_norm": 4.719603538513184,
"learning_rate": 9e-05,
"loss": 3.6777,
"step": 9
},
{
"epoch": 0.05139186295503212,
"eval_loss": 3.054090738296509,
"eval_runtime": 65.7842,
"eval_samples_per_second": 8.969,
"eval_steps_per_second": 4.484,
"step": 9
},
{
"epoch": 0.06852248394004283,
"grad_norm": 3.8146257400512695,
"learning_rate": 0.00012,
"loss": 2.8501,
"step": 12
},
{
"epoch": 0.08565310492505353,
"grad_norm": 2.3158440589904785,
"learning_rate": 0.00015000000000000001,
"loss": 2.4364,
"step": 15
},
{
"epoch": 0.10278372591006424,
"grad_norm": 1.6497728824615479,
"learning_rate": 0.00018,
"loss": 2.1518,
"step": 18
},
{
"epoch": 0.10278372591006424,
"eval_loss": 2.1508984565734863,
"eval_runtime": 65.7679,
"eval_samples_per_second": 8.971,
"eval_steps_per_second": 4.485,
"step": 18
},
{
"epoch": 0.11991434689507495,
"grad_norm": 1.816499948501587,
"learning_rate": 0.0001999229036240723,
"loss": 2.1273,
"step": 21
},
{
"epoch": 0.13704496788008566,
"grad_norm": 2.241565227508545,
"learning_rate": 0.00019876883405951377,
"loss": 2.0534,
"step": 24
},
{
"epoch": 0.15417558886509636,
"grad_norm": 1.3755393028259277,
"learning_rate": 0.00019624552364536473,
"loss": 2.0302,
"step": 27
},
{
"epoch": 0.15417558886509636,
"eval_loss": 2.0600290298461914,
"eval_runtime": 65.7463,
"eval_samples_per_second": 8.974,
"eval_steps_per_second": 4.487,
"step": 27
},
{
"epoch": 0.17130620985010706,
"grad_norm": 1.3961236476898193,
"learning_rate": 0.0001923879532511287,
"loss": 2.0442,
"step": 30
},
{
"epoch": 0.18843683083511778,
"grad_norm": 1.2159768342971802,
"learning_rate": 0.00018724960070727972,
"loss": 1.9398,
"step": 33
},
{
"epoch": 0.20556745182012848,
"grad_norm": 1.243770718574524,
"learning_rate": 0.00018090169943749476,
"loss": 2.0249,
"step": 36
},
{
"epoch": 0.20556745182012848,
"eval_loss": 2.0228545665740967,
"eval_runtime": 65.7648,
"eval_samples_per_second": 8.971,
"eval_steps_per_second": 4.486,
"step": 36
},
{
"epoch": 0.22269807280513917,
"grad_norm": 1.321190595626831,
"learning_rate": 0.00017343225094356855,
"loss": 2.0458,
"step": 39
},
{
"epoch": 0.2398286937901499,
"grad_norm": 1.0691664218902588,
"learning_rate": 0.00016494480483301836,
"loss": 1.9291,
"step": 42
},
{
"epoch": 0.2569593147751606,
"grad_norm": 1.1871341466903687,
"learning_rate": 0.00015555702330196023,
"loss": 1.9359,
"step": 45
},
{
"epoch": 0.2569593147751606,
"eval_loss": 2.0000336170196533,
"eval_runtime": 65.6889,
"eval_samples_per_second": 8.982,
"eval_steps_per_second": 4.491,
"step": 45
},
{
"epoch": 0.2740899357601713,
"grad_norm": 1.500360131263733,
"learning_rate": 0.00014539904997395468,
"loss": 2.0105,
"step": 48
},
{
"epoch": 0.291220556745182,
"grad_norm": 1.301650881767273,
"learning_rate": 0.0001346117057077493,
"loss": 1.9484,
"step": 51
},
{
"epoch": 0.3083511777301927,
"grad_norm": 1.1405830383300781,
"learning_rate": 0.00012334453638559057,
"loss": 1.8932,
"step": 54
},
{
"epoch": 0.3083511777301927,
"eval_loss": 1.986473798751831,
"eval_runtime": 65.7166,
"eval_samples_per_second": 8.978,
"eval_steps_per_second": 4.489,
"step": 54
},
{
"epoch": 0.32548179871520344,
"grad_norm": 1.0970661640167236,
"learning_rate": 0.00011175373974578378,
"loss": 1.9572,
"step": 57
},
{
"epoch": 0.3426124197002141,
"grad_norm": 1.1758753061294556,
"learning_rate": 0.0001,
"loss": 2.0218,
"step": 60
},
{
"epoch": 0.35974304068522484,
"grad_norm": 1.2941383123397827,
"learning_rate": 8.824626025421626e-05,
"loss": 1.9693,
"step": 63
},
{
"epoch": 0.35974304068522484,
"eval_loss": 1.9790188074111938,
"eval_runtime": 65.5469,
"eval_samples_per_second": 9.001,
"eval_steps_per_second": 4.501,
"step": 63
},
{
"epoch": 0.37687366167023556,
"grad_norm": 1.0595579147338867,
"learning_rate": 7.66554636144095e-05,
"loss": 1.9072,
"step": 66
},
{
"epoch": 0.39400428265524623,
"grad_norm": 1.2764127254486084,
"learning_rate": 6.538829429225069e-05,
"loss": 2.0075,
"step": 69
},
{
"epoch": 0.41113490364025695,
"grad_norm": 1.0607476234436035,
"learning_rate": 5.4600950026045326e-05,
"loss": 1.9258,
"step": 72
},
{
"epoch": 0.41113490364025695,
"eval_loss": 1.9672698974609375,
"eval_runtime": 65.5425,
"eval_samples_per_second": 9.002,
"eval_steps_per_second": 4.501,
"step": 72
},
{
"epoch": 0.4282655246252677,
"grad_norm": 1.078697919845581,
"learning_rate": 4.444297669803981e-05,
"loss": 1.9402,
"step": 75
},
{
"epoch": 0.44539614561027835,
"grad_norm": 1.0098077058792114,
"learning_rate": 3.5055195166981645e-05,
"loss": 1.947,
"step": 78
},
{
"epoch": 0.4625267665952891,
"grad_norm": 1.063792109489441,
"learning_rate": 2.6567749056431467e-05,
"loss": 1.9917,
"step": 81
},
{
"epoch": 0.4625267665952891,
"eval_loss": 1.962007999420166,
"eval_runtime": 65.5561,
"eval_samples_per_second": 9.0,
"eval_steps_per_second": 4.5,
"step": 81
},
{
"epoch": 0.4796573875802998,
"grad_norm": 1.0382856130599976,
"learning_rate": 1.9098300562505266e-05,
"loss": 2.0827,
"step": 84
},
{
"epoch": 0.49678800856531047,
"grad_norm": 1.0031726360321045,
"learning_rate": 1.2750399292720283e-05,
"loss": 1.9068,
"step": 87
},
{
"epoch": 0.5139186295503212,
"grad_norm": 1.1354076862335205,
"learning_rate": 7.612046748871327e-06,
"loss": 1.9417,
"step": 90
},
{
"epoch": 0.5139186295503212,
"eval_loss": 1.9562921524047852,
"eval_runtime": 65.5479,
"eval_samples_per_second": 9.001,
"eval_steps_per_second": 4.501,
"step": 90
},
{
"epoch": 0.5310492505353319,
"grad_norm": 1.0723165273666382,
"learning_rate": 3.7544763546352834e-06,
"loss": 1.894,
"step": 93
},
{
"epoch": 0.5481798715203426,
"grad_norm": 1.0611053705215454,
"learning_rate": 1.231165940486234e-06,
"loss": 1.8989,
"step": 96
},
{
"epoch": 0.5653104925053534,
"grad_norm": 1.0602564811706543,
"learning_rate": 7.709637592770991e-08,
"loss": 1.939,
"step": 99
},
{
"epoch": 0.5653104925053534,
"eval_loss": 1.9549332857131958,
"eval_runtime": 65.5189,
"eval_samples_per_second": 9.005,
"eval_steps_per_second": 4.503,
"step": 99
}
],
"logging_steps": 3,
"max_steps": 100,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 9,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.968083617316864e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
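
For reference, a minimal sketch of inspecting this trainer state with Python's standard json module. It assumes the JSON above is saved as `trainer_state.json` (the usual filename the Transformers Trainer writes inside a checkpoint directory; adjust the path if yours differs) and simply pulls the `eval_loss` entries out of `log_history`:

```python
import json

# Load the trainer state saved with this checkpoint.
# "trainer_state.json" is the conventional filename; change it if needed.
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training entries (loss, learning_rate, grad_norm) and
# evaluation entries (eval_loss, eval_runtime, ...), so filter on the key.
eval_points = [
    (entry["step"], entry["eval_loss"])
    for entry in state["log_history"]
    if "eval_loss" in entry
]

# Print the evaluation-loss trajectory (steps 1, 9, 18, ..., 99 above).
for step, loss in eval_points:
    print(f"step {step:3d}  eval_loss {loss:.4f}")
```

This is only a reading aid for the data above, not part of the checkpoint itself; the same pattern works for plotting the training `loss` or `learning_rate` columns.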