{
"best_metric": 3.2130022048950195,
"best_model_checkpoint": "/home/p318482/babyLM_controlled/models_trained_recent/mlm_unmasking/en_mlm_new/childes_mlm_unmasking_sent_13/checkpoint-40000",
"epoch": 1.6295390996796475,
"eval_steps": 2000,
"global_step": 44000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07406995907634761,
"eval_loss": 5.4637370109558105,
"eval_runtime": 24.2102,
"eval_samples_per_second": 1237.041,
"eval_steps_per_second": 77.323,
"step": 2000
},
{
"epoch": 0.14813991815269523,
"grad_norm": 4.785717487335205,
"learning_rate": 4e-05,
"loss": 6.1476,
"step": 4000
},
{
"epoch": 0.14813991815269523,
"eval_loss": 4.718623161315918,
"eval_runtime": 24.2123,
"eval_samples_per_second": 1236.934,
"eval_steps_per_second": 77.316,
"step": 4000
},
{
"epoch": 0.22220987722904284,
"eval_loss": 4.352034091949463,
"eval_runtime": 24.1788,
"eval_samples_per_second": 1238.646,
"eval_steps_per_second": 77.423,
"step": 6000
},
{
"epoch": 0.29627983630539045,
"grad_norm": 7.057693004608154,
"learning_rate": 8e-05,
"loss": 4.4471,
"step": 8000
},
{
"epoch": 0.29627983630539045,
"eval_loss": 3.99312686920166,
"eval_runtime": 24.1035,
"eval_samples_per_second": 1242.518,
"eval_steps_per_second": 77.665,
"step": 8000
},
{
"epoch": 0.37034979538173807,
"eval_loss": 3.763000965118408,
"eval_runtime": 24.2844,
"eval_samples_per_second": 1233.262,
"eval_steps_per_second": 77.087,
"step": 10000
},
{
"epoch": 0.4444197544580857,
"grad_norm": 8.325577735900879,
"learning_rate": 0.00012,
"loss": 3.8939,
"step": 12000
},
{
"epoch": 0.4444197544580857,
"eval_loss": 3.6284842491149902,
"eval_runtime": 24.245,
"eval_samples_per_second": 1235.265,
"eval_steps_per_second": 77.212,
"step": 12000
},
{
"epoch": 0.5184897135344333,
"eval_loss": 3.4887945652008057,
"eval_runtime": 24.2373,
"eval_samples_per_second": 1235.655,
"eval_steps_per_second": 77.236,
"step": 14000
},
{
"epoch": 0.5925596726107809,
"grad_norm": 10.0059232711792,
"learning_rate": 0.00016,
"loss": 3.6013,
"step": 16000
},
{
"epoch": 0.5925596726107809,
"eval_loss": 3.443763256072998,
"eval_runtime": 24.5656,
"eval_samples_per_second": 1219.141,
"eval_steps_per_second": 76.204,
"step": 16000
},
{
"epoch": 0.6666296316871285,
"eval_loss": 3.3501055240631104,
"eval_runtime": 24.1675,
"eval_samples_per_second": 1239.227,
"eval_steps_per_second": 77.459,
"step": 18000
},
{
"epoch": 0.7406995907634761,
"grad_norm": 5.149896621704102,
"learning_rate": 0.0002,
"loss": 3.4806,
"step": 20000
},
{
"epoch": 0.7406995907634761,
"eval_loss": 3.362109899520874,
"eval_runtime": 24.3668,
"eval_samples_per_second": 1229.089,
"eval_steps_per_second": 76.826,
"step": 20000
},
{
"epoch": 0.8147695498398237,
"eval_loss": 3.317254066467285,
"eval_runtime": 24.2052,
"eval_samples_per_second": 1237.297,
"eval_steps_per_second": 77.339,
"step": 22000
},
{
"epoch": 0.8888395089161714,
"grad_norm": 6.9324188232421875,
"learning_rate": 0.00024,
"loss": 3.4164,
"step": 24000
},
{
"epoch": 0.8888395089161714,
"eval_loss": 3.2997493743896484,
"eval_runtime": 24.1632,
"eval_samples_per_second": 1239.447,
"eval_steps_per_second": 77.473,
"step": 24000
},
{
"epoch": 0.962909467992519,
"eval_loss": 3.279545545578003,
"eval_runtime": 24.2189,
"eval_samples_per_second": 1236.597,
"eval_steps_per_second": 77.295,
"step": 26000
},
{
"epoch": 1.0369794270688666,
"grad_norm": 6.834834098815918,
"learning_rate": 0.00028000000000000003,
"loss": 3.3837,
"step": 28000
},
{
"epoch": 1.0369794270688666,
"eval_loss": 3.295776844024658,
"eval_runtime": 24.2221,
"eval_samples_per_second": 1236.433,
"eval_steps_per_second": 77.285,
"step": 28000
},
{
"epoch": 1.1110493861452142,
"eval_loss": 3.2695140838623047,
"eval_runtime": 24.1872,
"eval_samples_per_second": 1238.217,
"eval_steps_per_second": 77.396,
"step": 30000
},
{
"epoch": 1.1851193452215618,
"grad_norm": 5.360170841217041,
"learning_rate": 0.00032,
"loss": 3.3341,
"step": 32000
},
{
"epoch": 1.1851193452215618,
"eval_loss": 3.2437243461608887,
"eval_runtime": 24.189,
"eval_samples_per_second": 1238.122,
"eval_steps_per_second": 77.39,
"step": 32000
},
{
"epoch": 1.2591893042979094,
"eval_loss": 3.2345123291015625,
"eval_runtime": 24.206,
"eval_samples_per_second": 1237.255,
"eval_steps_per_second": 77.336,
"step": 34000
},
{
"epoch": 1.333259263374257,
"grad_norm": 5.8984575271606445,
"learning_rate": 0.00035999999999999997,
"loss": 3.332,
"step": 36000
},
{
"epoch": 1.333259263374257,
"eval_loss": 3.215236186981201,
"eval_runtime": 24.2072,
"eval_samples_per_second": 1237.196,
"eval_steps_per_second": 77.332,
"step": 36000
},
{
"epoch": 1.4073292224506047,
"eval_loss": 3.2174911499023438,
"eval_runtime": 24.2689,
"eval_samples_per_second": 1234.051,
"eval_steps_per_second": 77.136,
"step": 38000
},
{
"epoch": 1.4813991815269523,
"grad_norm": 7.638304710388184,
"learning_rate": 0.0004,
"loss": 3.3094,
"step": 40000
},
{
"epoch": 1.4813991815269523,
"eval_loss": 3.2130022048950195,
"eval_runtime": 28.0919,
"eval_samples_per_second": 1066.108,
"eval_steps_per_second": 66.638,
"step": 40000
},
{
"epoch": 1.5554691406032997,
"eval_loss": 3.2210235595703125,
"eval_runtime": 27.2599,
"eval_samples_per_second": 1098.646,
"eval_steps_per_second": 68.672,
"step": 42000
},
{
"epoch": 1.6295390996796475,
"grad_norm": 4.747150421142578,
"learning_rate": 0.00044,
"loss": 3.3162,
"step": 44000
},
{
"epoch": 1.6295390996796475,
"eval_loss": NaN,
"eval_runtime": 28.3768,
"eval_samples_per_second": 1055.403,
"eval_steps_per_second": 65.969,
"step": 44000
}
],
"logging_steps": 4000,
"max_steps": 400000,
"num_input_tokens_seen": 0,
"num_train_epochs": 15,
"save_steps": 4000,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 3,
"early_stopping_threshold": 0.001
},
"attributes": {
"early_stopping_patience_counter": 2
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9180817918525440.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}