{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.006073581439135122,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 3.036790719567561e-05,
      "eval_loss": 0.8727558255195618,
      "eval_runtime": 1304.3872,
      "eval_samples_per_second": 10.63,
      "eval_steps_per_second": 5.315,
      "step": 1
    },
    {
      "epoch": 0.0003036790719567561,
      "grad_norm": 0.7052744626998901,
      "learning_rate": 0.0002,
      "loss": 0.7843,
      "step": 10
    },
    {
      "epoch": 0.0006073581439135122,
      "grad_norm": 0.42328405380249023,
      "learning_rate": 0.0002,
      "loss": 0.6129,
      "step": 20
    },
    {
      "epoch": 0.0009110372158702683,
      "grad_norm": 0.4494202435016632,
      "learning_rate": 0.0002,
      "loss": 0.5938,
      "step": 30
    },
    {
      "epoch": 0.0012147162878270244,
      "grad_norm": 0.5013023614883423,
      "learning_rate": 0.0002,
      "loss": 0.6252,
      "step": 40
    },
    {
      "epoch": 0.0015183953597837805,
      "grad_norm": 0.473978728055954,
      "learning_rate": 0.0002,
      "loss": 0.6614,
      "step": 50
    },
    {
      "epoch": 0.0015183953597837805,
      "eval_loss": 0.6444454193115234,
      "eval_runtime": 1304.5841,
      "eval_samples_per_second": 10.628,
      "eval_steps_per_second": 5.314,
      "step": 50
    },
    {
      "epoch": 0.0018220744317405367,
      "grad_norm": 0.6711248159408569,
      "learning_rate": 0.0002,
      "loss": 0.7031,
      "step": 60
    },
    {
      "epoch": 0.002125753503697293,
      "grad_norm": 0.5438421964645386,
      "learning_rate": 0.0002,
      "loss": 0.6367,
      "step": 70
    },
    {
      "epoch": 0.0024294325756540488,
      "grad_norm": 0.5609766840934753,
      "learning_rate": 0.0002,
      "loss": 0.6457,
      "step": 80
    },
    {
      "epoch": 0.0027331116476108047,
      "grad_norm": 0.773842990398407,
      "learning_rate": 0.0002,
      "loss": 0.6022,
      "step": 90
    },
    {
      "epoch": 0.003036790719567561,
      "grad_norm": 0.4400677978992462,
      "learning_rate": 0.0002,
      "loss": 0.594,
      "step": 100
    },
    {
      "epoch": 0.003036790719567561,
      "eval_loss": 0.6259578466415405,
      "eval_runtime": 1306.1326,
      "eval_samples_per_second": 10.615,
      "eval_steps_per_second": 5.308,
      "step": 100
    },
    {
      "epoch": 0.003340469791524317,
      "grad_norm": 0.5265600681304932,
      "learning_rate": 0.0002,
      "loss": 0.6202,
      "step": 110
    },
    {
      "epoch": 0.0036441488634810734,
      "grad_norm": 0.6035001277923584,
      "learning_rate": 0.0002,
      "loss": 0.5943,
      "step": 120
    },
    {
      "epoch": 0.00394782793543783,
      "grad_norm": 0.5358197689056396,
      "learning_rate": 0.0002,
      "loss": 0.575,
      "step": 130
    },
    {
      "epoch": 0.004251507007394586,
      "grad_norm": 0.7852948904037476,
      "learning_rate": 0.0002,
      "loss": 0.6843,
      "step": 140
    },
    {
      "epoch": 0.004555186079351342,
      "grad_norm": 0.5582488179206848,
      "learning_rate": 0.0002,
      "loss": 0.6097,
      "step": 150
    },
    {
      "epoch": 0.004555186079351342,
      "eval_loss": 0.6131629347801208,
      "eval_runtime": 1306.016,
      "eval_samples_per_second": 10.616,
      "eval_steps_per_second": 5.309,
      "step": 150
    },
    {
      "epoch": 0.0048588651513080975,
      "grad_norm": 0.6850274801254272,
      "learning_rate": 0.0002,
      "loss": 0.5936,
      "step": 160
    },
    {
      "epoch": 0.0051625442232648535,
      "grad_norm": 0.7021895051002502,
      "learning_rate": 0.0002,
      "loss": 0.6144,
      "step": 170
    },
    {
      "epoch": 0.005466223295221609,
      "grad_norm": 0.575738251209259,
      "learning_rate": 0.0002,
      "loss": 0.6461,
      "step": 180
    },
    {
      "epoch": 0.005769902367178366,
      "grad_norm": 0.6139180064201355,
      "learning_rate": 0.0002,
      "loss": 0.5935,
      "step": 190
    },
    {
      "epoch": 0.006073581439135122,
      "grad_norm": 0.534476101398468,
      "learning_rate": 0.0002,
      "loss": 0.6254,
      "step": 200
    },
    {
      "epoch": 0.006073581439135122,
      "eval_loss": 0.6035110354423523,
      "eval_runtime": 1305.0507,
      "eval_samples_per_second": 10.624,
      "eval_steps_per_second": 5.312,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.20957446930432e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
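
As a reference for reading this checkpoint state, a minimal Python sketch (assuming the JSON above is saved locally as trainer_state.json; that filename is an assumption for illustration, not stated on this page) that prints the evaluation-loss curve recorded in log_history:

import json

# Load the saved trainer state (filename assumed for illustration).
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes training logs ("loss") and evaluation logs ("eval_loss");
# keep only the evaluation entries and print their loss per global step.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']}: eval_loss = {entry['eval_loss']:.4f}")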