{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.001620285980475554,
  "eval_steps": 10,
  "global_step": 40,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 4.050714951188885e-05,
      "eval_loss": 2.130146026611328,
      "eval_runtime": 332.2487,
      "eval_samples_per_second": 7.822,
      "eval_steps_per_second": 3.913,
      "step": 1
    },
    {
      "epoch": 0.00012152144853566654,
      "grad_norm": 1.2408714294433594,
      "learning_rate": 6e-05,
      "loss": 2.1776,
      "step": 3
    },
    {
      "epoch": 0.00024304289707133308,
      "grad_norm": 1.1398706436157227,
      "learning_rate": 0.00012,
      "loss": 2.3135,
      "step": 6
    },
    {
      "epoch": 0.0003645643456069996,
      "grad_norm": 1.4346201419830322,
      "learning_rate": 0.00018,
      "loss": 2.037,
      "step": 9
    },
    {
      "epoch": 0.0004050714951188885,
      "eval_loss": 2.0305228233337402,
      "eval_runtime": 335.7037,
      "eval_samples_per_second": 7.742,
      "eval_steps_per_second": 3.872,
      "step": 10
    },
    {
      "epoch": 0.00048608579414266616,
      "grad_norm": 1.5074211359024048,
      "learning_rate": 0.00019781476007338058,
      "loss": 2.0111,
      "step": 12
    },
    {
      "epoch": 0.0006076072426783327,
      "grad_norm": 1.5198981761932373,
      "learning_rate": 0.00018660254037844388,
      "loss": 2.1894,
      "step": 15
    },
    {
      "epoch": 0.0007291286912139992,
      "grad_norm": 1.4036415815353394,
      "learning_rate": 0.00016691306063588583,
      "loss": 2.1162,
      "step": 18
    },
    {
      "epoch": 0.000810142990237777,
      "eval_loss": 1.9362800121307373,
      "eval_runtime": 331.4839,
      "eval_samples_per_second": 7.841,
      "eval_steps_per_second": 3.922,
      "step": 20
    },
    {
      "epoch": 0.0008506501397496658,
      "grad_norm": 1.4190822839736938,
      "learning_rate": 0.00014067366430758004,
      "loss": 1.7328,
      "step": 21
    },
    {
      "epoch": 0.0009721715882853323,
      "grad_norm": 1.5082824230194092,
      "learning_rate": 0.00011045284632676536,
      "loss": 1.7984,
      "step": 24
    },
    {
      "epoch": 0.001093693036820999,
      "grad_norm": 1.2184019088745117,
      "learning_rate": 7.920883091822408e-05,
      "loss": 1.852,
      "step": 27
    },
    {
      "epoch": 0.0012152144853566655,
      "grad_norm": 1.190232515335083,
      "learning_rate": 5.000000000000002e-05,
      "loss": 1.8741,
      "step": 30
    },
    {
      "epoch": 0.0012152144853566655,
      "eval_loss": 1.899240255355835,
      "eval_runtime": 334.4518,
      "eval_samples_per_second": 7.771,
      "eval_steps_per_second": 3.887,
      "step": 30
    },
    {
      "epoch": 0.001336735933892332,
      "grad_norm": 1.4657238721847534,
      "learning_rate": 2.5685517452260567e-05,
      "loss": 2.0548,
      "step": 33
    },
    {
      "epoch": 0.0014582573824279985,
      "grad_norm": 1.554992437362671,
      "learning_rate": 8.645454235739903e-06,
      "loss": 2.1922,
      "step": 36
    },
    {
      "epoch": 0.001579778830963665,
      "grad_norm": 1.369084358215332,
      "learning_rate": 5.478104631726711e-07,
      "loss": 1.8381,
      "step": 39
    },
    {
      "epoch": 0.001620285980475554,
      "eval_loss": 1.891857385635376,
      "eval_runtime": 335.6622,
      "eval_samples_per_second": 7.743,
      "eval_steps_per_second": 3.873,
      "step": 40
    }
  ],
  "logging_steps": 3,
  "max_steps": 40,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6120570089373696.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}