{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.9987257088244665,
"eval_steps": 1569,
"global_step": 6276,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.24976107040458745,
"grad_norm": 3.5836572647094727,
"learning_rate": 2.4984066284257492e-06,
"loss": 3.2128,
"step": 392
},
{
"epoch": 0.4995221408091749,
"grad_norm": 3.5956788063049316,
"learning_rate": 4.9968132568514985e-06,
"loss": 2.232,
"step": 784
},
{
"epoch": 0.7492832112137623,
"grad_norm": 5.461869239807129,
"learning_rate": 7.495219885277247e-06,
"loss": 1.8585,
"step": 1176
},
{
"epoch": 0.9990442816183498,
"grad_norm": 4.789157390594482,
"learning_rate": 9.993626513702997e-06,
"loss": 1.663,
"step": 1568
},
{
"epoch": 1.2488053520229372,
"grad_norm": 3.923671245574951,
"learning_rate": 9.37719018795795e-06,
"loss": 1.5195,
"step": 1960
},
{
"epoch": 1.4985664224275248,
"grad_norm": 4.947878360748291,
"learning_rate": 8.75278751194648e-06,
"loss": 1.481,
"step": 2352
},
{
"epoch": 1.7483274928321122,
"grad_norm": 3.5249428749084473,
"learning_rate": 8.128384835935011e-06,
"loss": 1.4036,
"step": 2744
},
{
"epoch": 1.9980885632366996,
"grad_norm": 1.927209734916687,
"learning_rate": 7.503982159923543e-06,
"loss": 1.3675,
"step": 3136
},
{
"epoch": 2.247849633641287,
"grad_norm": 5.133701324462891,
"learning_rate": 6.8795794839120745e-06,
"loss": 1.2484,
"step": 3528
},
{
"epoch": 2.4976107040458744,
"grad_norm": 5.654458045959473,
"learning_rate": 6.255176807900606e-06,
"loss": 1.1997,
"step": 3920
},
{
"epoch": 2.7473717744504618,
"grad_norm": 7.116914749145508,
"learning_rate": 5.630774131889137e-06,
"loss": 1.2026,
"step": 4312
},
{
"epoch": 2.9971328448550496,
"grad_norm": 5.798983573913574,
"learning_rate": 5.006371455877669e-06,
"loss": 1.1859,
"step": 4704
},
{
"epoch": 3.246893915259637,
"grad_norm": 5.535274505615234,
"learning_rate": 4.3819687798662e-06,
"loss": 1.0458,
"step": 5096
},
{
"epoch": 3.4966549856642244,
"grad_norm": 5.662832736968994,
"learning_rate": 3.757566103854731e-06,
"loss": 1.0485,
"step": 5488
},
{
"epoch": 3.746416056068812,
"grad_norm": 5.849664211273193,
"learning_rate": 3.1331634278432627e-06,
"loss": 1.0564,
"step": 5880
},
{
"epoch": 3.996177126473399,
"grad_norm": 6.500792026519775,
"learning_rate": 2.5087607518317936e-06,
"loss": 1.0403,
"step": 6272
}
],
"logging_steps": 392,
"max_steps": 7847,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 1569,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}