besimray committed
Commit 29cabb1 · verified · 1 Parent(s): 1999f55

Training in progress, step 20, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6ed9554c2937f51a4b5d0b83c40d075655345afe20ad24bd196c0d354b7ee6d1
+oid sha256:121dc6327b5af86a847f01632d0e06385eb711c31e565456f5c6062f0cb1879f
 size 335604696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1365dae1b689bb4e2634db551077043caf2ad8fe42eccd1cf8743986544e61de
+oid sha256:e512cc25240e50625a18a266122f40a09cb785b35b3c6f0fb2afd353cf2d837b
 size 170920084
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fc1ac17452726e5cedfbf9cc2e40377e5a6129f49113fea151b529c4b6d68216
+oid sha256:3ec7fddaa93b8a5104562fd596c16565abf5deb95425dbc4839aa86c8fa34863
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a
+oid sha256:321b462a2538632d6d720f0cf198c8f471dee11f51db9b50cc50d1fa7f132bbe
 size 1064
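
The four LFS pointer diffs above replace only the sha256 oid of each checkpoint file; the recorded sizes are unchanged, so the step-20 checkpoint simply overwrote the step-10 artifacts under last-checkpoint/. A minimal sketch for checking a locally pulled file against the oid in its pointer, assuming the repository is checked out with LFS objects fetched (the expected digest below is taken from the adapter_model.safetensors diff):

```python
import hashlib
from pathlib import Path

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return the sha256 hex digest that Git LFS records as the pointer's oid."""
    digest = hashlib.sha256()
    with Path(path).open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid copied from the new adapter_model.safetensors pointer above;
# the relative path is an assumption about where the checkout lives.
expected = "121dc6327b5af86a847f01632d0e06385eb711c31e565456f5c6062f0cb1879f"
actual = lfs_sha256("last-checkpoint/adapter_model.safetensors")
print("OK" if actual == expected else f"MISMATCH: {actual}")
```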
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.38599851727485657,
-  "best_model_checkpoint": "miner_id_24/checkpoint-10",
-  "epoch": 0.018484288354898338,
+  "best_metric": 0.3295012414455414,
+  "best_model_checkpoint": "miner_id_24/checkpoint-20",
+  "epoch": 0.036968576709796676,
   "eval_steps": 5,
-  "global_step": 10,
+  "global_step": 20,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -101,6 +101,92 @@
       "eval_samples_per_second": 1.31,
       "eval_steps_per_second": 0.655,
       "step": 10
+    },
+    {
+      "epoch": 0.02033271719038817,
+      "grad_norm": 3.8678717613220215,
+      "learning_rate": 0.0001999979446958366,
+      "loss": 1.6367,
+      "step": 11
+    },
+    {
+      "epoch": 0.022181146025878003,
+      "grad_norm": 3.9537124633789062,
+      "learning_rate": 0.00019999177886783194,
+      "loss": 1.5529,
+      "step": 12
+    },
+    {
+      "epoch": 0.024029574861367836,
+      "grad_norm": 5.052474498748779,
+      "learning_rate": 0.00019998150276943902,
+      "loss": 2.002,
+      "step": 13
+    },
+    {
+      "epoch": 0.025878003696857672,
+      "grad_norm": 3.856994152069092,
+      "learning_rate": 0.000199967116823068,
+      "loss": 1.3814,
+      "step": 14
+    },
+    {
+      "epoch": 0.027726432532347505,
+      "grad_norm": 4.525390625,
+      "learning_rate": 0.0001999486216200688,
+      "loss": 2.113,
+      "step": 15
+    },
+    {
+      "epoch": 0.027726432532347505,
+      "eval_loss": 0.34281158447265625,
+      "eval_runtime": 172.8847,
+      "eval_samples_per_second": 1.319,
+      "eval_steps_per_second": 0.659,
+      "step": 15
+    },
+    {
+      "epoch": 0.029574861367837338,
+      "grad_norm": 2.5049190521240234,
+      "learning_rate": 0.00019992601792070679,
+      "loss": 1.1666,
+      "step": 16
+    },
+    {
+      "epoch": 0.031423290203327174,
+      "grad_norm": 5.2193169593811035,
+      "learning_rate": 0.00019989930665413147,
+      "loss": 1.5721,
+      "step": 17
+    },
+    {
+      "epoch": 0.033271719038817,
+      "grad_norm": 3.3948168754577637,
+      "learning_rate": 0.00019986848891833845,
+      "loss": 0.8586,
+      "step": 18
+    },
+    {
+      "epoch": 0.03512014787430684,
+      "grad_norm": 3.3094377517700195,
+      "learning_rate": 0.0001998335659801241,
+      "loss": 0.989,
+      "step": 19
+    },
+    {
+      "epoch": 0.036968576709796676,
+      "grad_norm": 3.9788992404937744,
+      "learning_rate": 0.00019979453927503364,
+      "loss": 1.4677,
+      "step": 20
+    },
+    {
+      "epoch": 0.036968576709796676,
+      "eval_loss": 0.3295012414455414,
+      "eval_runtime": 174.1601,
+      "eval_samples_per_second": 1.309,
+      "eval_steps_per_second": 0.655,
+      "step": 20
     }
   ],
   "logging_steps": 1,
@@ -129,7 +215,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.414506945183744e+16,
+  "total_flos": 2.829013890367488e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null