Nexspear committed
Commit e3e6dcf · verified · 1 Parent(s): d1a7a25

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:850d08c0b5b47e4e107b4ede3a5e2cf7614121cd4020bbf615335082c1b407ec
+oid sha256:0b8cb19ec64ff49fcd8f4cd7afd0eb394a8627690fe08f2ff739c507bff2e1b8
 size 27024
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d55cdaa202d979a301fdc89c38f1de0df70821444024657e10e9fb1f0fdcbdbe
+oid sha256:e6e4a0e3723e6e875ac0da10b839e26a795a08b78cddcb7b4217a3f81ecef5a0
 size 63974
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2fea276383daa6be11cb41bcf079e20917a66b8a1df06264809ff50445c989c9
+oid sha256:70bb08e79cd87e26411515d4cfe540a1dfafd998745c321dfb4680b9ed334a58
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8ce05761f46e7cf72fb17a02e3a0ca15c9d25ce3babf590eeb40568923b8bac
+oid sha256:d2d754412c61116546142914503e7369d0cc35d3c380a07e5218f595d76b6d96
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.06379585326953748,
+  "epoch": 0.08506113769271664,
   "eval_steps": 50,
-  "global_step": 150,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -249,6 +249,84 @@
       "eval_samples_per_second": 309.608,
       "eval_steps_per_second": 154.804,
       "step": 150
+    },
+    {
+      "epoch": 0.0659223817118554,
+      "grad_norm": 0.03459914028644562,
+      "learning_rate": 1.3213804466343421e-05,
+      "loss": 10.3752,
+      "step": 155
+    },
+    {
+      "epoch": 0.06804891015417332,
+      "grad_norm": 0.026126541197299957,
+      "learning_rate": 1.0542974530180327e-05,
+      "loss": 10.3767,
+      "step": 160
+    },
+    {
+      "epoch": 0.07017543859649122,
+      "grad_norm": 0.02168152667582035,
+      "learning_rate": 8.141676086873572e-06,
+      "loss": 10.3748,
+      "step": 165
+    },
+    {
+      "epoch": 0.07230196703880915,
+      "grad_norm": 0.030395662412047386,
+      "learning_rate": 6.026312439675552e-06,
+      "loss": 10.3775,
+      "step": 170
+    },
+    {
+      "epoch": 0.07442849548112707,
+      "grad_norm": 0.02148647978901863,
+      "learning_rate": 4.2113336672471245e-06,
+      "loss": 10.3768,
+      "step": 175
+    },
+    {
+      "epoch": 0.07655502392344497,
+      "grad_norm": 0.027581162750720978,
+      "learning_rate": 2.7091379149682685e-06,
+      "loss": 10.3745,
+      "step": 180
+    },
+    {
+      "epoch": 0.0786815523657629,
+      "grad_norm": 0.019557451829314232,
+      "learning_rate": 1.5299867030334814e-06,
+      "loss": 10.3774,
+      "step": 185
+    },
+    {
+      "epoch": 0.08080808080808081,
+      "grad_norm": 0.0335659459233284,
+      "learning_rate": 6.819348298638839e-07,
+      "loss": 10.3748,
+      "step": 190
+    },
+    {
+      "epoch": 0.08293460925039872,
+      "grad_norm": 0.018269188702106476,
+      "learning_rate": 1.7077534966650766e-07,
+      "loss": 10.3759,
+      "step": 195
+    },
+    {
+      "epoch": 0.08506113769271664,
+      "grad_norm": 0.025745518505573273,
+      "learning_rate": 0.0,
+      "loss": 10.3751,
+      "step": 200
+    },
+    {
+      "epoch": 0.08506113769271664,
+      "eval_loss": 10.376242637634277,
+      "eval_runtime": 3.2404,
+      "eval_samples_per_second": 305.514,
+      "eval_steps_per_second": 152.757,
+      "step": 200
     }
   ],
   "logging_steps": 5,
@@ -263,12 +341,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 4066770616320.0,
+  "total_flos": 5437446881280.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null