brixeus committed · verified
Commit 06dee7f · 1 Parent(s): 1313593

Training in progress, step 150, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5321b9b24d87cea0ab8ca473e5e824d7f9c33fb0c2b4ecd89f0464cf80f5f883
+oid sha256:29823f4e08b18743fec2278d0a24fba91846a1bbe1838f5c71ac01c15fea6da1
 size 1163996488
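The pointer files in this commit record only a SHA-256 digest and a byte size; the actual weights live in LFS storage. As a rough sketch (the local path is an assumption; the digest and size are taken from the new adapter_model.safetensors pointer above), a downloaded file can be checked against its pointer like this:

import hashlib

def verify_lfs_object(path, expected_sha256, expected_size):
    # Hash the file in 1 MiB chunks so large checkpoints need not fit in memory.
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_sha256 and size == expected_size

# Values from the new pointer above; the local path is hypothetical.
print(verify_lfs_object(
    "last-checkpoint/adapter_model.safetensors",
    "29823f4e08b18743fec2278d0a24fba91846a1bbe1838f5c71ac01c15fea6da1",
    1163996488,
))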
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a4e3002892411f1f6dd3862afccbc704c1e716a0cd285403e57f845ea26a7f3a
+oid sha256:2febce13ec52a548e34b9db0f50c7fb2dfb945379b97f53fb52a73c2635c2f2c
 size 325339796
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5c7b0794dc4da930fd6bdb7425e203350d470d04552fbe2ccdb7d7390d92b97f
+oid sha256:4787c2c04c07c1ae6e2c20c42952d1e91343f26f99f548d3534a1670bc4a5f0c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0ddb9588ea654e56e83effcf81a2bc03480954babcf6415cb44d41d3bfb8039f
+oid sha256:d8ce05761f46e7cf72fb17a02e3a0ca15c9d25ce3babf590eeb40568923b8bac
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.9572558403015137,
-  "best_model_checkpoint": "miner_id_24/checkpoint-100",
-  "epoch": 0.22560631697687536,
+  "best_metric": 1.613823652267456,
+  "best_model_checkpoint": "miner_id_24/checkpoint-150",
+  "epoch": 0.338409475465313,
   "eval_steps": 50,
-  "global_step": 100,
+  "global_step": 150,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -262,6 +262,133 @@
       "eval_samples_per_second": 13.46,
       "eval_steps_per_second": 3.369,
       "step": 100
+    },
+    {
+      "epoch": 0.23011844331641285,
+      "grad_norm": 2.131408929824829,
+      "learning_rate": 5.247918773366112e-05,
+      "loss": 2.2441,
+      "step": 102
+    },
+    {
+      "epoch": 0.23688663282571912,
+      "grad_norm": 2.081399440765381,
+      "learning_rate": 5e-05,
+      "loss": 2.3219,
+      "step": 105
+    },
+    {
+      "epoch": 0.2436548223350254,
+      "grad_norm": 1.4835395812988281,
+      "learning_rate": 4.7520812266338885e-05,
+      "loss": 2.176,
+      "step": 108
+    },
+    {
+      "epoch": 0.25042301184433163,
+      "grad_norm": 1.476472020149231,
+      "learning_rate": 4.504772348747687e-05,
+      "loss": 1.9463,
+      "step": 111
+    },
+    {
+      "epoch": 0.2571912013536379,
+      "grad_norm": 1.4163830280303955,
+      "learning_rate": 4.2586817614407895e-05,
+      "loss": 2.0727,
+      "step": 114
+    },
+    {
+      "epoch": 0.2639593908629442,
+      "grad_norm": 1.5342260599136353,
+      "learning_rate": 4.0144148627425993e-05,
+      "loss": 1.858,
+      "step": 117
+    },
+    {
+      "epoch": 0.2707275803722504,
+      "grad_norm": 1.5253146886825562,
+      "learning_rate": 3.772572564296005e-05,
+      "loss": 1.8902,
+      "step": 120
+    },
+    {
+      "epoch": 0.27749576988155666,
+      "grad_norm": 1.7602707147598267,
+      "learning_rate": 3.533749813077677e-05,
+      "loss": 1.6547,
+      "step": 123
+    },
+    {
+      "epoch": 0.28426395939086296,
+      "grad_norm": 1.6635607481002808,
+      "learning_rate": 3.298534127791785e-05,
+      "loss": 1.8572,
+      "step": 126
+    },
+    {
+      "epoch": 0.2910321489001692,
+      "grad_norm": 1.924639105796814,
+      "learning_rate": 3.0675041535377405e-05,
+      "loss": 1.8875,
+      "step": 129
+    },
+    {
+      "epoch": 0.29780033840947545,
+      "grad_norm": 1.8324716091156006,
+      "learning_rate": 2.8412282383075363e-05,
+      "loss": 1.5726,
+      "step": 132
+    },
+    {
+      "epoch": 0.30456852791878175,
+      "grad_norm": 2.137519121170044,
+      "learning_rate": 2.6202630348146324e-05,
+      "loss": 1.6311,
+      "step": 135
+    },
+    {
+      "epoch": 0.311336717428088,
+      "grad_norm": 2.6914830207824707,
+      "learning_rate": 2.405152131093926e-05,
+      "loss": 1.8127,
+      "step": 138
+    },
+    {
+      "epoch": 0.31810490693739424,
+      "grad_norm": 2.32242751121521,
+      "learning_rate": 2.196424713241637e-05,
+      "loss": 1.536,
+      "step": 141
+    },
+    {
+      "epoch": 0.3248730964467005,
+      "grad_norm": 2.783191204071045,
+      "learning_rate": 1.9945942635848748e-05,
+      "loss": 1.6442,
+      "step": 144
+    },
+    {
+      "epoch": 0.3316412859560068,
+      "grad_norm": 3.8185131549835205,
+      "learning_rate": 1.800157297483417e-05,
+      "loss": 1.5812,
+      "step": 147
+    },
+    {
+      "epoch": 0.338409475465313,
+      "grad_norm": 4.207033157348633,
+      "learning_rate": 1.6135921418712956e-05,
+      "loss": 1.4215,
+      "step": 150
+    },
+    {
+      "epoch": 0.338409475465313,
+      "eval_loss": 1.613823652267456,
+      "eval_runtime": 55.6384,
+      "eval_samples_per_second": 13.426,
+      "eval_steps_per_second": 3.361,
+      "step": 150
     }
   ],
   "logging_steps": 3,
@@ -290,7 +417,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.330496579567616e+17,
+  "total_flos": 1.995744869351424e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null