rizkyjun committed on
Commit 82c4c0f
1 Parent(s): d4cc36c

Training in progress, step 1700, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8e5c4fba58c6bf9390a0338353e80e91f548009bff8fd02ccc563c1546569e22
+oid sha256:03ff1d24dc08b8a25c2646f8e26c388e1283379e780176ed6477876bbe73dbd0
 size 31466288
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:92cf4e040cee5763a4b906acfa31515b86ab8d763bf97cb77d72691896e9b9ff
+oid sha256:c1bf5ef5d71e8bb32c126db72ec5f530ec3327edac61ceb1e2de142e4123200c
 size 62950917
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:50dbb2327841dc1a7fe1a7d871fa34edd97692f38f48af326a03199ff55031f5
3
  size 14575
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa7d14b07ebf6723d333e229fed51fa89ac2ed397877a31e17f66285a4d818d3
3
  size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fcd2587f362188ac4728d4fa6edf8d2b0b6d72db365d49f7b847d4d79e3da09f
+oid sha256:0410ad60e6d0138b921a03b0a3e367fd27c6ab07cb9a5006fcb66ea8e5bbacc4
 size 627
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.6322499513626099,
-  "best_model_checkpoint": "./outputs/checkpoint-1600",
-  "epoch": 1.1657559198542806,
+  "best_metric": 1.6027921438217163,
+  "best_model_checkpoint": "./outputs/checkpoint-1700",
+  "epoch": 1.238615664845173,
   "eval_steps": 100,
-  "global_step": 1600,
+  "global_step": 1700,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -231,13 +231,27 @@
       "eval_samples_per_second": 11.318,
       "eval_steps_per_second": 1.416,
       "step": 1600
+    },
+    {
+      "epoch": 1.24,
+      "learning_rate": 0.0002,
+      "loss": 1.5939,
+      "step": 1700
+    },
+    {
+      "epoch": 1.24,
+      "eval_loss": 1.6027921438217163,
+      "eval_runtime": 555.8406,
+      "eval_samples_per_second": 11.287,
+      "eval_steps_per_second": 1.412,
+      "step": 1700
     }
   ],
   "logging_steps": 100,
   "max_steps": 4116,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 4.172858875396915e+17,
+  "total_flos": 4.434655082491576e+17,
   "trial_name": null,
   "trial_params": null
 }
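
A minimal sketch, not part of this commit, for checking the updated fields shown in the trainer_state.json diff above after cloning the repository; the local path "last-checkpoint" is an assumption and should point to wherever the checkpoint directory sits on disk.

# Read trainer_state.json and print the progress values recorded at step 1700.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

# Values visible in the diff above.
print(state["best_metric"])             # 1.6027921438217163 (best eval loss so far)
print(state["best_model_checkpoint"])   # ./outputs/checkpoint-1700
print(state["global_step"], "/", state["max_steps"])          # 1700 / 4116
print(state["epoch"], "of", state["num_train_epochs"], "epochs")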