Hanzalwi committed on
Commit c81c0bc
1 Parent(s): 94c644e

Training in progress, step 1700, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d67b6d1e5ac08c55ca272dae5cef94538203ca81a7fd6189cc24a85e8020af1d
+oid sha256:8627ab63880fea3011447cb73f1dfbce83a72a704600888571a5d40e6758e417
 size 9444296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:05dae8539f5c93edb2c67fdaaadbb3ffc382a05d59552d72ef43583b1403bbd1
+oid sha256:78b1bdef003d1399d3a6d37c6aa98d9120bd6ce3419fde88e8606366b6870ccd
 size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:59436ee7d20f5ba0ec694086a8e3e4769ad9219178f69a9c92d3d97e58bb26b2
+oid sha256:b6598ba8d28457ca982679bb4678b929548195f1692dc85503bc0d791879d46e
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fcd2587f362188ac4728d4fa6edf8d2b0b6d72db365d49f7b847d4d79e3da09f
+oid sha256:0410ad60e6d0138b921a03b0a3e367fd27c6ab07cb9a5006fcb66ea8e5bbacc4
 size 627
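
The four files above are Git LFS pointer stubs: the repository stores only the object id (a SHA-256 digest of the real file) and its byte size, so a checkpoint update shows up as a one-line oid change. A minimal sketch of checking a locally pulled file against its pointer; the local path is illustrative and not part of this commit:

import hashlib
from pathlib import Path

def lfs_oid(path: str, chunk_size: int = 1 << 20) -> str:
    """Compute the sha256 digest that Git LFS records as the object id."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local checkout; compare against the pointer's oid and size lines.
blob = Path("last-checkpoint/adapter_model.safetensors")
print(lfs_oid(str(blob)), blob.stat().st_size)
# Expected after this commit:
# 8627ab63880fea3011447cb73f1dfbce83a72a704600888571a5d40e6758e417 9444296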
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.3913403749465942,
-  "best_model_checkpoint": "./outputs/checkpoint-1600",
-  "epoch": 2.1333333333333333,
+  "best_metric": 1.3872549533843994,
+  "best_model_checkpoint": "./outputs/checkpoint-1700",
+  "epoch": 2.2666666666666666,
   "eval_steps": 100,
-  "global_step": 1600,
+  "global_step": 1700,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -231,6 +231,20 @@
       "eval_samples_per_second": 15.552,
       "eval_steps_per_second": 1.951,
       "step": 1600
+    },
+    {
+      "epoch": 2.27,
+      "learning_rate": 0.0002,
+      "loss": 1.1354,
+      "step": 1700
+    },
+    {
+      "epoch": 2.27,
+      "eval_loss": 1.3872549533843994,
+      "eval_runtime": 93.3779,
+      "eval_samples_per_second": 15.539,
+      "eval_steps_per_second": 1.949,
+      "step": 1700
     }
   ],
   "logging_steps": 100,
@@ -238,7 +252,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 9.356583872176128e+16,
+  "total_flos": 9.940658531844096e+16,
   "trial_name": null,
   "trial_params": null
 }
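
The trainer_state.json diff carries the substance of the checkpoint update: global_step moves to 1700, a new train log entry (loss 1.1354) and eval entry (eval_loss 1.3873) are appended to log_history, and best_metric/best_model_checkpoint now point at checkpoint-1700. A minimal sketch for inspecting those fields from a local copy of the checkpoint directory; the path is illustrative:

import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])            # 1700
print(state["best_metric"])            # 1.3872549533843994
print(state["best_model_checkpoint"])  # ./outputs/checkpoint-1700

# Most recent eval entry in the training log:
evals = [e for e in state["log_history"] if "eval_loss" in e]
print(evals[-1]["eval_loss"], "at step", evals[-1]["step"])

Resuming would typically go through trainer.train(resume_from_checkpoint=...), which re-reads this file together with optimizer.pt, scheduler.pt, and rng_state.pth from the same checkpoint directory.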