Hanzalwi committed
Commit b32bcb1
1 Parent(s): 2402874

Training in progress, step 1200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a6cdebc96bbb12ba1dc5010d41742892ed7dd71d09701c694d110068729765cc
+oid sha256:0ee698e51552725df8f9c0f41851647daba436baa2e23eb205a5a5bd4a081869
 size 19669752
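
Once the LFS object behind this pointer has been fetched (e.g. after a clone with git-lfs installed), the updated adapter weights can be inspected locally. A minimal sketch, assuming last-checkpoint/adapter_model.safetensors is the resolved ~19.7 MB file rather than the pointer shown above, and that PyTorch and the safetensors package are available:

    # inspect_adapter.py -- hypothetical helper, not part of this repository
    from safetensors import safe_open

    # Open the checkpointed adapter weights lazily, without loading every tensor at once.
    with safe_open("last-checkpoint/adapter_model.safetensors", framework="pt") as f:
        for name in f.keys():
            tensor = f.get_tensor(name)
            print(name, tuple(tensor.shape), tensor.dtype)

This only lists tensor names, shapes, and dtypes; it does not attach the adapter to any base model.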
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:155ee9c0bf029d819e901529b42f931c256a1d48a8d83fe1f9b4c82f009787b8
+oid sha256:ae6cb7c25ea569ea4fb85b227fc1bed4aa2fee0c0c1371e66682172a4bcbb49b
 size 39357829
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cb85d675da79c9f128ba95f0e91ab83c431eb4fb41307c6d9686f45b7ede1b07
+oid sha256:fb6764dc3e1fe015624f31dc2613389c8754599510448c4c64c983ace14a8916
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1f5f3a2d7191245a70e1410d8951fd2f5c94b0b91f13bf39c3f76fe8aaf57fd4
+oid sha256:3d8d6be7898f87772ccbc5c732e900fe63a643c4595ce6af3d6bc6f811ba4b65
 size 627
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.9812790751457214,
-  "best_model_checkpoint": "./outputs/checkpoint-1100",
-  "epoch": 1.4666666666666668,
+  "best_metric": 0.9755277633666992,
+  "best_model_checkpoint": "./outputs/checkpoint-1200",
+  "epoch": 1.6,
   "eval_steps": 100,
-  "global_step": 1100,
+  "global_step": 1200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -161,6 +161,20 @@
       "eval_samples_per_second": 2.656,
       "eval_steps_per_second": 0.333,
       "step": 1100
+    },
+    {
+      "epoch": 1.6,
+      "learning_rate": 0.0002,
+      "loss": 0.8055,
+      "step": 1200
+    },
+    {
+      "epoch": 1.6,
+      "eval_loss": 0.9755277633666992,
+      "eval_runtime": 725.2603,
+      "eval_samples_per_second": 2.661,
+      "eval_steps_per_second": 0.334,
+      "step": 1200
     }
   ],
   "logging_steps": 100,
@@ -168,7 +182,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 2.8493512017395712e+17,
+  "total_flos": 3.108344477177856e+17,
   "trial_name": null,
   "trial_params": null
 }
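
For reference, the updated trainer state can be read back to confirm which checkpoint is currently best before continuing training. A minimal sketch, assuming a local clone with the checkpoint directory at last-checkpoint/ as in this commit; the trainer object mentioned in the final comment is a hypothetical Hugging Face Trainer instance, not something defined in this repository:

    import json

    # Read the trainer state saved alongside this checkpoint.
    with open("last-checkpoint/trainer_state.json") as f:
        state = json.load(f)

    print("global_step:", state["global_step"])                 # 1200
    print("best_metric:", state["best_metric"])                 # 0.9755277633666992
    print("best_model_checkpoint:", state["best_model_checkpoint"])  # ./outputs/checkpoint-1200

    # Training would typically be resumed by pointing an existing Trainer at this directory:
    # trainer.train(resume_from_checkpoint="last-checkpoint")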