Hanzalwi committed on
Commit 1bd31f2
1 Parent(s): aae1c66

Training in progress, step 100, checkpoint

last-checkpoint/README.md CHANGED
@@ -217,4 +217,23 @@ The following `bitsandbytes` quantization config was used during training:
  ### Framework versions
 
 
+ - PEFT 0.6.3.dev0
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+
+ ### Framework versions
+
+
  - PEFT 0.6.3.dev0
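For reference, the quantization block recorded above maps onto `transformers`' `BitsAndBytesConfig`. A minimal sketch, with the base model id left as a placeholder since this diff does not name it:

```python
# Sketch (not the repo's training script): rebuilding the recorded
# bitsandbytes settings with transformers' BitsAndBytesConfig.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",            # ignored while load_in_4bit is False
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)

# "base-model-id" is a placeholder; the base model is not recorded in this diff.
# model = AutoModelForCausalLM.from_pretrained(
#     "base-model-id", quantization_config=bnb_config, device_map="auto"
# )
```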
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:257f019123780b1c55e505466089f8e97774256b5162ff2a9dd9c53ad0981724
+ oid sha256:05aa022c54cc3af0df746afc7a430ec7202a1ad889fa763bfbf083dac63b6a81
  size 9444296
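The changed binaries are Git LFS pointers: only the `oid sha256:...` and `size` lines live in the repo, while the payload sits in LFS storage. A small sketch, assuming the checkpoint has been downloaded locally, for checking that a file matches its pointer:

```python
# Sketch: compare a downloaded checkpoint file against the sha256 oid recorded
# in its Git LFS pointer. The local path is an assumption for illustration.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "05aa022c54cc3af0df746afc7a430ec7202a1ad889fa763bfbf083dac63b6a81"
print(sha256_of("last-checkpoint/adapter_model.safetensors") == expected)
```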
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c55efb4794097377f43111558386aeebe7c8a2539fb41154eb47915789b79b92
+ oid sha256:44dc5565906114330048a0088ffbb5bd8f8fce97f07e62ec2eafa6ff3e3a4fd0
  size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:828c793974c0f73b777262e329ad095566fad802fa73d655e45ae82e9eed4154
+ oid sha256:e2d13059a2d496303aa065549223e9500326c02d6d5046828f86e7c592e199d5
  size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f10a983aa914555fea6e5c0db8d7ddbaebbe7e28546c78ee0e93ac76cbc28436
+ oid sha256:216f76b8039f833c337db298c81f13b12082d5fd4f9d866cecd34b2ca7550b37
  size 627
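optimizer.pt, rng_state.pth, and scheduler.pt hold the state the `Trainer` uses to continue from step 100. A hedged sketch of resuming; the model, training arguments, and datasets are placeholders not recorded in this diff:

```python
# Sketch: resuming training from this checkpoint directory. Trainer restores
# optimizer.pt, scheduler.pt, rng_state.pth, and trainer_state.json on resume.
# `model`, `args`, and the datasets are placeholders not shown in this diff.
from transformers import Trainer

trainer = Trainer(
    model=model,                 # the PEFT-wrapped, 8-bit quantized model
    args=args,                   # the original TrainingArguments
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
)
trainer.train(resume_from_checkpoint="last-checkpoint")
```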
last-checkpoint/tokenizer.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5d81d9b2c9d9db79ea02c00d4c7e79bb77a718dc57ab01f5f3b1cd6649f08993
- size 14500569
+ oid sha256:490b3432398e9c45dda0581bf34f6daa9411bda59cd0cd831670d7c86f90d2dd
+ size 14500570
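adapter_model.safetensors holds only the PEFT adapter weights (about 9 MB), and tokenizer.json the tokenizer state. A sketch of loading both from this checkpoint for inference, with the base model id again a placeholder:

```python
# Sketch: loading the saved PEFT adapter and tokenizer from the checkpoint.
# "base-model-id" is a placeholder; the base model is not named in this diff.
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

base = AutoModelForCausalLM.from_pretrained(
    "base-model-id",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
model = PeftModel.from_pretrained(base, "last-checkpoint")
tokenizer = AutoTokenizer.from_pretrained("last-checkpoint")
```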
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 1.2910176515579224,
- "best_model_checkpoint": "./outputs/checkpoint-2200",
- "epoch": 2.9333333333333336,
+ "best_metric": 1.567879319190979,
+ "best_model_checkpoint": "./outputs/checkpoint-100",
+ "epoch": 0.13333333333333333,
  "eval_steps": 100,
- "global_step": 2200,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -11,310 +11,16 @@
  {
  "epoch": 0.13,
  "learning_rate": 0.0002,
- "loss": 1.855,
+ "loss": 1.4578,
  "step": 100
  },
  {
  "epoch": 0.13,
- "eval_loss": 1.4976259469985962,
- "eval_runtime": 47.2737,
- "eval_samples_per_second": 30.694,
- "eval_steps_per_second": 3.85,
+ "eval_loss": 1.567879319190979,
+ "eval_runtime": 93.4626,
+ "eval_samples_per_second": 15.525,
+ "eval_steps_per_second": 1.947,
  "step": 100
- },
- {
- "epoch": 0.27,
- "learning_rate": 0.0002,
- "loss": 1.5891,
- "step": 200
- },
- {
- "epoch": 0.27,
- "eval_loss": 1.4663879871368408,
- "eval_runtime": 47.1865,
- "eval_samples_per_second": 30.75,
- "eval_steps_per_second": 3.857,
- "step": 200
- },
- {
- "epoch": 0.4,
- "learning_rate": 0.0002,
- "loss": 1.5693,
- "step": 300
- },
- {
- "epoch": 0.4,
- "eval_loss": 1.4471981525421143,
- "eval_runtime": 47.1683,
- "eval_samples_per_second": 30.762,
- "eval_steps_per_second": 3.859,
- "step": 300
- },
- {
- "epoch": 0.53,
- "learning_rate": 0.0002,
- "loss": 1.5495,
- "step": 400
- },
- {
- "epoch": 0.53,
- "eval_loss": 1.4266287088394165,
- "eval_runtime": 47.1866,
- "eval_samples_per_second": 30.75,
- "eval_steps_per_second": 3.857,
- "step": 400
- },
- {
- "epoch": 0.67,
- "learning_rate": 0.0002,
- "loss": 1.5354,
- "step": 500
- },
- {
- "epoch": 0.67,
- "eval_loss": 1.410528540611267,
- "eval_runtime": 47.2451,
- "eval_samples_per_second": 30.712,
- "eval_steps_per_second": 3.852,
- "step": 500
- },
- {
- "epoch": 0.8,
- "learning_rate": 0.0002,
- "loss": 1.5198,
- "step": 600
- },
- {
- "epoch": 0.8,
- "eval_loss": 1.4007145166397095,
- "eval_runtime": 46.9739,
- "eval_samples_per_second": 30.89,
- "eval_steps_per_second": 3.874,
- "step": 600
- },
- {
- "epoch": 0.93,
- "learning_rate": 0.0002,
- "loss": 1.5095,
- "step": 700
- },
- {
- "epoch": 0.93,
- "eval_loss": 1.388436198234558,
- "eval_runtime": 47.0302,
- "eval_samples_per_second": 30.853,
- "eval_steps_per_second": 3.87,
- "step": 700
- },
- {
- "epoch": 1.07,
- "learning_rate": 0.0002,
- "loss": 1.488,
- "step": 800
- },
- {
- "epoch": 1.07,
- "eval_loss": 1.3770211935043335,
- "eval_runtime": 47.1554,
- "eval_samples_per_second": 30.771,
- "eval_steps_per_second": 3.86,
- "step": 800
- },
- {
- "epoch": 1.2,
- "learning_rate": 0.0002,
- "loss": 1.4685,
- "step": 900
- },
- {
- "epoch": 1.2,
- "eval_loss": 1.3670101165771484,
- "eval_runtime": 47.4079,
- "eval_samples_per_second": 30.607,
- "eval_steps_per_second": 3.839,
- "step": 900
- },
- {
- "epoch": 1.33,
- "learning_rate": 0.0002,
- "loss": 1.4583,
- "step": 1000
- },
- {
- "epoch": 1.33,
- "eval_loss": 1.3592472076416016,
- "eval_runtime": 47.3317,
- "eval_samples_per_second": 30.656,
- "eval_steps_per_second": 3.845,
- "step": 1000
- },
- {
- "epoch": 1.47,
- "learning_rate": 0.0002,
- "loss": 1.4546,
- "step": 1100
- },
- {
- "epoch": 1.47,
- "eval_loss": 1.3527010679244995,
- "eval_runtime": 47.2776,
- "eval_samples_per_second": 30.691,
- "eval_steps_per_second": 3.85,
- "step": 1100
- },
- {
- "epoch": 1.6,
- "learning_rate": 0.0002,
- "loss": 1.4392,
- "step": 1200
- },
- {
- "epoch": 1.6,
- "eval_loss": 1.3437373638153076,
- "eval_runtime": 47.1936,
- "eval_samples_per_second": 30.746,
- "eval_steps_per_second": 3.856,
- "step": 1200
- },
- {
- "epoch": 1.73,
- "learning_rate": 0.0002,
- "loss": 1.45,
- "step": 1300
- },
- {
- "epoch": 1.73,
- "eval_loss": 1.3367496728897095,
- "eval_runtime": 47.2517,
- "eval_samples_per_second": 30.708,
- "eval_steps_per_second": 3.852,
- "step": 1300
- },
- {
- "epoch": 1.87,
- "learning_rate": 0.0002,
- "loss": 1.4321,
- "step": 1400
- },
- {
- "epoch": 1.87,
- "eval_loss": 1.3307961225509644,
- "eval_runtime": 47.1256,
- "eval_samples_per_second": 30.79,
- "eval_steps_per_second": 3.862,
- "step": 1400
- },
- {
- "epoch": 2.0,
- "learning_rate": 0.0002,
- "loss": 1.4336,
- "step": 1500
- },
- {
- "epoch": 2.0,
- "eval_loss": 1.3263577222824097,
- "eval_runtime": 47.1607,
- "eval_samples_per_second": 30.767,
- "eval_steps_per_second": 3.859,
- "step": 1500
- },
- {
- "epoch": 2.13,
- "learning_rate": 0.0002,
- "loss": 1.3981,
- "step": 1600
- },
- {
- "epoch": 2.13,
- "eval_loss": 1.319887638092041,
- "eval_runtime": 47.019,
- "eval_samples_per_second": 30.86,
- "eval_steps_per_second": 3.871,
- "step": 1600
- },
- {
- "epoch": 2.27,
- "learning_rate": 0.0002,
- "loss": 1.3969,
- "step": 1700
- },
- {
- "epoch": 2.27,
- "eval_loss": 1.3168717622756958,
- "eval_runtime": 47.2245,
- "eval_samples_per_second": 30.726,
- "eval_steps_per_second": 3.854,
- "step": 1700
- },
- {
- "epoch": 2.4,
- "learning_rate": 0.0002,
- "loss": 1.3862,
- "step": 1800
- },
- {
- "epoch": 2.4,
- "eval_loss": 1.3101677894592285,
- "eval_runtime": 47.1712,
- "eval_samples_per_second": 30.76,
- "eval_steps_per_second": 3.858,
- "step": 1800
- },
- {
- "epoch": 2.53,
- "learning_rate": 0.0002,
- "loss": 1.3863,
- "step": 1900
- },
- {
- "epoch": 2.53,
- "eval_loss": 1.304863691329956,
- "eval_runtime": 47.3226,
- "eval_samples_per_second": 30.662,
- "eval_steps_per_second": 3.846,
- "step": 1900
- },
- {
- "epoch": 2.67,
- "learning_rate": 0.0002,
- "loss": 1.3918,
- "step": 2000
- },
- {
- "epoch": 2.67,
- "eval_loss": 1.2992783784866333,
- "eval_runtime": 47.3036,
- "eval_samples_per_second": 30.674,
- "eval_steps_per_second": 3.847,
- "step": 2000
- },
- {
- "epoch": 2.8,
- "learning_rate": 0.0002,
- "loss": 1.3869,
- "step": 2100
- },
- {
- "epoch": 2.8,
- "eval_loss": 1.2948063611984253,
- "eval_runtime": 47.587,
- "eval_samples_per_second": 30.492,
- "eval_steps_per_second": 3.825,
- "step": 2100
- },
- {
- "epoch": 2.93,
- "learning_rate": 0.0002,
- "loss": 1.3818,
- "step": 2200
- },
- {
- "epoch": 2.93,
- "eval_loss": 1.2910176515579224,
- "eval_runtime": 47.3703,
- "eval_samples_per_second": 30.631,
- "eval_steps_per_second": 3.842,
- "step": 2200
  }
  ],
  "logging_steps": 100,
@@ -322,7 +28,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
- "total_flos": 7.376989486841856e+16,
+ "total_flos": 5840664718049280.0,
  "trial_name": null,
  "trial_params": null
  }
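trainer_state.json is plain JSON, so the metrics recorded above can be read directly. A small sketch, assuming the checkpoint directory is available locally and the usual `log_history` layout of Trainer state files:

```python
# Sketch: inspecting the metrics recorded in trainer_state.json.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"], state["global_step"])   # 1.567879319190979, 100
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(entry["step"], entry["eval_loss"])
```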
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9adcbc18933134cacfbe21bf9e53fed6d4bf177703cbf6a315aac272cf5f9925
+ oid sha256:872967956433a8e00b1b42d36058c7ecb4b8e0f5bd219cf9544c0b5bab765459
  size 4283
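training_args.bin is a pickled `TrainingArguments` object saved by the `Trainer`. A sketch of inspecting it locally (newer PyTorch releases need `weights_only=False` to unpickle arbitrary objects):

```python
# Sketch: inspecting the pickled TrainingArguments in training_args.bin.
# weights_only=False is required on recent torch versions to unpickle it.
import torch

args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
print(args.learning_rate, args.save_steps, args.num_train_epochs)
```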