Hanzalwi committed
Commit 589441f
1 Parent(s): 7132486

Training in progress, step 100, checkpoint

last-checkpoint/README.md CHANGED
@@ -236,4 +236,23 @@ The following `bitsandbytes` quantization config was used during training:
  ### Framework versions
 
 
+ - PEFT 0.6.3.dev0
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: True
+ - load_in_4bit: False
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: fp4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float32
+
+ ### Framework versions
+
+
  - PEFT 0.6.3.dev0
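
The quantization settings recorded in the README above map one-to-one onto `transformers`' `BitsAndBytesConfig`. As a minimal sketch (not part of this commit, and assuming the checkpoint was produced with the `transformers` + `bitsandbytes` stack the README implies), the same config could be reconstructed like this:

```python
import torch
from transformers import BitsAndBytesConfig

# Mirrors the config listed in last-checkpoint/README.md (8-bit weights, fp32 compute).
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,                      # load_in_8bit: True
    load_in_4bit=False,                     # load_in_4bit: False
    llm_int8_threshold=6.0,                 # llm_int8_threshold: 6.0
    llm_int8_skip_modules=None,             # llm_int8_skip_modules: None
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",              # unused here since 4-bit is off
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)
```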
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e1489584661a145618f26da9edffe21542c0e6654208c4eac8ad109fa1b04c97
+ oid sha256:aacbf34fb992bb8dc5e57b13720c266487032710f51d5b6c711007d5d5513f8e
  size 9444296
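
`adapter_model.safetensors` holds only the PEFT adapter weights (about 9.4 MB), not the base model. A minimal sketch of loading them with `peft` is below; the base model ID is a placeholder, since this diff never names the base model, and it assumes the checkpoint directory has been downloaded locally:

```python
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

# "base-model-id" is hypothetical; substitute the actual base model this adapter was trained on.
base = AutoModelForCausalLM.from_pretrained(
    "base-model-id",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),  # matches the README config
)

# Attaches the adapter weights stored in last-checkpoint/adapter_model.safetensors.
model = PeftModel.from_pretrained(base, "last-checkpoint")
```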
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3c348e4fe6c89cc2cb07f20e2d3f6659d899437689a688748aea10f07481f708
+ oid sha256:1dc382a687d06d81f31e38fee76f898054cf75d838954a57b9c91cb8ae8d8c99
  size 18902665
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0ac8a1ed22dccfd0b73fde0039db75261c479f8649c9b62376ee4e503343b09a
+ oid sha256:e2d13059a2d496303aa065549223e9500326c02d6d5046828f86e7c592e199d5
  size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f10a983aa914555fea6e5c0db8d7ddbaebbe7e28546c78ee0e93ac76cbc28436
+ oid sha256:216f76b8039f833c337db298c81f13b12082d5fd4f9d866cecd34b2ca7550b37
  size 627
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 1.360133409500122,
- "best_model_checkpoint": "./outputs/checkpoint-2200",
- "epoch": 2.9333333333333336,
+ "best_metric": 1.5645889043807983,
+ "best_model_checkpoint": "./outputs/checkpoint-100",
+ "epoch": 0.13333333333333333,
  "eval_steps": 100,
- "global_step": 2200,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -11,310 +11,16 @@
  {
  "epoch": 0.13,
  "learning_rate": 0.0002,
- "loss": 1.4578,
+ "loss": 1.4574,
  "step": 100
  },
  {
  "epoch": 0.13,
- "eval_loss": 1.567879319190979,
- "eval_runtime": 93.4626,
- "eval_samples_per_second": 15.525,
- "eval_steps_per_second": 1.947,
+ "eval_loss": 1.5645889043807983,
+ "eval_runtime": 93.6488,
+ "eval_samples_per_second": 15.494,
+ "eval_steps_per_second": 1.943,
  "step": 100
- },
- {
- "epoch": 0.27,
- "learning_rate": 0.0002,
- "loss": 1.2977,
- "step": 200
- },
- {
- "epoch": 0.27,
- "eval_loss": 1.5340639352798462,
- "eval_runtime": 93.3831,
- "eval_samples_per_second": 15.538,
- "eval_steps_per_second": 1.949,
- "step": 200
- },
- {
- "epoch": 0.4,
- "learning_rate": 0.0002,
- "loss": 1.2762,
- "step": 300
- },
- {
- "epoch": 0.4,
- "eval_loss": 1.5129507780075073,
- "eval_runtime": 93.3176,
- "eval_samples_per_second": 15.549,
- "eval_steps_per_second": 1.95,
- "step": 300
- },
- {
- "epoch": 0.53,
- "learning_rate": 0.0002,
- "loss": 1.2673,
- "step": 400
- },
- {
- "epoch": 0.53,
- "eval_loss": 1.4979034662246704,
- "eval_runtime": 93.4276,
- "eval_samples_per_second": 15.531,
- "eval_steps_per_second": 1.948,
- "step": 400
- },
- {
- "epoch": 0.67,
- "learning_rate": 0.0002,
- "loss": 1.2497,
- "step": 500
- },
- {
- "epoch": 0.67,
- "eval_loss": 1.4787811040878296,
- "eval_runtime": 93.1297,
- "eval_samples_per_second": 15.58,
- "eval_steps_per_second": 1.954,
- "step": 500
- },
- {
- "epoch": 0.8,
- "learning_rate": 0.0002,
- "loss": 1.2326,
- "step": 600
- },
- {
- "epoch": 0.8,
- "eval_loss": 1.4662799835205078,
- "eval_runtime": 93.3055,
- "eval_samples_per_second": 15.551,
- "eval_steps_per_second": 1.951,
- "step": 600
- },
- {
- "epoch": 0.93,
- "learning_rate": 0.0002,
- "loss": 1.2216,
- "step": 700
- },
- {
- "epoch": 0.93,
- "eval_loss": 1.455640435218811,
- "eval_runtime": 93.4836,
- "eval_samples_per_second": 15.521,
- "eval_steps_per_second": 1.947,
- "step": 700
- },
- {
- "epoch": 1.07,
- "learning_rate": 0.0002,
- "loss": 1.2091,
- "step": 800
- },
- {
- "epoch": 1.07,
- "eval_loss": 1.446093201637268,
- "eval_runtime": 93.3855,
- "eval_samples_per_second": 15.538,
- "eval_steps_per_second": 1.949,
- "step": 800
- },
- {
- "epoch": 1.2,
- "learning_rate": 0.0002,
- "loss": 1.1928,
- "step": 900
- },
- {
- "epoch": 1.2,
- "eval_loss": 1.4368518590927124,
- "eval_runtime": 93.1897,
- "eval_samples_per_second": 15.57,
- "eval_steps_per_second": 1.953,
- "step": 900
- },
- {
- "epoch": 1.33,
- "learning_rate": 0.0002,
- "loss": 1.1859,
- "step": 1000
- },
- {
- "epoch": 1.33,
- "eval_loss": 1.4286835193634033,
- "eval_runtime": 93.2996,
- "eval_samples_per_second": 15.552,
- "eval_steps_per_second": 1.951,
- "step": 1000
- },
- {
- "epoch": 1.47,
- "learning_rate": 0.0002,
- "loss": 1.1844,
- "step": 1100
- },
- {
- "epoch": 1.47,
- "eval_loss": 1.4184046983718872,
- "eval_runtime": 93.2489,
- "eval_samples_per_second": 15.56,
- "eval_steps_per_second": 1.952,
- "step": 1100
- },
- {
- "epoch": 1.6,
- "learning_rate": 0.0002,
- "loss": 1.1698,
- "step": 1200
- },
- {
- "epoch": 1.6,
- "eval_loss": 1.4164544343948364,
- "eval_runtime": 93.4373,
- "eval_samples_per_second": 15.529,
- "eval_steps_per_second": 1.948,
- "step": 1200
- },
- {
- "epoch": 1.73,
- "learning_rate": 0.0002,
- "loss": 1.1838,
- "step": 1300
- },
- {
- "epoch": 1.73,
- "eval_loss": 1.4063118696212769,
- "eval_runtime": 93.4223,
- "eval_samples_per_second": 15.532,
- "eval_steps_per_second": 1.948,
- "step": 1300
- },
- {
- "epoch": 1.87,
- "learning_rate": 0.0002,
- "loss": 1.1689,
- "step": 1400
- },
- {
- "epoch": 1.87,
- "eval_loss": 1.3972649574279785,
- "eval_runtime": 93.3855,
- "eval_samples_per_second": 15.538,
- "eval_steps_per_second": 1.949,
- "step": 1400
- },
- {
- "epoch": 2.0,
- "learning_rate": 0.0002,
- "loss": 1.1682,
- "step": 1500
- },
- {
- "epoch": 2.0,
- "eval_loss": 1.3931400775909424,
- "eval_runtime": 93.322,
- "eval_samples_per_second": 15.548,
- "eval_steps_per_second": 1.95,
- "step": 1500
- },
- {
- "epoch": 2.13,
- "learning_rate": 0.0002,
- "loss": 1.1454,
- "step": 1600
- },
- {
- "epoch": 2.13,
- "eval_loss": 1.3893111944198608,
- "eval_runtime": 93.4672,
- "eval_samples_per_second": 15.524,
- "eval_steps_per_second": 1.947,
- "step": 1600
- },
- {
- "epoch": 2.27,
- "learning_rate": 0.0002,
- "loss": 1.1347,
- "step": 1700
- },
- {
- "epoch": 2.27,
- "eval_loss": 1.384419560432434,
- "eval_runtime": 93.2606,
- "eval_samples_per_second": 15.559,
- "eval_steps_per_second": 1.952,
- "step": 1700
- },
- {
- "epoch": 2.4,
- "learning_rate": 0.0002,
- "loss": 1.1295,
- "step": 1800
- },
- {
- "epoch": 2.4,
- "eval_loss": 1.3802986145019531,
- "eval_runtime": 93.2911,
- "eval_samples_per_second": 15.553,
- "eval_steps_per_second": 1.951,
- "step": 1800
- },
- {
- "epoch": 2.53,
- "learning_rate": 0.0002,
- "loss": 1.1358,
- "step": 1900
- },
- {
- "epoch": 2.53,
- "eval_loss": 1.371971845626831,
- "eval_runtime": 93.1978,
- "eval_samples_per_second": 15.569,
- "eval_steps_per_second": 1.953,
- "step": 1900
- },
- {
- "epoch": 2.67,
- "learning_rate": 0.0002,
- "loss": 1.1351,
- "step": 2000
- },
- {
- "epoch": 2.67,
- "eval_loss": 1.3672411441802979,
- "eval_runtime": 93.2635,
- "eval_samples_per_second": 15.558,
- "eval_steps_per_second": 1.951,
- "step": 2000
- },
- {
- "epoch": 2.8,
- "learning_rate": 0.0002,
- "loss": 1.1338,
- "step": 2100
- },
- {
- "epoch": 2.8,
- "eval_loss": 1.361167311668396,
- "eval_runtime": 93.3675,
- "eval_samples_per_second": 15.541,
- "eval_steps_per_second": 1.949,
- "step": 2100
- },
- {
- "epoch": 2.93,
- "learning_rate": 0.0002,
- "loss": 1.1185,
- "step": 2200
- },
- {
- "epoch": 2.93,
- "eval_loss": 1.360133409500122,
- "eval_runtime": 93.453,
- "eval_samples_per_second": 15.527,
- "eval_steps_per_second": 1.948,
- "step": 2200
  }
  ],
  "logging_steps": 100,
@@ -322,7 +28,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
- "total_flos": 1.2864184157454336e+17,
+ "total_flos": 5840664718049280.0,
  "trial_name": null,
  "trial_params": null
  }
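
`trainer_state.json` is plain JSON, so the checkpoint's step and best metric can be verified directly. A small sketch, assuming the checkpoint directory has been downloaded locally as `last-checkpoint/`:

```python
import json

# Inspect the saved trainer state for this checkpoint.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"])      # 100
print(state["best_metric"])      # 1.5645889043807983 (eval_loss at step 100)
print(state["log_history"][-1])  # the step-100 eval record shown in the diff above
```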
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:872967956433a8e00b1b42d36058c7ecb4b8e0f5bd219cf9544c0b5bab765459
+ oid sha256:41885a31fff2260d7edee64e97bbc2b6e32bcd72e50bae7f734f834ae0cf56b3
  size 4283
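
`training_args.bin` is a pickled `TrainingArguments` object written by `transformers`' `Trainer` via `torch.save`. A minimal sketch for inspecting it (assumption: recent `torch` releases default to `weights_only=True`, so unpickling a non-tensor object needs `weights_only=False`):

```python
import torch

# Load the serialized TrainingArguments that produced this checkpoint.
args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.eval_steps, args.save_steps)
```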