shadyy committed
Commit 894704c
Parent: d61bb18

Upload 13 files

README.md CHANGED
@@ -1,3 +1,34 @@
  ---
- license: mit
+ library_name: peft
  ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+ - PEFT 0.5.0
+
+ - PEFT 0.5.0
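The quantization settings listed in the README map directly onto a `BitsAndBytesConfig`. A minimal sketch (assuming a `transformers` version with bitsandbytes support; the base model name is taken from `adapter_config.json` below, and `device_map="auto"` is an assumption, not part of this commit) might look like:

```python
# Minimal sketch: recreate the 4-bit bitsandbytes setup described in the README
# when loading the base model. Not taken from the commit itself.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # load_in_4bit: True
    bnb_4bit_quant_type="nf4",             # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=False,       # bnb_4bit_use_double_quant: False
    bnb_4bit_compute_dtype=torch.float16,  # bnb_4bit_compute_dtype: float16
)

base_model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1",   # base model named in adapter_config.json below
    quantization_config=bnb_config,
    device_map="auto",             # assumed; any valid device placement works
)
```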
adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
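This adapter config describes a LoRA setup with rank 16, alpha 32, and dropout 0.05 applied to the `q_proj` and `v_proj` projections. A minimal sketch of mirroring it with PEFT, or attaching the committed adapter weights to the quantized base model from the sketch above (the local path is a placeholder, not a name from this commit), could be:

```python
# Minimal sketch: mirror adapter_config.json with PEFT (assumptions noted inline).
from peft import LoraConfig, PeftModel

lora_config = LoraConfig(
    r=16,                                 # "r": 16
    lora_alpha=32,                        # "lora_alpha": 32
    lora_dropout=0.05,                    # "lora_dropout": 0.05
    bias="none",                          # "bias": "none"
    target_modules=["q_proj", "v_proj"],  # "target_modules"
    task_type="CAUSAL_LM",                # "task_type": "CAUSAL_LM"
)

# To use the committed adapter weights instead of training from scratch, reuse
# `base_model` from the previous sketch; "path/to/this/repo" is a placeholder
# for a local checkout of this repository.
model = PeftModel.from_pretrained(base_model, "path/to/this/repo")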
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f98bbfcbe659cc25dfbee18f97558c617e8fa2ef5fd232dd8600415f43b13153
+ size 27308941
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66588c3855a4d071abb185a5de29d58636628b20737db45de801babef2e48b86
+ size 54633541
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c152074a486243089e4fc0fdee0a373a30fb0e0a6e40eb5fd0d36fdafc97a155
+ size 443
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4d65916d2dcd7f044bff5e79288a434f41a223a734e1f4a58f6f2ac6b137b57
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5cacd2a6d3db6e47d0be401d663dd74996b2fd50a8b99969cbd48df91dd275a7
+ size 627
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "</s>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 1024,
+   "pad_token": "</s>",
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": true
+ }
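Per `tokenizer_config.json` and `special_tokens_map.json`, the committed tokenizer is a `LlamaTokenizer` that reuses `</s>` as the padding token and caps sequences at 1024 tokens. A small sketch of loading it from a local checkout of this repository (the path is a placeholder) might be:

```python
# Minimal sketch: load the tokenizer files added in this commit and pad as configured.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path
print(tokenizer.pad_token)         # "</s>", per special_tokens_map.json
print(tokenizer.model_max_length)  # 1024, per tokenizer_config.json

batch = tokenizer(
    ["an example prompt"],
    padding="max_length",
    truncation=True,
    max_length=1024,
    return_tensors="pt",
)
```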
trainer_state.json ADDED
@@ -0,0 +1,529 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 68.0,
+   "eval_steps": 500,
+   "global_step": 3094,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.79,
+       "learning_rate": 1.5555555555555555e-05,
+       "loss": 2.3054,
+       "step": 36
+     },
+     {
+       "epoch": 1.58,
+       "learning_rate": 3.155555555555556e-05,
+       "loss": 2.1949,
+       "step": 72
+     },
+     {
+       "epoch": 2.37,
+       "learning_rate": 4.755555555555556e-05,
+       "loss": 1.978,
+       "step": 108
+     },
+     {
+       "epoch": 3.16,
+       "learning_rate": 6.355555555555556e-05,
+       "loss": 1.9161,
+       "step": 144
+     },
+     {
+       "epoch": 3.96,
+       "learning_rate": 7.955555555555556e-05,
+       "loss": 1.8945,
+       "step": 180
+     },
+     {
+       "epoch": 4.75,
+       "learning_rate": 9.555555555555557e-05,
+       "loss": 1.8682,
+       "step": 216
+     },
+     {
+       "epoch": 5.54,
+       "learning_rate": 0.00011155555555555556,
+       "loss": 1.8492,
+       "step": 252
+     },
+     {
+       "epoch": 6.33,
+       "learning_rate": 0.00012755555555555556,
+       "loss": 1.8337,
+       "step": 288
+     },
+     {
+       "epoch": 7.12,
+       "learning_rate": 0.0001431111111111111,
+       "loss": 1.8144,
+       "step": 324
+     },
+     {
+       "epoch": 7.91,
+       "learning_rate": 0.00015911111111111112,
+       "loss": 1.7907,
+       "step": 360
+     },
+     {
+       "epoch": 8.7,
+       "learning_rate": 0.00017511111111111113,
+       "loss": 1.7382,
+       "step": 396
+     },
+     {
+       "epoch": 9.49,
+       "learning_rate": 0.00019111111111111114,
+       "loss": 1.6991,
+       "step": 432
+     },
+     {
+       "epoch": 10.29,
+       "learning_rate": 0.00019925925925925927,
+       "loss": 1.6405,
+       "step": 468
+     },
+     {
+       "epoch": 11.08,
+       "learning_rate": 0.00019748148148148148,
+       "loss": 1.5929,
+       "step": 504
+     },
+     {
+       "epoch": 11.87,
+       "learning_rate": 0.0001957037037037037,
+       "loss": 1.5414,
+       "step": 540
+     },
+     {
+       "epoch": 12.66,
+       "learning_rate": 0.00019392592592592592,
+       "loss": 1.447,
+       "step": 576
+     },
+     {
+       "epoch": 13.45,
+       "learning_rate": 0.00019214814814814816,
+       "loss": 1.3947,
+       "step": 612
+     },
+     {
+       "epoch": 14.24,
+       "learning_rate": 0.00019037037037037037,
+       "loss": 1.3655,
+       "step": 648
+     },
+     {
+       "epoch": 15.03,
+       "learning_rate": 0.0001885925925925926,
+       "loss": 1.2873,
+       "step": 684
+     },
+     {
+       "epoch": 15.82,
+       "learning_rate": 0.0001868148148148148,
+       "loss": 1.2198,
+       "step": 720
+     },
+     {
+       "epoch": 16.62,
+       "learning_rate": 0.00018503703703703705,
+       "loss": 1.1512,
+       "step": 756
+     },
+     {
+       "epoch": 17.41,
+       "learning_rate": 0.00018325925925925926,
+       "loss": 1.1286,
+       "step": 792
+     },
+     {
+       "epoch": 18.2,
+       "learning_rate": 0.0001814814814814815,
+       "loss": 1.0709,
+       "step": 828
+     },
+     {
+       "epoch": 18.99,
+       "learning_rate": 0.0001797037037037037,
+       "loss": 1.0442,
+       "step": 864
+     },
+     {
+       "epoch": 19.78,
+       "learning_rate": 0.00017792592592592594,
+       "loss": 0.971,
+       "step": 900
+     },
+     {
+       "epoch": 20.57,
+       "learning_rate": 0.00017614814814814815,
+       "loss": 0.9412,
+       "step": 936
+     },
+     {
+       "epoch": 21.36,
+       "learning_rate": 0.00017437037037037039,
+       "loss": 0.9084,
+       "step": 972
+     },
+     {
+       "epoch": 22.15,
+       "learning_rate": 0.0001725925925925926,
+       "loss": 0.885,
+       "step": 1008
+     },
+     {
+       "epoch": 22.95,
+       "learning_rate": 0.00017081481481481483,
+       "loss": 0.844,
+       "step": 1044
+     },
+     {
+       "epoch": 23.74,
+       "learning_rate": 0.00016903703703703704,
+       "loss": 0.7975,
+       "step": 1080
+     },
+     {
+       "epoch": 24.53,
+       "learning_rate": 0.00016725925925925928,
+       "loss": 0.7786,
+       "step": 1116
+     },
+     {
+       "epoch": 25.32,
+       "learning_rate": 0.00016548148148148149,
+       "loss": 0.7465,
+       "step": 1152
+     },
+     {
+       "epoch": 26.11,
+       "learning_rate": 0.00016370370370370372,
+       "loss": 0.7311,
+       "step": 1188
+     },
+     {
+       "epoch": 26.9,
+       "learning_rate": 0.00016192592592592593,
+       "loss": 0.6955,
+       "step": 1224
+     },
+     {
+       "epoch": 27.69,
+       "learning_rate": 0.00016014814814814817,
+       "loss": 0.6656,
+       "step": 1260
+     },
+     {
+       "epoch": 28.48,
+       "learning_rate": 0.00015837037037037038,
+       "loss": 0.6507,
+       "step": 1296
+     },
+     {
+       "epoch": 29.27,
+       "learning_rate": 0.0001565925925925926,
+       "loss": 0.6336,
+       "step": 1332
+     },
+     {
+       "epoch": 30.07,
+       "learning_rate": 0.00015481481481481482,
+       "loss": 0.6142,
+       "step": 1368
+     },
+     {
+       "epoch": 30.86,
+       "learning_rate": 0.00015303703703703706,
+       "loss": 0.5865,
+       "step": 1404
+     },
+     {
+       "epoch": 31.65,
+       "learning_rate": 0.00015125925925925927,
+       "loss": 0.5649,
+       "step": 1440
+     },
+     {
+       "epoch": 32.44,
+       "learning_rate": 0.00014948148148148148,
+       "loss": 0.5511,
+       "step": 1476
+     },
+     {
+       "epoch": 33.23,
+       "learning_rate": 0.0001477037037037037,
+       "loss": 0.5329,
+       "step": 1512
+     },
+     {
+       "epoch": 34.02,
+       "learning_rate": 0.00014592592592592592,
+       "loss": 0.5265,
+       "step": 1548
+     },
+     {
+       "epoch": 34.81,
+       "learning_rate": 0.00014414814814814816,
+       "loss": 0.4931,
+       "step": 1584
+     },
+     {
+       "epoch": 35.6,
+       "learning_rate": 0.00014237037037037037,
+       "loss": 0.4904,
+       "step": 1620
+     },
+     {
+       "epoch": 36.4,
+       "learning_rate": 0.0001405925925925926,
+       "loss": 0.466,
+       "step": 1656
+     },
+     {
+       "epoch": 37.19,
+       "learning_rate": 0.0001388148148148148,
+       "loss": 0.4585,
+       "step": 1692
+     },
+     {
+       "epoch": 37.98,
+       "learning_rate": 0.00013703703703703705,
+       "loss": 0.4545,
+       "step": 1728
+     },
+     {
+       "epoch": 38.77,
+       "learning_rate": 0.00013525925925925926,
+       "loss": 0.4244,
+       "step": 1764
+     },
+     {
+       "epoch": 39.56,
+       "learning_rate": 0.0001334814814814815,
+       "loss": 0.4143,
+       "step": 1800
+     },
+     {
+       "epoch": 40.35,
+       "learning_rate": 0.0001317037037037037,
+       "loss": 0.4118,
+       "step": 1836
+     },
+     {
+       "epoch": 41.14,
+       "learning_rate": 0.00012992592592592594,
+       "loss": 0.3963,
+       "step": 1872
+     },
+     {
+       "epoch": 41.93,
+       "learning_rate": 0.00012814814814814815,
+       "loss": 0.3901,
+       "step": 1908
+     },
+     {
+       "epoch": 42.73,
+       "learning_rate": 0.00012637037037037038,
+       "loss": 0.3697,
+       "step": 1944
+     },
+     {
+       "epoch": 43.52,
+       "learning_rate": 0.0001245925925925926,
+       "loss": 0.3595,
+       "step": 1980
+     },
+     {
+       "epoch": 44.31,
+       "learning_rate": 0.00012281481481481483,
+       "loss": 0.3609,
+       "step": 2016
+     },
+     {
+       "epoch": 45.1,
+       "learning_rate": 0.00012103703703703704,
+       "loss": 0.3457,
+       "step": 2052
+     },
+     {
+       "epoch": 45.89,
+       "learning_rate": 0.00011925925925925927,
+       "loss": 0.3342,
+       "step": 2088
+     },
+     {
+       "epoch": 46.68,
+       "learning_rate": 0.00011748148148148148,
+       "loss": 0.3213,
+       "step": 2124
+     },
+     {
+       "epoch": 47.47,
+       "learning_rate": 0.00011570370370370372,
+       "loss": 0.3183,
+       "step": 2160
+     },
+     {
+       "epoch": 48.26,
+       "learning_rate": 0.00011392592592592593,
+       "loss": 0.3067,
+       "step": 2196
+     },
+     {
+       "epoch": 49.05,
+       "learning_rate": 0.00011214814814814815,
+       "loss": 0.3062,
+       "step": 2232
+     },
+     {
+       "epoch": 49.85,
+       "learning_rate": 0.00011037037037037037,
+       "loss": 0.291,
+       "step": 2268
+     },
+     {
+       "epoch": 50.64,
+       "learning_rate": 0.0001085925925925926,
+       "loss": 0.2837,
+       "step": 2304
+     },
+     {
+       "epoch": 51.43,
+       "learning_rate": 0.0001068148148148148,
+       "loss": 0.2768,
+       "step": 2340
+     },
+     {
+       "epoch": 52.22,
+       "learning_rate": 0.00010503703703703704,
+       "loss": 0.2733,
+       "step": 2376
+     },
+     {
+       "epoch": 53.01,
+       "learning_rate": 0.00010325925925925925,
+       "loss": 0.2622,
+       "step": 2412
+     },
+     {
+       "epoch": 53.8,
+       "learning_rate": 0.00010148148148148149,
+       "loss": 0.254,
+       "step": 2448
+     },
+     {
+       "epoch": 54.59,
+       "learning_rate": 9.970370370370371e-05,
+       "loss": 0.247,
+       "step": 2484
+     },
+     {
+       "epoch": 55.38,
+       "learning_rate": 9.792592592592593e-05,
+       "loss": 0.2415,
+       "step": 2520
+     },
+     {
+       "epoch": 56.18,
+       "learning_rate": 9.614814814814816e-05,
+       "loss": 0.2391,
+       "step": 2556
+     },
+     {
+       "epoch": 56.97,
+       "learning_rate": 9.437037037037038e-05,
+       "loss": 0.2318,
+       "step": 2592
+     },
+     {
+       "epoch": 57.76,
+       "learning_rate": 9.25925925925926e-05,
+       "loss": 0.221,
+       "step": 2628
+     },
+     {
+       "epoch": 58.55,
+       "learning_rate": 9.081481481481482e-05,
+       "loss": 0.2179,
+       "step": 2664
+     },
+     {
+       "epoch": 59.34,
+       "learning_rate": 8.903703703703705e-05,
+       "loss": 0.2145,
+       "step": 2700
+     },
+     {
+       "epoch": 60.13,
+       "learning_rate": 8.725925925925927e-05,
+       "loss": 0.209,
+       "step": 2736
+     },
+     {
+       "epoch": 60.92,
+       "learning_rate": 8.548148148148148e-05,
+       "loss": 0.205,
+       "step": 2772
+     },
+     {
+       "epoch": 61.71,
+       "learning_rate": 8.37037037037037e-05,
+       "loss": 0.197,
+       "step": 2808
+     },
+     {
+       "epoch": 62.51,
+       "learning_rate": 8.192592592592592e-05,
+       "loss": 0.1948,
+       "step": 2844
+     },
+     {
+       "epoch": 63.3,
+       "learning_rate": 8.014814814814815e-05,
+       "loss": 0.188,
+       "step": 2880
+     },
+     {
+       "epoch": 64.09,
+       "learning_rate": 7.837037037037037e-05,
+       "loss": 0.188,
+       "step": 2916
+     },
+     {
+       "epoch": 64.88,
+       "learning_rate": 7.659259259259259e-05,
+       "loss": 0.1785,
+       "step": 2952
+     },
+     {
+       "epoch": 65.67,
+       "learning_rate": 7.481481481481481e-05,
+       "loss": 0.1758,
+       "step": 2988
+     },
+     {
+       "epoch": 66.46,
+       "learning_rate": 7.303703703703704e-05,
+       "loss": 0.1703,
+       "step": 3024
+     },
+     {
+       "epoch": 67.25,
+       "learning_rate": 7.125925925925926e-05,
+       "loss": 0.1702,
+       "step": 3060
+     }
+   ],
+   "logging_steps": 36,
+   "max_steps": 4500,
+   "num_train_epochs": 100,
+   "save_steps": 500,
+   "total_flos": 2.6941037663276237e+18,
+   "trial_name": null,
+   "trial_params": null
+ }
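The committed trainer state logs a point every 36 optimizer steps, with the training loss falling from about 2.31 at step 36 to about 0.17 at step 3060, out of a planned 4500 steps. A small sketch for pulling that curve out of the file (file name as committed; everything else is assumed) could be:

```python
# Minimal sketch: extract (step, loss) pairs from trainer_state.json for inspection.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only entries that carry a training loss.
history = [(h["step"], h["loss"]) for h in state["log_history"] if "loss" in h]

print(f"{len(history)} logged points, final loss {history[-1][1]:.4f}")
for step, loss in history[-5:]:
    print(f"step {step:>4}: loss {loss:.4f}")
```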
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83ffe018190aca9df723f8c9b9ebe7cee2a8e4f04061c455fb99d0ca746c646c
+ size 4027