Grogros committed
Commit 525a298 · verified · 1 Parent(s): 33caf9c

Training in progress, step 500, checkpoint

checkpoint-500/config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "_name_or_path": "meta-llama/Llama-3.2-1B-Instruct",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 128000,
+   "eos_token_id": [
+     128001,
+     128008,
+     128009
+   ],
+   "head_dim": 64,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 8192,
+   "max_position_embeddings": 131072,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 8,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "factor": 32.0,
+     "high_freq_factor": 4.0,
+     "low_freq_factor": 1.0,
+     "original_max_position_embeddings": 8192,
+     "rope_type": "llama3"
+   },
+   "rope_theta": 500000.0,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.46.3",
+   "use_cache": true,
+   "vocab_size": 128256
+ }
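
This config records the base architecture (Llama-3.2-1B-Instruct: 16 hidden layers, hidden size 2048, 8 KV heads, llama3 rope scaling, bfloat16). A minimal loading sketch with transformers, assuming the checkpoint directory has been pulled locally as checkpoint-500/:

    import torch
    from transformers import AutoConfig, AutoModelForCausalLM

    # Read the architecture config committed above.
    config = AutoConfig.from_pretrained("checkpoint-500")
    print(config.model_type, config.num_hidden_layers, config.hidden_size)  # llama 16 2048

    # Instantiate the checkpoint weights (model.safetensors) in the dtype recorded in config.json.
    model = AutoModelForCausalLM.from_pretrained("checkpoint-500", torch_dtype=torch.bfloat16)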
checkpoint-500/generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token_id": 128000,
+   "do_sample": true,
+   "eos_token_id": [
+     128001,
+     128008,
+     128009
+   ],
+   "temperature": 0.6,
+   "top_p": 0.9,
+   "transformers_version": "4.46.3"
+ }
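
generation_config.json pins the default sampling behaviour (do_sample with temperature 0.6 and top_p 0.9). A hedged sketch of how generate() picks these defaults up; the tokenizer is not part of this commit, so loading it from the base model is an assumption:

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tok = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")  # assumption: base tokenizer
    model = AutoModelForCausalLM.from_pretrained("checkpoint-500", torch_dtype=torch.bfloat16)

    # generate() reads checkpoint-500/generation_config.json, so temperature=0.6 and
    # top_p=0.9 sampling apply without being passed explicitly.
    inputs = tok("Hello", return_tensors="pt")
    out = model.generate(**inputs, max_new_tokens=32)
    print(tok.decode(out[0], skip_special_tokens=True))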
checkpoint-500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fde65182ccdfcea6ba3d8c55f7fc06433a89ca47f004b8f6d559b3615f7ae81
+ size 2471645608
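
The three-line entries here and in the files below are Git LFS pointer files: the ~2.47 GB weight blob itself lives in LFS storage and is addressed by its sha256 oid and size. A small sketch (not part of the repo) for checking a downloaded blob against a pointer:

    import hashlib, os

    def matches_lfs_pointer(path, expected_oid, expected_size):
        # Hash the file in 1 MiB chunks and compare against the pointer's oid and size.
        h = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest() == expected_oid and os.path.getsize(path) == expected_size

    print(matches_lfs_pointer(
        "checkpoint-500/model.safetensors",
        "3fde65182ccdfcea6ba3d8c55f7fc06433a89ca47f004b8f6d559b3615f7ae81",
        2471645608,
    ))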
checkpoint-500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e5caca9fd86a9289cea4412008fb06c5674453f759c5de178150ccabba15f8e
+ size 3724602
checkpoint-500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2dc0cb185223051d88dd0147199fae1985385276c97328acbd8bc4d0fc6bc96
+ size 14244
checkpoint-500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31f20d3fd8f2ca072c88294e1b245d2fc0feec4c84032b4ae4d3711181320962
+ size 1064
checkpoint-500/trainer_state.json ADDED
@@ -0,0 +1,383 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.9627085377821394,
+   "eval_steps": 500,
+   "global_step": 500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.03925417075564279,
+       "grad_norm": 81.5,
+       "learning_rate": 8.000000000000001e-07,
+       "loss": 0.3818,
+       "step": 10
+     },
+     {
+       "epoch": 0.07850834151128558,
+       "grad_norm": 322.0,
+       "learning_rate": 1.6000000000000001e-06,
+       "loss": 0.367,
+       "step": 20
+     },
+     {
+       "epoch": 0.11776251226692837,
+       "grad_norm": 784.0,
+       "learning_rate": 2.4000000000000003e-06,
+       "loss": 0.4472,
+       "step": 30
+     },
+     {
+       "epoch": 0.15701668302257116,
+       "grad_norm": 350.0,
+       "learning_rate": 3.2000000000000003e-06,
+       "loss": 0.6806,
+       "step": 40
+     },
+     {
+       "epoch": 0.19627085377821393,
+       "grad_norm": 186.0,
+       "learning_rate": 4.000000000000001e-06,
+       "loss": 0.9139,
+       "step": 50
+     },
+     {
+       "epoch": 0.23552502453385674,
+       "grad_norm": 199.0,
+       "learning_rate": 4.800000000000001e-06,
+       "loss": 0.8205,
+       "step": 60
+     },
+     {
+       "epoch": 0.2747791952894995,
+       "grad_norm": 118.0,
+       "learning_rate": 5.600000000000001e-06,
+       "loss": 0.9017,
+       "step": 70
+     },
+     {
+       "epoch": 0.3140333660451423,
+       "grad_norm": 616.0,
+       "learning_rate": 6.4000000000000006e-06,
+       "loss": 3.1966,
+       "step": 80
+     },
+     {
+       "epoch": 0.35328753680078506,
+       "grad_norm": 692.0,
+       "learning_rate": 7.2000000000000005e-06,
+       "loss": 1.0627,
+       "step": 90
+     },
+     {
+       "epoch": 0.39254170755642787,
+       "grad_norm": 75.5,
+       "learning_rate": 8.000000000000001e-06,
+       "loss": 7.3782,
+       "step": 100
+     },
+     {
+       "epoch": 0.43179587831207067,
+       "grad_norm": 908.0,
+       "learning_rate": 8.8e-06,
+       "loss": 1.1091,
+       "step": 110
+     },
+     {
+       "epoch": 0.47105004906771347,
+       "grad_norm": 61.5,
+       "learning_rate": 9.600000000000001e-06,
+       "loss": 0.9262,
+       "step": 120
+     },
+     {
+       "epoch": 0.5103042198233563,
+       "grad_norm": 45.25,
+       "learning_rate": 1.04e-05,
+       "loss": 0.6774,
+       "step": 130
+     },
+     {
+       "epoch": 0.549558390578999,
+       "grad_norm": 39.75,
+       "learning_rate": 1.1200000000000001e-05,
+       "loss": 0.8204,
+       "step": 140
+     },
+     {
+       "epoch": 0.5888125613346418,
+       "grad_norm": 38.0,
+       "learning_rate": 1.2e-05,
+       "loss": 0.6443,
+       "step": 150
+     },
+     {
+       "epoch": 0.6280667320902846,
+       "grad_norm": 31.25,
+       "learning_rate": 1.2800000000000001e-05,
+       "loss": 0.607,
+       "step": 160
+     },
+     {
+       "epoch": 0.6673209028459274,
+       "grad_norm": 33.5,
+       "learning_rate": 1.3600000000000002e-05,
+       "loss": 0.674,
+       "step": 170
+     },
+     {
+       "epoch": 0.7065750736015701,
+       "grad_norm": 37.5,
+       "learning_rate": 1.4400000000000001e-05,
+       "loss": 0.6806,
+       "step": 180
+     },
+     {
+       "epoch": 0.745829244357213,
+       "grad_norm": 36.25,
+       "learning_rate": 1.5200000000000002e-05,
+       "loss": 0.6131,
+       "step": 190
+     },
+     {
+       "epoch": 0.7850834151128557,
+       "grad_norm": 43.25,
+       "learning_rate": 1.6000000000000003e-05,
+       "loss": 0.5914,
+       "step": 200
+     },
+     {
+       "epoch": 0.8243375858684985,
+       "grad_norm": 27.75,
+       "learning_rate": 1.6800000000000002e-05,
+       "loss": 0.5488,
+       "step": 210
+     },
+     {
+       "epoch": 0.8635917566241413,
+       "grad_norm": 47.75,
+       "learning_rate": 1.76e-05,
+       "loss": 0.7107,
+       "step": 220
+     },
+     {
+       "epoch": 0.9028459273797841,
+       "grad_norm": 16.375,
+       "learning_rate": 1.8400000000000003e-05,
+       "loss": 1.2491,
+       "step": 230
+     },
+     {
+       "epoch": 0.9421000981354269,
+       "grad_norm": 35.5,
+       "learning_rate": 1.9200000000000003e-05,
+       "loss": 0.6448,
+       "step": 240
+     },
+     {
+       "epoch": 0.9813542688910697,
+       "grad_norm": 22.875,
+       "learning_rate": 2e-05,
+       "loss": 0.5682,
+       "step": 250
+     },
+     {
+       "epoch": 1.0206084396467126,
+       "grad_norm": 32.75,
+       "learning_rate": 1.9999025240093045e-05,
+       "loss": 0.5689,
+       "step": 260
+     },
+     {
+       "epoch": 1.0598626104023552,
+       "grad_norm": 28.375,
+       "learning_rate": 1.9996101150403543e-05,
+       "loss": 0.4836,
+       "step": 270
+     },
+     {
+       "epoch": 1.099116781157998,
+       "grad_norm": 18.25,
+       "learning_rate": 1.9991228300988586e-05,
+       "loss": 0.519,
+       "step": 280
+     },
+     {
+       "epoch": 1.138370951913641,
+       "grad_norm": 26.125,
+       "learning_rate": 1.9984407641819812e-05,
+       "loss": 0.4941,
+       "step": 290
+     },
+     {
+       "epoch": 1.1776251226692835,
+       "grad_norm": 20.5,
+       "learning_rate": 1.9975640502598243e-05,
+       "loss": 0.4484,
+       "step": 300
+     },
+     {
+       "epoch": 1.2168792934249264,
+       "grad_norm": 24.625,
+       "learning_rate": 1.9964928592495046e-05,
+       "loss": 0.5169,
+       "step": 310
+     },
+     {
+       "epoch": 1.2561334641805693,
+       "grad_norm": 14.125,
+       "learning_rate": 1.9952273999818312e-05,
+       "loss": 0.4417,
+       "step": 320
+     },
+     {
+       "epoch": 1.295387634936212,
+       "grad_norm": 26.25,
+       "learning_rate": 1.9937679191605964e-05,
+       "loss": 0.4361,
+       "step": 330
+     },
+     {
+       "epoch": 1.3346418056918548,
+       "grad_norm": 23.125,
+       "learning_rate": 1.9921147013144782e-05,
+       "loss": 0.4454,
+       "step": 340
+     },
+     {
+       "epoch": 1.3738959764474976,
+       "grad_norm": 23.625,
+       "learning_rate": 1.9902680687415704e-05,
+       "loss": 0.446,
+       "step": 350
+     },
+     {
+       "epoch": 1.4131501472031402,
+       "grad_norm": 13.9375,
+       "learning_rate": 1.988228381446553e-05,
+       "loss": 0.4006,
+       "step": 360
+     },
+     {
+       "epoch": 1.452404317958783,
+       "grad_norm": 31.5,
+       "learning_rate": 1.985996037070505e-05,
+       "loss": 0.4577,
+       "step": 370
+     },
+     {
+       "epoch": 1.491658488714426,
+       "grad_norm": 47.5,
+       "learning_rate": 1.983571470813386e-05,
+       "loss": 0.4207,
+       "step": 380
+     },
+     {
+       "epoch": 1.5309126594700686,
+       "grad_norm": 12.3125,
+       "learning_rate": 1.9809551553491918e-05,
+       "loss": 0.4251,
+       "step": 390
+     },
+     {
+       "epoch": 1.5701668302257115,
+       "grad_norm": 20.75,
+       "learning_rate": 1.9781476007338058e-05,
+       "loss": 0.3968,
+       "step": 400
+     },
+     {
+       "epoch": 1.6094210009813543,
+       "grad_norm": 15.0,
+       "learning_rate": 1.9751493543055634e-05,
+       "loss": 0.4019,
+       "step": 410
+     },
+     {
+       "epoch": 1.648675171736997,
+       "grad_norm": 11.5,
+       "learning_rate": 1.9719610005785466e-05,
+       "loss": 0.3894,
+       "step": 420
+     },
+     {
+       "epoch": 1.6879293424926398,
+       "grad_norm": 20.375,
+       "learning_rate": 1.9685831611286312e-05,
+       "loss": 0.3923,
+       "step": 430
+     },
+     {
+       "epoch": 1.7271835132482827,
+       "grad_norm": 16.75,
+       "learning_rate": 1.9650164944723116e-05,
+       "loss": 0.374,
+       "step": 440
+     },
+     {
+       "epoch": 1.7664376840039253,
+       "grad_norm": 21.375,
+       "learning_rate": 1.961261695938319e-05,
+       "loss": 0.402,
+       "step": 450
+     },
+     {
+       "epoch": 1.8056918547595682,
+       "grad_norm": 14.25,
+       "learning_rate": 1.9573194975320672e-05,
+       "loss": 0.3646,
+       "step": 460
+     },
+     {
+       "epoch": 1.844946025515211,
+       "grad_norm": 15.0625,
+       "learning_rate": 1.9531906677929472e-05,
+       "loss": 0.3451,
+       "step": 470
+     },
+     {
+       "epoch": 1.8842001962708537,
+       "grad_norm": 12.75,
+       "learning_rate": 1.9488760116444966e-05,
+       "loss": 0.3373,
+       "step": 480
+     },
+     {
+       "epoch": 1.9234543670264965,
+       "grad_norm": 12.5,
+       "learning_rate": 1.944376370237481e-05,
+       "loss": 0.3693,
+       "step": 490
+     },
+     {
+       "epoch": 1.9627085377821394,
+       "grad_norm": 2.34375,
+       "learning_rate": 1.9396926207859085e-05,
+       "loss": 0.3595,
+       "step": 500
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 2500,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.91328310788096e+17,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
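
trainer_state.json carries the Trainer's logged history: 500 of 2500 planned steps (~1.96 of 10 epochs), with the learning rate warming up to 2e-05 over 250 steps before what looks like cosine decay, and the loss settling around 0.34-0.40 after spikes near steps 80-100. A small sketch, assuming it is run from the repo root, that summarizes the logged curve:

    import json

    with open("checkpoint-500/trainer_state.json") as f:
        state = json.load(f)

    # One log record every logging_steps=10 optimizer steps.
    for rec in state["log_history"]:
        print(f'step {rec["step"]:4d}  epoch {rec["epoch"]:.3f}  '
              f'lr {rec["learning_rate"]:.2e}  loss {rec["loss"]:.4f}')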
checkpoint-500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a04ba1fea5935bbaf15347f768d6702bcc3e5630f2593cba25a017368a06442
+ size 5432
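
training_args.bin is the pickled TrainingArguments used for the run, so unlike the JSON files above it is not human-readable. A hedged sketch for inspecting it and resuming from this checkpoint; the model and dataset wiring is elided and the names are placeholders:

    import torch

    # TrainingArguments is a pickled Python object, so weights_only=False is required
    # on newer PyTorch releases; only load files you trust this way.
    args = torch.load("checkpoint-500/training_args.bin", weights_only=False)
    print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)

    # Resuming would go through Trainer, e.g. (placeholders, not runnable as-is):
    # trainer = Trainer(model=model, args=args, train_dataset=train_ds)
    # trainer.train(resume_from_checkpoint="checkpoint-500")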