Doowon96 committed on
Commit
072599a
1 Parent(s): b597518

Training in progress, step 1500

model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:32829ac41fa7d2ff1128f275e22cdd36477e6ae03d652fe1eb6c21605e21cf60
+ oid sha256:17f1e97e80984b8c703c5df8fbd30ef068a73a57bbc1e84e480b96e2d8ee40ff
  size 442518124
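Note: the tracked model.safetensors is a Git LFS pointer, so the diff above only shows its sha256 and size; the binary weights live in LFS storage. A minimal sketch of fetching the real file with huggingface_hub (the repo id below is a placeholder, since this commit view does not name the repository):

from huggingface_hub import hf_hub_download

# Placeholder repo id -- substitute the actual model repository.
weights_path = hf_hub_download(
    repo_id="Doowon96/your-model-repo",
    filename="model.safetensors",
    revision="072599a",  # optionally pin to the commit shown above
)
print(weights_path)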
run-2/checkpoint-1000/config.json ADDED
@@ -0,0 +1,48 @@
+ {
+ "_name_or_path": "klue/roberta-base",
+ "architectures": [
+ "RobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2",
+ "3": "LABEL_3",
+ "4": "LABEL_4",
+ "5": "LABEL_5",
+ "6": "LABEL_6"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_2": 2,
+ "LABEL_3": 3,
+ "LABEL_4": 4,
+ "LABEL_5": 5,
+ "LABEL_6": 6
+ },
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 514,
+ "model_type": "roberta",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "tokenizer_class": "BertTokenizer",
+ "torch_dtype": "float32",
+ "transformers_version": "4.37.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 32000
+ }
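Note: this config keeps klue/roberta-base as the base model and adds a seven-way single-label classification head with the generic LABEL_0 to LABEL_6 names. A minimal sketch of building an equivalent model with transformers (this rebuilds the config from the hub base model, it does not read the checkpoint itself):

from transformers import AutoConfig, AutoModelForSequenceClassification

# Recreate an equivalent config: 7 generic labels, single-label classification.
config = AutoConfig.from_pretrained(
    "klue/roberta-base",
    num_labels=7,
    problem_type="single_label_classification",
)
model = AutoModelForSequenceClassification.from_pretrained("klue/roberta-base", config=config)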
run-2/checkpoint-1000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32829ac41fa7d2ff1128f275e22cdd36477e6ae03d652fe1eb6c21605e21cf60
+ size 442518124
run-2/checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5f4da5b16a52e52edf28a690da6a0cbb917965447aa00df376e7a8928a470fe
+ size 885156090
run-2/checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfa29879bfeb52dbe9143232bc4a10111256a751b7c7dfeba61e129bf6be84a1
+ size 14244
run-2/checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:afeefb1fb1d75e6183fe25724d639490d27a5c5d144cd8fc9769e05d6b32b92f
+ size 1064
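Note: besides the weights, each checkpoint carries optimizer.pt, scheduler.pt and rng_state.pth; together these are what Trainer.train(resume_from_checkpoint="run-2/checkpoint-1000") restores to continue training where it stopped. A minimal sketch of inspecting those files locally (paths assume the checkpoint directory has been downloaded):

import torch

# Saved by the Trainer as ordinary torch pickles; weights_only=False avoids the
# stricter unpickling default on recent torch versions.
opt_state = torch.load("run-2/checkpoint-1000/optimizer.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("run-2/checkpoint-1000/rng_state.pth", map_location="cpu", weights_only=False)

print(list(opt_state.keys()))   # an optimizer state dict: 'state' and 'param_groups'
print(list(rng_state.keys()))   # the saved Python / NumPy / torch RNG snapshots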
run-2/checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "bos_token": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
run-2/checkpoint-1000/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-2/checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "4": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "[CLS]",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": false,
+ "eos_token": "[SEP]",
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
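Note: tokenizer_class is BertTokenizer because klue/roberta-base uses a WordPiece (BERT-style) vocabulary despite the RoBERTa architecture, with do_lower_case false and model_max_length 512. A minimal sketch of loading the checkpoint's tokenizer (the local path is an assumption):

from transformers import AutoTokenizer

# AutoTokenizer follows tokenizer_class=BertTokenizer from tokenizer_config.json.
tokenizer = AutoTokenizer.from_pretrained("run-2/checkpoint-1000")
enc = tokenizer("분류할 한국어 뉴스 제목 예시", truncation=True, max_length=512)
print(enc["input_ids"][:10])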
run-2/checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,326 @@
+ {
+ "best_metric": 0.8381595689513368,
+ "best_model_checkpoint": "test-klue/ynat/run-2/checkpoint-500",
+ "epoch": 1.7513134851138354,
+ "eval_steps": 50,
+ "global_step": 1000,
+ "is_hyper_param_search": true,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.09,
+ "learning_rate": 9.520888692829572e-06,
+ "loss": 1.9542,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "eval_f1": 0.04583937972534243,
+ "eval_loss": 1.94540536403656,
+ "eval_runtime": 12.7715,
+ "eval_samples_per_second": 713.073,
+ "eval_steps_per_second": 1.409,
+ "step": 50
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 9.307798077502143e-06,
+ "loss": 1.6697,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "eval_f1": 0.5702339339536844,
+ "eval_loss": 1.5132834911346436,
+ "eval_runtime": 13.3022,
+ "eval_samples_per_second": 684.621,
+ "eval_steps_per_second": 1.353,
+ "step": 100
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 9.094707462174712e-06,
+ "loss": 0.8971,
+ "step": 150
+ },
+ {
+ "epoch": 0.26,
+ "eval_f1": 0.7432151839966054,
+ "eval_loss": 0.97530198097229,
+ "eval_runtime": 12.9877,
+ "eval_samples_per_second": 701.204,
+ "eval_steps_per_second": 1.386,
+ "step": 150
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 8.881616846847283e-06,
+ "loss": 0.6418,
+ "step": 200
+ },
+ {
+ "epoch": 0.35,
+ "eval_f1": 0.7297934771119111,
+ "eval_loss": 0.8931246995925903,
+ "eval_runtime": 12.7184,
+ "eval_samples_per_second": 716.049,
+ "eval_steps_per_second": 1.415,
+ "step": 200
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 8.668526231519852e-06,
+ "loss": 0.6438,
+ "step": 250
+ },
+ {
+ "epoch": 0.44,
+ "eval_f1": 0.7761230585561165,
+ "eval_loss": 0.7596781849861145,
+ "eval_runtime": 12.6711,
+ "eval_samples_per_second": 718.722,
+ "eval_steps_per_second": 1.421,
+ "step": 250
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 8.455435616192421e-06,
+ "loss": 0.5509,
+ "step": 300
+ },
+ {
+ "epoch": 0.53,
+ "eval_f1": 0.8095769257080263,
+ "eval_loss": 0.6825068593025208,
+ "eval_runtime": 12.713,
+ "eval_samples_per_second": 716.351,
+ "eval_steps_per_second": 1.416,
+ "step": 300
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 8.242345000864992e-06,
+ "loss": 0.45,
+ "step": 350
+ },
+ {
+ "epoch": 0.61,
+ "eval_f1": 0.820467350641947,
+ "eval_loss": 0.6184367537498474,
+ "eval_runtime": 12.8329,
+ "eval_samples_per_second": 709.662,
+ "eval_steps_per_second": 1.403,
+ "step": 350
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 8.029254385537563e-06,
+ "loss": 0.4877,
+ "step": 400
+ },
+ {
+ "epoch": 0.7,
+ "eval_f1": 0.8192647088110643,
+ "eval_loss": 0.5932053327560425,
+ "eval_runtime": 12.9021,
+ "eval_samples_per_second": 705.857,
+ "eval_steps_per_second": 1.395,
+ "step": 400
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 7.816163770210132e-06,
+ "loss": 0.5672,
+ "step": 450
+ },
+ {
+ "epoch": 0.79,
+ "eval_f1": 0.8008517524970246,
+ "eval_loss": 0.6429001092910767,
+ "eval_runtime": 12.7777,
+ "eval_samples_per_second": 712.728,
+ "eval_steps_per_second": 1.409,
+ "step": 450
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 7.603073154882702e-06,
+ "loss": 0.5631,
+ "step": 500
+ },
+ {
+ "epoch": 0.88,
+ "eval_f1": 0.8381595689513368,
+ "eval_loss": 0.5315341353416443,
+ "eval_runtime": 12.733,
+ "eval_samples_per_second": 715.231,
+ "eval_steps_per_second": 1.414,
+ "step": 500
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 7.3899825395552714e-06,
+ "loss": 0.5015,
+ "step": 550
+ },
+ {
+ "epoch": 0.96,
+ "eval_f1": 0.8180551239783007,
+ "eval_loss": 0.6169003844261169,
+ "eval_runtime": 12.9102,
+ "eval_samples_per_second": 705.412,
+ "eval_steps_per_second": 1.394,
+ "step": 550
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 7.176891924227842e-06,
+ "loss": 0.4257,
+ "step": 600
+ },
+ {
+ "epoch": 1.05,
+ "eval_f1": 0.817923619273461,
+ "eval_loss": 0.6071110963821411,
+ "eval_runtime": 13.735,
+ "eval_samples_per_second": 663.053,
+ "eval_steps_per_second": 1.311,
+ "step": 600
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 6.963801308900412e-06,
+ "loss": 0.3327,
+ "step": 650
+ },
+ {
+ "epoch": 1.14,
+ "eval_f1": 0.826695023066157,
+ "eval_loss": 0.5854237675666809,
+ "eval_runtime": 13.1401,
+ "eval_samples_per_second": 693.07,
+ "eval_steps_per_second": 1.37,
+ "step": 650
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.7507106935729815e-06,
+ "loss": 0.403,
+ "step": 700
+ },
+ {
+ "epoch": 1.23,
+ "eval_f1": 0.8378257942819006,
+ "eval_loss": 0.5514610409736633,
+ "eval_runtime": 12.6701,
+ "eval_samples_per_second": 718.777,
+ "eval_steps_per_second": 1.421,
+ "step": 700
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 6.537620078245552e-06,
+ "loss": 0.3681,
+ "step": 750
+ },
+ {
+ "epoch": 1.31,
+ "eval_f1": 0.8209323144303958,
+ "eval_loss": 0.6371071338653564,
+ "eval_runtime": 13.0054,
+ "eval_samples_per_second": 700.248,
+ "eval_steps_per_second": 1.384,
+ "step": 750
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 6.324529462918122e-06,
+ "loss": 0.3236,
+ "step": 800
+ },
+ {
+ "epoch": 1.4,
+ "eval_f1": 0.8376510055572749,
+ "eval_loss": 0.5530755519866943,
+ "eval_runtime": 12.7123,
+ "eval_samples_per_second": 716.393,
+ "eval_steps_per_second": 1.416,
+ "step": 800
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 6.111438847590692e-06,
+ "loss": 0.4138,
+ "step": 850
+ },
+ {
+ "epoch": 1.49,
+ "eval_f1": 0.8451899128476951,
+ "eval_loss": 0.5386557579040527,
+ "eval_runtime": 12.9136,
+ "eval_samples_per_second": 705.227,
+ "eval_steps_per_second": 1.394,
+ "step": 850
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 5.8983482322632625e-06,
+ "loss": 0.3877,
+ "step": 900
+ },
+ {
+ "epoch": 1.58,
+ "eval_f1": 0.8396443482352846,
+ "eval_loss": 0.5918898582458496,
+ "eval_runtime": 12.97,
+ "eval_samples_per_second": 702.16,
+ "eval_steps_per_second": 1.388,
+ "step": 900
+ },
+ {
+ "epoch": 1.66,
+ "learning_rate": 5.6852576169358325e-06,
+ "loss": 0.4276,
+ "step": 950
+ },
+ {
+ "epoch": 1.66,
+ "eval_f1": 0.8384743295885987,
+ "eval_loss": 0.5536447763442993,
+ "eval_runtime": 12.8415,
+ "eval_samples_per_second": 709.185,
+ "eval_steps_per_second": 1.402,
+ "step": 950
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 5.472167001608402e-06,
+ "loss": 0.3926,
+ "step": 1000
+ },
+ {
+ "epoch": 1.75,
+ "eval_f1": 0.8172426280935748,
+ "eval_loss": 0.6521010398864746,
+ "eval_runtime": 12.7489,
+ "eval_samples_per_second": 714.335,
+ "eval_steps_per_second": 1.412,
+ "step": 1000
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 2284,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 500,
+ "total_flos": 81469417756800.0,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": {
+ "learning_rate": 9.520888692829572e-06,
+ "num_train_epochs": 4,
+ "per_device_train_batch_size": 8,
+ "seed": 4
+ }
+ }
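Note: trainer_state.json records the loss and eval_f1 curve every 50 steps plus the best checkpoint seen so far (here checkpoint-500 with F1 ≈ 0.838). A minimal sketch of reading it back, assuming the checkpoint directory is available locally:

import json

# Pull the evaluation curve and best score out of the checkpoint's trainer state.
with open("run-2/checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

evals = [(e["step"], e["eval_f1"]) for e in state["log_history"] if "eval_f1" in e]
print("best_metric:", state["best_metric"], "at", state["best_model_checkpoint"])
print("latest evals:", evals[-3:])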
run-2/checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ffebf7e3158659f9acfc5251a7413b79224a3042a0cc4d9f259be5d9dbc266cd
+ size 4728
run-2/checkpoint-1000/vocab.txt ADDED
The diff for this file is too large to render. See raw diff
 
run-2/checkpoint-1500/config.json ADDED
@@ -0,0 +1,48 @@
+ {
+ "_name_or_path": "klue/roberta-base",
+ "architectures": [
+ "RobertaForSequenceClassification"
+ ],
+ "attention_probs_dropout_prob": 0.1,
+ "bos_token_id": 0,
+ "classifier_dropout": null,
+ "eos_token_id": 2,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.1,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "LABEL_0",
+ "1": "LABEL_1",
+ "2": "LABEL_2",
+ "3": "LABEL_3",
+ "4": "LABEL_4",
+ "5": "LABEL_5",
+ "6": "LABEL_6"
+ },
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "LABEL_0": 0,
+ "LABEL_1": 1,
+ "LABEL_2": 2,
+ "LABEL_3": 3,
+ "LABEL_4": 4,
+ "LABEL_5": 5,
+ "LABEL_6": 6
+ },
+ "layer_norm_eps": 1e-05,
+ "max_position_embeddings": 514,
+ "model_type": "roberta",
+ "num_attention_heads": 12,
+ "num_hidden_layers": 12,
+ "pad_token_id": 1,
+ "position_embedding_type": "absolute",
+ "problem_type": "single_label_classification",
+ "tokenizer_class": "BertTokenizer",
+ "torch_dtype": "float32",
+ "transformers_version": "4.37.0",
+ "type_vocab_size": 1,
+ "use_cache": true,
+ "vocab_size": 32000
+ }
run-2/checkpoint-1500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17f1e97e80984b8c703c5df8fbd30ef068a73a57bbc1e84e480b96e2d8ee40ff
+ size 442518124
run-2/checkpoint-1500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d0852145dbedd3d255be66c9beab141be9e64410d3de1fcc836278111774e6a
+ size 885156090
run-2/checkpoint-1500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00e674e1c53bc3c38e08621d5e0b2e51a3c1e08b1238d1cd7a57dff5ebf25d88
+ size 14244
run-2/checkpoint-1500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52875a2cd32e5a9687b756a6ffed88cddf8f2fc62d96d872e43b52b8cd79d6c6
+ size 1064
run-2/checkpoint-1500/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+ "bos_token": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "cls_token": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "mask_token": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "sep_token": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
run-2/checkpoint-1500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
run-2/checkpoint-1500/tokenizer_config.json ADDED
@@ -0,0 +1,59 @@
+ {
+ "added_tokens_decoder": {
+ "0": {
+ "content": "[CLS]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "[PAD]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "[SEP]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "[UNK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "4": {
+ "content": "[MASK]",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "[CLS]",
+ "clean_up_tokenization_spaces": true,
+ "cls_token": "[CLS]",
+ "do_basic_tokenize": true,
+ "do_lower_case": false,
+ "eos_token": "[SEP]",
+ "mask_token": "[MASK]",
+ "model_max_length": 512,
+ "never_split": null,
+ "pad_token": "[PAD]",
+ "sep_token": "[SEP]",
+ "strip_accents": null,
+ "tokenize_chinese_chars": true,
+ "tokenizer_class": "BertTokenizer",
+ "unk_token": "[UNK]"
+ }
run-2/checkpoint-1500/trainer_state.json ADDED
@@ -0,0 +1,476 @@
+ {
+ "best_metric": 0.8420632349860007,
+ "best_model_checkpoint": "test-klue/ynat/run-2/checkpoint-1500",
+ "epoch": 2.626970227670753,
+ "eval_steps": 50,
+ "global_step": 1500,
+ "is_hyper_param_search": true,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.09,
+ "learning_rate": 9.520888692829572e-06,
+ "loss": 1.9542,
+ "step": 50
+ },
+ {
+ "epoch": 0.09,
+ "eval_f1": 0.04583937972534243,
+ "eval_loss": 1.94540536403656,
+ "eval_runtime": 12.7715,
+ "eval_samples_per_second": 713.073,
+ "eval_steps_per_second": 1.409,
+ "step": 50
+ },
+ {
+ "epoch": 0.18,
+ "learning_rate": 9.307798077502143e-06,
+ "loss": 1.6697,
+ "step": 100
+ },
+ {
+ "epoch": 0.18,
+ "eval_f1": 0.5702339339536844,
+ "eval_loss": 1.5132834911346436,
+ "eval_runtime": 13.3022,
+ "eval_samples_per_second": 684.621,
+ "eval_steps_per_second": 1.353,
+ "step": 100
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 9.094707462174712e-06,
+ "loss": 0.8971,
+ "step": 150
+ },
+ {
+ "epoch": 0.26,
+ "eval_f1": 0.7432151839966054,
+ "eval_loss": 0.97530198097229,
+ "eval_runtime": 12.9877,
+ "eval_samples_per_second": 701.204,
+ "eval_steps_per_second": 1.386,
+ "step": 150
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 8.881616846847283e-06,
+ "loss": 0.6418,
+ "step": 200
+ },
+ {
+ "epoch": 0.35,
+ "eval_f1": 0.7297934771119111,
+ "eval_loss": 0.8931246995925903,
+ "eval_runtime": 12.7184,
+ "eval_samples_per_second": 716.049,
+ "eval_steps_per_second": 1.415,
+ "step": 200
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 8.668526231519852e-06,
+ "loss": 0.6438,
+ "step": 250
+ },
+ {
+ "epoch": 0.44,
+ "eval_f1": 0.7761230585561165,
+ "eval_loss": 0.7596781849861145,
+ "eval_runtime": 12.6711,
+ "eval_samples_per_second": 718.722,
+ "eval_steps_per_second": 1.421,
+ "step": 250
+ },
+ {
+ "epoch": 0.53,
+ "learning_rate": 8.455435616192421e-06,
+ "loss": 0.5509,
+ "step": 300
+ },
+ {
+ "epoch": 0.53,
+ "eval_f1": 0.8095769257080263,
+ "eval_loss": 0.6825068593025208,
+ "eval_runtime": 12.713,
+ "eval_samples_per_second": 716.351,
+ "eval_steps_per_second": 1.416,
+ "step": 300
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 8.242345000864992e-06,
+ "loss": 0.45,
+ "step": 350
+ },
+ {
+ "epoch": 0.61,
+ "eval_f1": 0.820467350641947,
+ "eval_loss": 0.6184367537498474,
+ "eval_runtime": 12.8329,
+ "eval_samples_per_second": 709.662,
+ "eval_steps_per_second": 1.403,
+ "step": 350
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 8.029254385537563e-06,
+ "loss": 0.4877,
+ "step": 400
+ },
+ {
+ "epoch": 0.7,
+ "eval_f1": 0.8192647088110643,
+ "eval_loss": 0.5932053327560425,
+ "eval_runtime": 12.9021,
+ "eval_samples_per_second": 705.857,
+ "eval_steps_per_second": 1.395,
+ "step": 400
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 7.816163770210132e-06,
+ "loss": 0.5672,
+ "step": 450
+ },
+ {
+ "epoch": 0.79,
+ "eval_f1": 0.8008517524970246,
+ "eval_loss": 0.6429001092910767,
+ "eval_runtime": 12.7777,
+ "eval_samples_per_second": 712.728,
+ "eval_steps_per_second": 1.409,
+ "step": 450
+ },
+ {
+ "epoch": 0.88,
+ "learning_rate": 7.603073154882702e-06,
+ "loss": 0.5631,
+ "step": 500
+ },
+ {
+ "epoch": 0.88,
+ "eval_f1": 0.8381595689513368,
+ "eval_loss": 0.5315341353416443,
+ "eval_runtime": 12.733,
+ "eval_samples_per_second": 715.231,
+ "eval_steps_per_second": 1.414,
+ "step": 500
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 7.3899825395552714e-06,
+ "loss": 0.5015,
+ "step": 550
+ },
+ {
+ "epoch": 0.96,
+ "eval_f1": 0.8180551239783007,
+ "eval_loss": 0.6169003844261169,
+ "eval_runtime": 12.9102,
+ "eval_samples_per_second": 705.412,
+ "eval_steps_per_second": 1.394,
+ "step": 550
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 7.176891924227842e-06,
+ "loss": 0.4257,
+ "step": 600
+ },
+ {
+ "epoch": 1.05,
+ "eval_f1": 0.817923619273461,
+ "eval_loss": 0.6071110963821411,
+ "eval_runtime": 13.735,
+ "eval_samples_per_second": 663.053,
+ "eval_steps_per_second": 1.311,
+ "step": 600
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 6.963801308900412e-06,
+ "loss": 0.3327,
+ "step": 650
+ },
+ {
+ "epoch": 1.14,
+ "eval_f1": 0.826695023066157,
+ "eval_loss": 0.5854237675666809,
+ "eval_runtime": 13.1401,
+ "eval_samples_per_second": 693.07,
+ "eval_steps_per_second": 1.37,
+ "step": 650
+ },
+ {
+ "epoch": 1.23,
+ "learning_rate": 6.7507106935729815e-06,
+ "loss": 0.403,
+ "step": 700
+ },
+ {
+ "epoch": 1.23,
+ "eval_f1": 0.8378257942819006,
+ "eval_loss": 0.5514610409736633,
+ "eval_runtime": 12.6701,
+ "eval_samples_per_second": 718.777,
+ "eval_steps_per_second": 1.421,
+ "step": 700
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 6.537620078245552e-06,
+ "loss": 0.3681,
+ "step": 750
+ },
+ {
+ "epoch": 1.31,
+ "eval_f1": 0.8209323144303958,
+ "eval_loss": 0.6371071338653564,
+ "eval_runtime": 13.0054,
+ "eval_samples_per_second": 700.248,
+ "eval_steps_per_second": 1.384,
+ "step": 750
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 6.324529462918122e-06,
+ "loss": 0.3236,
+ "step": 800
+ },
+ {
+ "epoch": 1.4,
+ "eval_f1": 0.8376510055572749,
+ "eval_loss": 0.5530755519866943,
+ "eval_runtime": 12.7123,
+ "eval_samples_per_second": 716.393,
+ "eval_steps_per_second": 1.416,
+ "step": 800
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 6.111438847590692e-06,
+ "loss": 0.4138,
+ "step": 850
+ },
+ {
+ "epoch": 1.49,
+ "eval_f1": 0.8451899128476951,
+ "eval_loss": 0.5386557579040527,
+ "eval_runtime": 12.9136,
+ "eval_samples_per_second": 705.227,
+ "eval_steps_per_second": 1.394,
+ "step": 850
+ },
+ {
+ "epoch": 1.58,
+ "learning_rate": 5.8983482322632625e-06,
+ "loss": 0.3877,
+ "step": 900
+ },
+ {
+ "epoch": 1.58,
+ "eval_f1": 0.8396443482352846,
+ "eval_loss": 0.5918898582458496,
+ "eval_runtime": 12.97,
+ "eval_samples_per_second": 702.16,
+ "eval_steps_per_second": 1.388,
+ "step": 900
+ },
+ {
+ "epoch": 1.66,
+ "learning_rate": 5.6852576169358325e-06,
+ "loss": 0.4276,
+ "step": 950
+ },
+ {
+ "epoch": 1.66,
+ "eval_f1": 0.8384743295885987,
+ "eval_loss": 0.5536447763442993,
+ "eval_runtime": 12.8415,
+ "eval_samples_per_second": 709.185,
+ "eval_steps_per_second": 1.402,
+ "step": 950
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 5.472167001608402e-06,
+ "loss": 0.3926,
+ "step": 1000
+ },
+ {
+ "epoch": 1.75,
+ "eval_f1": 0.8172426280935748,
+ "eval_loss": 0.6521010398864746,
+ "eval_runtime": 12.7489,
+ "eval_samples_per_second": 714.335,
+ "eval_steps_per_second": 1.412,
+ "step": 1000
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 5.2590763862809725e-06,
+ "loss": 0.4432,
+ "step": 1050
+ },
+ {
+ "epoch": 1.84,
+ "eval_f1": 0.8475488904826799,
+ "eval_loss": 0.49612656235694885,
+ "eval_runtime": 12.9412,
+ "eval_samples_per_second": 703.724,
+ "eval_steps_per_second": 1.391,
+ "step": 1050
+ },
+ {
+ "epoch": 1.93,
+ "learning_rate": 5.0459857709535426e-06,
+ "loss": 0.4031,
+ "step": 1100
+ },
+ {
+ "epoch": 1.93,
+ "eval_f1": 0.8534964001754608,
+ "eval_loss": 0.5003403425216675,
+ "eval_runtime": 13.459,
+ "eval_samples_per_second": 676.648,
+ "eval_steps_per_second": 1.337,
+ "step": 1100
+ },
+ {
+ "epoch": 2.01,
+ "learning_rate": 4.832895155626112e-06,
+ "loss": 0.403,
+ "step": 1150
+ },
+ {
+ "epoch": 2.01,
+ "eval_f1": 0.850177595861797,
+ "eval_loss": 0.48595184087753296,
+ "eval_runtime": 13.0479,
+ "eval_samples_per_second": 697.967,
+ "eval_steps_per_second": 1.38,
+ "step": 1150
+ },
+ {
+ "epoch": 2.1,
+ "learning_rate": 4.619804540298683e-06,
+ "loss": 0.2496,
+ "step": 1200
+ },
+ {
+ "epoch": 2.1,
+ "eval_f1": 0.8431021077169384,
+ "eval_loss": 0.5751626491546631,
+ "eval_runtime": 12.7058,
+ "eval_samples_per_second": 716.759,
+ "eval_steps_per_second": 1.417,
+ "step": 1200
+ },
+ {
+ "epoch": 2.19,
+ "learning_rate": 4.406713924971253e-06,
+ "loss": 0.257,
+ "step": 1250
+ },
+ {
+ "epoch": 2.19,
+ "eval_f1": 0.8475870877960617,
+ "eval_loss": 0.5579206347465515,
+ "eval_runtime": 12.6855,
+ "eval_samples_per_second": 717.908,
+ "eval_steps_per_second": 1.419,
+ "step": 1250
+ },
+ {
+ "epoch": 2.28,
+ "learning_rate": 4.193623309643822e-06,
+ "loss": 0.3404,
+ "step": 1300
+ },
+ {
+ "epoch": 2.28,
+ "eval_f1": 0.8376321081317408,
+ "eval_loss": 0.6273791790008545,
+ "eval_runtime": 12.7187,
+ "eval_samples_per_second": 716.034,
+ "eval_steps_per_second": 1.415,
+ "step": 1300
+ },
+ {
+ "epoch": 2.36,
+ "learning_rate": 3.980532694316393e-06,
+ "loss": 0.2248,
+ "step": 1350
+ },
+ {
+ "epoch": 2.36,
+ "eval_f1": 0.8374320586864977,
+ "eval_loss": 0.6183853149414062,
+ "eval_runtime": 12.8619,
+ "eval_samples_per_second": 708.058,
+ "eval_steps_per_second": 1.399,
+ "step": 1350
+ },
+ {
+ "epoch": 2.45,
+ "learning_rate": 3.7674420789889623e-06,
+ "loss": 0.335,
+ "step": 1400
+ },
+ {
+ "epoch": 2.45,
+ "eval_f1": 0.8499224527475217,
+ "eval_loss": 0.5527887940406799,
+ "eval_runtime": 12.9423,
+ "eval_samples_per_second": 703.662,
+ "eval_steps_per_second": 1.391,
+ "step": 1400
+ },
+ {
+ "epoch": 2.54,
+ "learning_rate": 3.5543514636615323e-06,
+ "loss": 0.3058,
+ "step": 1450
+ },
+ {
+ "epoch": 2.54,
+ "eval_f1": 0.8389360164023282,
+ "eval_loss": 0.6216479539871216,
+ "eval_runtime": 12.7941,
+ "eval_samples_per_second": 711.813,
+ "eval_steps_per_second": 1.407,
+ "step": 1450
+ },
+ {
+ "epoch": 2.63,
+ "learning_rate": 3.341260848334102e-06,
+ "loss": 0.2675,
+ "step": 1500
+ },
+ {
+ "epoch": 2.63,
+ "eval_f1": 0.8420632349860007,
+ "eval_loss": 0.6038542985916138,
+ "eval_runtime": 12.7939,
+ "eval_samples_per_second": 711.826,
+ "eval_steps_per_second": 1.407,
+ "step": 1500
+ }
+ ],
+ "logging_steps": 50,
+ "max_steps": 2284,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 4,
+ "save_steps": 500,
+ "total_flos": 122130123328800.0,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": {
+ "learning_rate": 9.520888692829572e-06,
+ "num_train_epochs": 4,
+ "per_device_train_batch_size": 8,
+ "seed": 4
+ }
+ }
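Note: is_hyper_param_search is true and the trial_params block records the hyperparameters sampled for this trial (learning_rate, num_train_epochs, per_device_train_batch_size, seed), so run-2 is one trial of a Trainer.hyperparameter_search sweep. A minimal sketch of a search space that could yield such trials; the ranges and the optuna backend are assumptions, not recorded in this commit:

# Hypothetical search space for Trainer.hyperparameter_search(backend="optuna");
# the actual ranges used for run-2 are not stored in this repository.
def hp_space(trial):
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 2, 5),
        "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [8, 16, 32]),
        "seed": trial.suggest_int("seed", 1, 42),
    }

# Usage (given a Trainer constructed with model_init=...):
# best_trial = trainer.hyperparameter_search(hp_space=hp_space, direction="maximize", n_trials=5)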
run-2/checkpoint-1500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ffebf7e3158659f9acfc5251a7413b79224a3042a0cc4d9f259be5d9dbc266cd
+ size 4728
run-2/checkpoint-1500/vocab.txt ADDED
The diff for this file is too large to render. See raw diff