nhatminh committed
Commit a62b897
1 Parent(s): dca48d9

Upload 8 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "jinaai/jina-reranker-v2-base-multilingual",
+  "architectures": ["XLMRobertaForSequenceClassification"],
+  "attention_probs_dropout_prob": 0.1,
+  "auto_map": {
+    "AutoConfig": "configuration_xlm_roberta.XLMRobertaFlashConfig",
+    "AutoModel": "modeling_xlm_roberta.XLMRobertaModel",
+    "AutoModelForSequenceClassification": "modeling_xlm_roberta.XLMRobertaForSequenceClassification"
+  },
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "emb_pooler": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "num_labels": 1,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_eps": 1e-5,
+  "max_position_embeddings": 1026,
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "output_past": true,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.40.0",
+  "type_vocab_size": 1,
+  "use_cache": false,
+  "use_flash_attn": true,
+  "vocab_size": 250002
+}
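
Because `auto_map` routes `AutoConfig` and `AutoModelForSequenceClassification` to the custom modules shipped with the repo, loading this checkpoint needs `trust_remote_code=True`, and the referenced `modeling_xlm_roberta.py` must be present alongside these files. A minimal scoring sketch under those assumptions (local checkout at `.`, hypothetical inputs; with `use_flash_attn: true` a CUDA device is likely required):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# "." stands for a local checkout of this repo (hypothetical path).
tokenizer = AutoTokenizer.from_pretrained(".")
model = AutoModelForSequenceClassification.from_pretrained(
    ".", torch_dtype=torch.bfloat16, trust_remote_code=True
)
model.eval()

# num_labels=1: the classification head emits a single relevance logit
# per (query, document) pair, which serves as the reranking score.
inputs = tokenizer(
    ["what is a panda?"],
    ["The giant panda is a bear species endemic to China."],
    padding=True,
    truncation=True,
    max_length=1024,
    return_tensors="pt",
)
with torch.no_grad():
    scores = model(**inputs).logits.squeeze(-1)
print(scores)  # higher = more relevant
```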
configuration_xlm_roberta.py ADDED
@@ -0,0 +1,69 @@
+from transformers import PretrainedConfig
+import torch
+
+class XLMRobertaFlashConfig(PretrainedConfig):
+    def __init__(
+        self,
+        vocab_size=30522,
+        hidden_size=768,
+        num_hidden_layers=12,
+        num_attention_heads=12,
+        intermediate_size=3072,
+        hidden_act="gelu",
+        hidden_dropout_prob=0.1,
+        attention_probs_dropout_prob=0.1,
+        max_position_embeddings=512,
+        type_vocab_size=2,
+        initializer_range=0.02,
+        layer_norm_eps=1e-12,
+        pad_token_id=1,
+        bos_token_id=0,
+        eos_token_id=2,
+        position_embedding_type="absolute",
+        use_cache=True,
+        classifier_dropout=None,
+        lora_adaptations=None,
+        lora_rank=4,
+        lora_dropout_p=0.0,
+        lora_alpha=1,
+        lora_main_params_trainable=False,
+        load_trained_adapters=False,
+        use_flash_attn=True,
+        torch_dtype=None,
+        emb_pooler=None,
+        matryoshka_dimensions=None,
+        truncate_dim=None,
+        **kwargs,
+    ):
+        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.hidden_act = hidden_act
+        self.intermediate_size = intermediate_size
+        self.hidden_dropout_prob = hidden_dropout_prob
+        self.attention_probs_dropout_prob = attention_probs_dropout_prob
+        self.max_position_embeddings = max_position_embeddings
+        self.type_vocab_size = type_vocab_size
+        self.initializer_range = initializer_range
+        self.layer_norm_eps = layer_norm_eps
+        self.position_embedding_type = position_embedding_type
+        self.use_cache = use_cache
+        self.classifier_dropout = classifier_dropout
+        self.load_trained_adapters = load_trained_adapters
+        self.lora_adaptations = lora_adaptations
+        self.lora_rank = lora_rank
+        self.lora_dropout_p = lora_dropout_p
+        self.lora_alpha = lora_alpha
+        self.lora_main_params_trainable = lora_main_params_trainable
+        self.use_flash_attn = use_flash_attn
+        self.emb_pooler = emb_pooler
+        self.matryoshka_dimensions = matryoshka_dimensions
+        self.truncate_dim = truncate_dim
+        if torch_dtype and hasattr(torch, torch_dtype) and type(getattr(torch, torch_dtype)) is torch.dtype:
+            self.torch_dtype = getattr(torch, torch_dtype)
+        else:
+            self.torch_dtype = torch_dtype
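
The constructor's final branch resolves a dtype given as a string (as it arrives from `config.json`, e.g. `"bfloat16"`) to the corresponding `torch.dtype` object, and stores any other value unchanged. A small sketch of that behavior, assuming the file above is importable and that `num_labels` is absorbed by the `PretrainedConfig` base class via `**kwargs`:

```python
import torch
from configuration_xlm_roberta import XLMRobertaFlashConfig

# Values mirror config.json in this commit.
cfg = XLMRobertaFlashConfig(
    vocab_size=250002,
    max_position_embeddings=1026,
    type_vocab_size=1,
    layer_norm_eps=1e-5,
    num_labels=1,
    torch_dtype="bfloat16",
)
assert cfg.torch_dtype is torch.bfloat16            # string resolved to dtype object
assert XLMRobertaFlashConfig().torch_dtype is None  # None passes through unchanged
```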
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c025167147e001a198eed0d07acba9579be7135ca78ad88ee5f16e7ddea8476
+size 556892306
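
These three lines are a Git LFS pointer, not the weights themselves: the actual 556,892,306-byte file is content-addressed by the SHA-256 in the `oid` field. A sketch of verifying a pulled file against its pointer (hypothetical helper, not part of this commit):

```python
import hashlib

def lfs_oid(path: str) -> str:
    """SHA-256 of the file contents, as recorded in an LFS pointer's oid field."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# After `git lfs pull`, the smudged file should hash to the pointer's oid:
# lfs_oid("model.safetensors")
#   == "1c025167147e001a198eed0d07acba9579be7135ca78ad88ee5f16e7ddea8476"
```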
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "cls_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1e0376b0ca081a6b0c18125d251f214835d1165944f9eac39baf8d9cf2b15fe
+size 17082832
tokenizer_config.json ADDED
@@ -0,0 +1,54 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "250001": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": "<mask>",
+  "model_max_length": 1024,
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "tokenizer_class": "XLMRobertaTokenizer",
+  "unk_token": "<unk>"
+}
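
`model_max_length` is 1024 while `config.json` declares `max_position_embeddings: 1026`: RoBERTa-style position ids start at `pad_token_id + 1`, so the position table needs two extra slots beyond the usable sequence length. A quick pair-encoding sketch (assuming a local checkout at `.`):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # hypothetical local path

# XLMRobertaTokenizer encodes a pair as: <s> query </s></s> document </s>
enc = tokenizer(
    "what is a panda?",
    "The giant panda is a bear species endemic to China.",
    truncation=True,
    max_length=1024,
)
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
```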
trainer_state.json ADDED
@@ -0,0 +1,78 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 1.8243488794669895,
+  "eval_steps": 500,
+  "global_step": 4518,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {"epoch": 0.04037956793862306, "grad_norm": 3.5625, "learning_rate": 5.878836833602585e-05, "loss": 0.1271, "step": 100},
+    {"epoch": 0.08075913587724612, "grad_norm": 2.953125, "learning_rate": 5.7576736672051694e-05, "loss": 0.0977, "step": 200},
+    {"epoch": 0.12113870381586916, "grad_norm": 1.7265625, "learning_rate": 5.636510500807755e-05, "loss": 0.077, "step": 300},
+    {"epoch": 0.16151827175449224, "grad_norm": 4.90625, "learning_rate": 5.5153473344103394e-05, "loss": 0.0692, "step": 400},
+    {"epoch": 0.20189783969311528, "grad_norm": 4.15625, "learning_rate": 5.394184168012924e-05, "loss": 0.0566, "step": 500},
+    {"epoch": 0.24227740763173833, "grad_norm": 2.421875, "learning_rate": 5.2730210016155086e-05, "loss": 0.0594, "step": 600},
+    {"epoch": 0.2826569755703614, "grad_norm": 2.109375, "learning_rate": 5.1518578352180936e-05, "loss": 0.0591, "step": 700},
+    {"epoch": 0.3230365435089845, "grad_norm": 3.21875, "learning_rate": 5.030694668820679e-05, "loss": 0.068, "step": 800},
+    {"epoch": 0.3634161114476075, "grad_norm": 2.921875, "learning_rate": 4.9095315024232635e-05, "loss": 0.0563, "step": 900},
+    {"epoch": 0.40379567938623057, "grad_norm": 1.0, "learning_rate": 4.7883683360258485e-05, "loss": 0.0562, "step": 1000},
+    {"epoch": 0.44417524732485364, "grad_norm": 2.46875, "learning_rate": 4.667205169628433e-05, "loss": 0.0601, "step": 1100},
+    {"epoch": 0.48455481526347666, "grad_norm": 1.6953125, "learning_rate": 4.546042003231018e-05, "loss": 0.0455, "step": 1200},
+    {"epoch": 0.5249343832020997, "grad_norm": 1.3359375, "learning_rate": 4.424878836833603e-05, "loss": 0.0472, "step": 1300},
+    {"epoch": 0.5653139511407228, "grad_norm": 1.78125, "learning_rate": 4.303715670436188e-05, "loss": 0.0561, "step": 1400},
+    {"epoch": 0.6056935190793459, "grad_norm": 1.390625, "learning_rate": 4.1825525040387727e-05, "loss": 0.0517, "step": 1500},
+    {"epoch": 0.646073087017969, "grad_norm": 0.90625, "learning_rate": 4.061389337641357e-05, "loss": 0.0449, "step": 1600},
+    {"epoch": 0.6864526549565919, "grad_norm": 3.28125, "learning_rate": 3.940226171243942e-05, "loss": 0.0425, "step": 1700},
+    {"epoch": 0.726832222895215, "grad_norm": 1.546875, "learning_rate": 3.819063004846526e-05, "loss": 0.0462, "step": 1800},
+    {"epoch": 0.7672117908338381, "grad_norm": 1.53125, "learning_rate": 3.697899838449112e-05, "loss": 0.0488, "step": 1900},
+    {"epoch": 0.8075913587724611, "grad_norm": 5.28125, "learning_rate": 3.576736672051697e-05, "loss": 0.0447, "step": 2000},
+    {"epoch": 0.8479709267110842, "grad_norm": 2.859375, "learning_rate": 3.455573505654281e-05, "loss": 0.0477, "step": 2100},
+    {"epoch": 0.8883504946497073, "grad_norm": 2.4375, "learning_rate": 3.334410339256866e-05, "loss": 0.0489, "step": 2200},
+    {"epoch": 0.9287300625883304, "grad_norm": 0.69921875, "learning_rate": 3.2132471728594504e-05, "loss": 0.0508, "step": 2300},
+    {"epoch": 0.9691096305269533, "grad_norm": 2.34375, "learning_rate": 3.092084006462036e-05, "loss": 0.0396, "step": 2400},
+    {"epoch": 1.0094891984655765, "grad_norm": 1.6015625, "learning_rate": 2.9709208400646203e-05, "loss": 0.0486, "step": 2500},
+    {"epoch": 1.0498687664041995, "grad_norm": 4.6875, "learning_rate": 2.8497576736672053e-05, "loss": 0.0349, "step": 2600},
+    {"epoch": 1.0902483343428226, "grad_norm": 0.671875, "learning_rate": 2.72859450726979e-05, "loss": 0.0277, "step": 2700},
+    {"epoch": 1.1306279022814456, "grad_norm": 1.1484375, "learning_rate": 2.607431340872375e-05, "loss": 0.0291, "step": 2800},
+    {"epoch": 1.1710074702200686, "grad_norm": 3.890625, "learning_rate": 2.4862681744749595e-05, "loss": 0.0327, "step": 2900},
+    {"epoch": 1.2113870381586918, "grad_norm": 1.2109375, "learning_rate": 2.3651050080775445e-05, "loss": 0.0324, "step": 3000},
+    {"epoch": 1.2517666060973147, "grad_norm": 1.953125, "learning_rate": 2.2439418416801295e-05, "loss": 0.0337, "step": 3100},
+    {"epoch": 1.292146174035938, "grad_norm": 3.46875, "learning_rate": 2.122778675282714e-05, "loss": 0.0318, "step": 3200},
+    {"epoch": 1.3325257419745609, "grad_norm": 0.703125, "learning_rate": 2.0016155088852987e-05, "loss": 0.0347, "step": 3300},
+    {"epoch": 1.3729053099131838, "grad_norm": 2.875, "learning_rate": 1.8804523424878837e-05, "loss": 0.0299, "step": 3400},
+    {"epoch": 1.413284877851807, "grad_norm": 2.078125, "learning_rate": 1.7592891760904683e-05, "loss": 0.0292, "step": 3500},
+    {"epoch": 1.45366444579043, "grad_norm": 0.80078125, "learning_rate": 1.6381260096930536e-05, "loss": 0.0321, "step": 3600},
+    {"epoch": 1.4940440137290532, "grad_norm": 2.359375, "learning_rate": 1.5169628432956381e-05, "loss": 0.0305, "step": 3700},
+    {"epoch": 1.5344235816676761, "grad_norm": 0.6171875, "learning_rate": 1.395799676898223e-05, "loss": 0.0289, "step": 3800},
+    {"epoch": 1.574803149606299, "grad_norm": 2.765625, "learning_rate": 1.2746365105008077e-05, "loss": 0.0304, "step": 3900},
+    {"epoch": 1.6151827175449223, "grad_norm": 1.78125, "learning_rate": 1.1534733441033925e-05, "loss": 0.0346, "step": 4000},
+    {"epoch": 1.6555622854835454, "grad_norm": 1.0, "learning_rate": 1.0323101777059775e-05, "loss": 0.033, "step": 4100},
+    {"epoch": 1.6959418534221684, "grad_norm": 1.8828125, "learning_rate": 9.111470113085623e-06, "loss": 0.0328, "step": 4200},
+    {"epoch": 1.7363214213607914, "grad_norm": 1.40625, "learning_rate": 7.89983844911147e-06, "loss": 0.0306, "step": 4300},
+    {"epoch": 1.7767009892994143, "grad_norm": 3.375, "learning_rate": 6.6882067851373186e-06, "loss": 0.0303, "step": 4400},
+    {"epoch": 1.8170805572380375, "grad_norm": 1.328125, "learning_rate": 5.4765751211631666e-06, "loss": 0.0312, "step": 4500}
+  ],
+  "logging_steps": 100,
+  "max_steps": 4952,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 2,
+  "save_steps": 502,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 0.0,
+  "train_batch_size": 6,
+  "trial_name": null,
+  "trial_params": null
+}
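
The logged learning rates are consistent with a plain linear decay to zero over `max_steps`, with no warmup visible in the logged range; the implied base LR of 6e-5 is back-solved from the entries, not stated anywhere in this commit. A spot-check:

```python
BASE_LR = 6e-5     # assumption, inferred from log_history
MAX_STEPS = 4952   # "max_steps" above

def linear_lr(step: int) -> float:
    """Linear decay to zero, i.e. HF's 'linear' scheduler with zero warmup steps."""
    return BASE_LR * (1 - step / MAX_STEPS)

assert abs(linear_lr(100) - 5.878836833602585e-05) < 1e-15
assert abs(linear_lr(4500) - 5.4765751211631666e-06) < 1e-15
```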
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf6873517e92cbd35ea7f156c1b77e377911a4b95ea262711e6378557b626cb8
+size 5176