added tokenizer on top of base model
- README.md +250 -0
- config.json +24 -0
- flax_model.msgpack +3 -0
- merges.txt +0 -0
- model.safetensors +3 -0
- pytorch_model.bin +3 -0
- rust_model.ot +3 -0
- special_tokens_map.json +51 -0
- tf_model.h5 +3 -0
- tokenizer.json +0 -0
- tokenizer_config.json +59 -0
- vocab.json +0 -0
README.md
ADDED
@@ -0,0 +1,250 @@
---
language: en
license: cc-by-4.0
datasets:
- squad_v2
model-index:
- name: deepset/roberta-base-squad2
  results:
  - task:
      type: question-answering
      name: Question Answering
    dataset:
      name: squad_v2
      type: squad_v2
      config: squad_v2
      split: validation
    metrics:
    - type: exact_match
      value: 79.9309
      name: Exact Match
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMDhhNjg5YzNiZGQ1YTIyYTAwZGUwOWEzZTRiYzdjM2QzYjA3ZTUxNDM1NjE1MTUyMjE1MGY1YzEzMjRjYzVjYiIsInZlcnNpb24iOjF9.EH5JJo8EEFwU7osPz3s7qanw_tigeCFhCXjSfyN0Y1nWVnSfulSxIk_DbAEI5iE80V4EKLyp5-mYFodWvL2KDA
    - type: f1
      value: 82.9501
      name: F1
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMjk5ZDYwOGQyNjNkMWI0OTE4YzRmOTlkY2JjNjQ0YTZkNTMzMzNkYTA0MDFmNmI3NjA3NjNlMjhiMDQ2ZjJjNSIsInZlcnNpb24iOjF9.DDm0LNTkdLbGsue58bg1aH_s67KfbcmkvL-6ZiI2s8IoxhHJMSf29H_uV2YLyevwx900t-MwTVOW3qfFnMMEAQ
    - type: total
      value: 11869
      name: total
      verified: true
      verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMGFkMmI2ODM0NmY5NGNkNmUxYWViOWYxZDNkY2EzYWFmOWI4N2VhYzY5MGEzMTVhOTU4Zjc4YWViOGNjOWJjMCIsInZlcnNpb24iOjF9.fexrU1icJK5_MiifBtZWkeUvpmFISqBLDXSQJ8E6UnrRof-7cU0s4tX_dIsauHWtUpIHMPZCf5dlMWQKXZuAAA
  - task:
      type: question-answering
      name: Question Answering
    dataset:
      name: squad
      type: squad
      config: plain_text
      split: validation
    metrics:
    - type: exact_match
      value: 85.289
      name: Exact Match
    - type: f1
      value: 91.841
      name: F1
  - task:
      type: question-answering
      name: Question Answering
    dataset:
      name: adversarial_qa
      type: adversarial_qa
      config: adversarialQA
      split: validation
    metrics:
    - type: exact_match
      value: 29.500
      name: Exact Match
    - type: f1
      value: 40.367
      name: F1
  - task:
      type: question-answering
      name: Question Answering
    dataset:
      name: squad_adversarial
      type: squad_adversarial
      config: AddOneSent
      split: validation
    metrics:
    - type: exact_match
      value: 78.567
      name: Exact Match
    - type: f1
      value: 84.469
      name: F1
  - task:
      type: question-answering
      name: Question Answering
    dataset:
      name: squadshifts amazon
      type: squadshifts
      config: amazon
      split: test
    metrics:
    - type: exact_match
      value: 69.924
      name: Exact Match
    - type: f1
      value: 83.284
      name: F1
  - task:
      type: question-answering
      name: Question Answering
    dataset:
      name: squadshifts new_wiki
      type: squadshifts
      config: new_wiki
      split: test
    metrics:
    - type: exact_match
      value: 81.204
      name: Exact Match
    - type: f1
      value: 90.595
      name: F1
  - task:
      type: question-answering
      name: Question Answering
    dataset:
      name: squadshifts nyt
      type: squadshifts
      config: nyt
      split: test
    metrics:
    - type: exact_match
      value: 82.931
      name: Exact Match
    - type: f1
      value: 90.756
      name: F1
  - task:
      type: question-answering
      name: Question Answering
    dataset:
      name: squadshifts reddit
      type: squadshifts
      config: reddit
      split: test
    metrics:
    - type: exact_match
      value: 71.550
      name: Exact Match
    - type: f1
      value: 82.939
      name: F1
---

# roberta-base for QA

This is the [roberta-base](https://huggingface.co/roberta-base) model, fine-tuned using the [SQuAD2.0](https://huggingface.co/datasets/squad_v2) dataset. It has been trained on question-answer pairs, including unanswerable questions, for the task of Question Answering.

## Overview
**Language model:** roberta-base
**Language:** English
**Downstream-task:** Extractive QA
**Training data:** SQuAD 2.0
**Eval data:** SQuAD 2.0
**Code:** See [an example QA pipeline on Haystack](https://haystack.deepset.ai/tutorials/first-qa-system)
**Infrastructure:** 4x Tesla V100

## Hyperparameters

```
batch_size = 96
n_epochs = 2
base_LM_model = "roberta-base"
max_seq_len = 386
learning_rate = 3e-5
lr_schedule = LinearWarmup
warmup_proportion = 0.2
doc_stride = 128
max_query_length = 64
```
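
At inference time, `max_seq_len` and `doc_stride` determine how a context longer than one window is split into overlapping chunks, so that no answer span is lost at a window boundary. Below is a minimal sketch of that windowing using the standard `transformers` tokenizer API, with the values from the table above; the repeated context string is toy data:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")

question = "Why is model conversion important?"
long_context = "The option to convert models between FARM and transformers " * 100

# Each window holds at most 386 tokens; consecutive windows share 128
# overlapping context tokens so an answer cut off in one window is
# still fully contained in the next.
encoded = tokenizer(
    question,
    long_context,
    max_length=386,
    stride=128,
    truncation="only_second",        # split only the context, never the question
    return_overflowing_tokens=True,
)
print(len(encoded["input_ids"]))     # number of overlapping windows produced
```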

## Using a distilled model instead
Please note that we have also released a distilled version of this model called [deepset/tinyroberta-squad2](https://huggingface.co/deepset/tinyroberta-squad2). The distilled model has comparable prediction quality and runs at twice the speed of the base model.

## Usage

### In Haystack
Haystack is an NLP framework by deepset. You can use this model in a Haystack pipeline to do question answering at scale (over many documents). To load the model in [Haystack](https://github.com/deepset-ai/haystack/):
```python
from haystack.nodes import FARMReader, TransformersReader  # Haystack 1.x

reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2")
# or
reader = TransformersReader(model_name_or_path="deepset/roberta-base-squad2", tokenizer="deepset/roberta-base-squad2")
```
For a complete example of `roberta-base-squad2` being used for Question Answering, check out the [Tutorials in Haystack Documentation](https://haystack.deepset.ai/tutorials/first-qa-system).
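
You can also try the reader directly on a few in-memory documents, outside a full pipeline. A minimal sketch, assuming the Haystack 1.x `predict` API and its `Document` class:

```python
# Quick check of the reader on an in-memory document
# (sketch; assumes Haystack 1.x).
from haystack import Document

result = reader.predict(
    query="Why is model conversion important?",
    documents=[Document(content="The option to convert models between FARM and "
                                "transformers gives freedom to the user.")],
    top_k=1,
)
print(result["answers"][0].answer)
```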

### In Transformers
```python
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

model_name = "deepset/roberta-base-squad2"

# a) Get predictions
nlp = pipeline('question-answering', model=model_name, tokenizer=model_name)
QA_input = {
    'question': 'Why is model conversion important?',
    'context': 'The option to convert models between FARM and transformers gives freedom to the user and lets people easily switch between frameworks.'
}
res = nlp(QA_input)

# b) Load model & tokenizer
model = AutoModelForQuestionAnswering.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
```
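
Because the model was trained on SQuAD2.0, it can also decline to answer when the context contains no answer. The question-answering pipeline exposes this through its standard `handle_impossible_answer` flag:

```python
# Allow the pipeline to return an empty answer when the context
# does not actually contain one (SQuAD2.0-style behaviour).
res = nlp(QA_input, handle_impossible_answer=True)
print(res)  # e.g. {'score': ..., 'start': ..., 'end': ..., 'answer': ...}
```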

## Performance
Evaluated on the SQuAD 2.0 dev set with the [official eval script](https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/).

```
"exact": 79.87029394424324,
"f1": 82.91251169582613,

"total": 11873,
"HasAns_exact": 77.93522267206478,
"HasAns_f1": 84.02838248389763,
"HasAns_total": 5928,
"NoAns_exact": 81.79983179142137,
"NoAns_f1": 81.79983179142137,
"NoAns_total": 5945
```
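
As an alternative to the official script, the same exact-match and F1 definitions are available through the `evaluate` library's `squad_v2` metric. A sketch with toy data, illustrating the expected prediction and reference formats:

```python
import evaluate

squad_v2_metric = evaluate.load("squad_v2")
score = squad_v2_metric.compute(
    # SQuAD2.0 predictions carry a no-answer probability alongside the text.
    predictions=[{"id": "1", "prediction_text": "Paris",
                  "no_answer_probability": 0.0}],
    references=[{"id": "1",
                 "answers": {"text": ["Paris"], "answer_start": [0]}}],
)
print(score["exact"], score["f1"])
```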

## Authors
**Branden Chan:** [email protected]
**Timo Möller:** [email protected]
**Malte Pietsch:** [email protected]
**Tanay Soni:** [email protected]

## About us

<div class="grid lg:grid-cols-2 gap-x-4 gap-y-3">
    <div class="w-full h-40 object-cover mb-2 rounded-lg flex items-center justify-center">
         <img alt="" src="https://raw.githubusercontent.com/deepset-ai/.github/main/deepset-logo-colored.png" class="w-40"/>
    </div>
    <div class="w-full h-40 object-cover mb-2 rounded-lg flex items-center justify-center">
         <img alt="" src="https://raw.githubusercontent.com/deepset-ai/.github/main/haystack-logo-colored.png" class="w-40"/>
    </div>
</div>

[deepset](http://deepset.ai/) is the company behind the open-source NLP framework [Haystack](https://haystack.deepset.ai/), which is designed to help you build production-ready NLP systems for question answering, summarization, ranking, and more.

Some of our other work:
- [Distilled roberta-base-squad2 (aka "tinyroberta-squad2")](https://huggingface.co/deepset/tinyroberta-squad2)
- [German BERT (aka "bert-base-german-cased")](https://deepset.ai/german-bert)
- [GermanQuAD and GermanDPR datasets and models (aka "gelectra-base-germanquad", "gbert-base-germandpr")](https://deepset.ai/germanquad)

## Get in touch and join the Haystack community

<p>For more info on Haystack, visit our <strong><a href="https://github.com/deepset-ai/haystack">GitHub</a></strong> repo and <strong><a href="https://docs.haystack.deepset.ai">Documentation</a></strong>.

We also have a <strong><a class="h-7" href="https://haystack.deepset.ai/community">Discord community open to everyone!</a></strong></p>

[Twitter](https://twitter.com/deepset_ai) | [LinkedIn](https://www.linkedin.com/company/deepset-ai/) | [Discord](https://haystack.deepset.ai/community) | [GitHub Discussions](https://github.com/deepset-ai/haystack/discussions) | [Website](https://deepset.ai)

By the way: [we're hiring!](http://www.deepset.ai/jobs)
config.json
ADDED
@@ -0,0 +1,24 @@
{
  "architectures": [
    "RobertaForQuestionAnswering"
  ],
  "attention_probs_dropout_prob": 0.1,
  "bos_token_id": 0,
  "eos_token_id": 2,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "language": "english",
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 514,
  "model_type": "roberta",
  "name": "Roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "type_vocab_size": 1,
  "vocab_size": 50265
}
flax_model.msgpack
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6a8d759d881d9c1b39dbf4ee451fb8a8c2d43ccbd180218863a54ffd9b4d2447
size 496233457
merges.txt
ADDED
The diff for this file is too large to render.
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ac5db66fdcfecb400345d09787b71009d60805ef9883451071669cf951b5e2c7
size 496254442
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e0b64ccefc1bcb569b604baea27eb873e5482fdf6eb3ceff1fb5368397db5aed
size 496313727
rust_model.ot
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5a16ed126bbc8c4cf794406bac0c7946f62d0f175c02dc54d77a00a6255597ed
size 498638704
special_tokens_map.json
ADDED
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
tf_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9b672dd16f09f6f805d407800278e60217b9d7c040df1dde5098765a40cdc88a
size 496513256
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1,59 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<pad>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "50264": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": true,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": true,
  "cls_token": "<s>",
  "do_lower_case": false,
  "eos_token": "</s>",
  "errors": "replace",
  "full_tokenizer_file": null,
  "mask_token": "<mask>",
  "model_max_length": 512,
  "pad_token": "<pad>",
  "sep_token": "</s>",
  "tokenizer_class": "RobertaTokenizer",
  "trim_offsets": true,
  "unk_token": "<unk>"
}
vocab.json
ADDED
The diff for this file is too large to render.