ramybaly
committed on
Commit
·
0b7f395
1
Parent(s):
d0f678c
updated field names from 'ner_tags' to 'tags'
Browse files- conll2012.py +16 -18
conll2012.py
CHANGED
@@ -185,9 +185,6 @@ class Conll2012(datasets.GeneratorBasedBuilder):
|
|
185 |
def _split_generators(self, dl_manager):
|
186 |
"""Returns SplitGenerators."""
|
187 |
urls_to_download = {
|
188 |
-
# "train": f"{_URL}{_TRAINING_FILE}",
|
189 |
-
# "validation": f"{_URL}{_DEV_FILE}",
|
190 |
-
# "test": f"{_URL}{_TEST_FILE}",
|
191 |
'train': 'train.txt',
|
192 |
'validation': 'validation.txt',
|
193 |
'test': 'test.txt',
|
@@ -195,9 +192,9 @@ class Conll2012(datasets.GeneratorBasedBuilder):
|
|
195 |
downloaded_files = dl_manager.download_and_extract(urls_to_download)
|
196 |
|
197 |
return [
|
198 |
-
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={
|
199 |
-
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={
|
200 |
-
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={
|
201 |
]
|
202 |
|
203 |
def _generate_examples(self, filepath):
|
@@ -208,32 +205,33 @@ class Conll2012(datasets.GeneratorBasedBuilder):
|
|
208 |
|
209 |
guid = 0
|
210 |
tokens = []
|
|
|
211 |
pos_tags = []
|
212 |
-
ner_tags = []
|
213 |
|
214 |
for line in lines:
|
215 |
if line.startswith("-DOCSTART-") or line.strip() == "" or line == "\n":
|
216 |
if tokens:
|
217 |
yield guid, {
|
218 |
-
|
219 |
-
|
220 |
-
|
221 |
-
|
222 |
}
|
223 |
guid += 1
|
224 |
tokens = []
|
|
|
225 |
pos_tags = []
|
226 |
-
ner_tags = []
|
227 |
else:
|
228 |
-
# conll2012 tokens are tab-
|
229 |
splits = line.split('\t')
|
230 |
tokens.append(splits[0])
|
231 |
pos_tags.append(splits[1])
|
232 |
-
|
|
|
233 |
# last example
|
234 |
yield guid, {
|
235 |
-
|
236 |
-
|
237 |
-
|
238 |
-
|
239 |
}
|
|
|
185 |
def _split_generators(self, dl_manager):
|
186 |
"""Returns SplitGenerators."""
|
187 |
urls_to_download = {
|
|
|
|
|
|
|
188 |
'train': 'train.txt',
|
189 |
'validation': 'validation.txt',
|
190 |
'test': 'test.txt',
|
|
|
192 |
downloaded_files = dl_manager.download_and_extract(urls_to_download)
|
193 |
|
194 |
return [
|
195 |
+
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': downloaded_files['train']}),
|
196 |
+
datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepath': downloaded_files['validation']}),
|
197 |
+
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': downloaded_files['test']}),
|
198 |
]
|
199 |
|
200 |
def _generate_examples(self, filepath):
|
|
|
205 |
|
206 |
guid = 0
|
207 |
tokens = []
|
208 |
+
tags = []
|
209 |
pos_tags = []
|
|
|
210 |
|
211 |
for line in lines:
|
212 |
if line.startswith("-DOCSTART-") or line.strip() == "" or line == "\n":
|
213 |
if tokens:
|
214 |
yield guid, {
|
215 |
+
'id': str(guid),
|
216 |
+
'tokens': tokens,
|
217 |
+
'tags': tags,
|
218 |
+
'pos_tags': pos_tags,
|
219 |
}
|
220 |
guid += 1
|
221 |
tokens = []
|
222 |
+
tags = []
|
223 |
pos_tags = []
|
|
|
224 |
else:
|
225 |
+
# conll2012 tokens are tab-separated
|
226 |
splits = line.split('\t')
|
227 |
tokens.append(splits[0])
|
228 |
pos_tags.append(splits[1])
|
229 |
+
tags.append(splits[3].rstrip())
|
230 |
+
|
231 |
# last example
|
232 |
yield guid, {
|
233 |
+
'id': str(guid),
|
234 |
+
'tokens': tokens,
|
235 |
+
'tags': tags,
|
236 |
+
'pos_tags': pos_tags,
|
237 |
}
|