Update files from the datasets library (from 1.16.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0

Files changed:
- README.md (+1, -0)
- reuters21578.py (+52, -33)
README.md
CHANGED

@@ -1,4 +1,5 @@
 ---
+pretty_name: Reuters-21578 Text Categorization Collection
 languages:
 - en
 paperswithcode_id: reuters-21578
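`pretty_name` supplies the human-readable title shown for the dataset on the Hub. Applied to the file, the hunk gives README.md a YAML front matter that begins as follows (the metadata block continues beyond the lines shown in the hunk):

---
pretty_name: Reuters-21578 Text Categorization Collection
languages:
- en
paperswithcode_id: reuters-21578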
reuters21578.py
CHANGED

@@ -17,7 +17,6 @@
 """Reuters 21578"""
 
 
-import os
 from textwrap import dedent
 
 import datasets

@@ -253,33 +252,53 @@ class Reuters21578(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        …
-        …
+        archive = dl_manager.download(_DATA_URL)
+        filepaths = ["reut2-" + "%03d" % i + ".sgm" for i in range(22)]
         if self.config.name == "ModHayes":
             return [
                 datasets.SplitGenerator(
                     name=datasets.Split.TEST,
-                    gen_kwargs={…
+                    gen_kwargs={
+                        "filepaths": filepaths,
+                        "split": "PUBLISHED-TESTSET",
+                        "files": dl_manager.iter_archive(archive),
+                    },
                 ),
                 datasets.SplitGenerator(
                     name=datasets.Split.TRAIN,
-                    gen_kwargs={…
+                    gen_kwargs={
+                        "filepaths": filepaths,
+                        "split": "TRAINING-SET",
+                        "files": dl_manager.iter_archive(archive),
+                    },
                 ),
             ]
         else:
             return [
-                datasets.SplitGenerator(…
-                …
-                …
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={"filepaths": filepaths, "split": "TEST", "files": dl_manager.iter_archive(archive)},
+                ),
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={"filepaths": filepaths, "split": "TRAIN", "files": dl_manager.iter_archive(archive)},
+                ),
+                datasets.SplitGenerator(
+                    name="unused",
+                    gen_kwargs={
+                        "filepaths": filepaths,
+                        "split": "NOT-USED",
+                        "files": dl_manager.iter_archive(archive),
+                    },
+                ),
             ]
 
-    def _generate_examples(self, …
+    def _generate_examples(self, filepaths, split, files):
         """This function returns the examples in the raw (text) form."""
-        for …
-        …
-            file…
-        …
-                line = f.readline()
+        for path, f in files:
+            if path in filepaths:
+                # only the file reut2-017 has one line non UTF-8 encoded so we can ignore it
+                line = f.readline().decode("utf-8", errors="ignore")
                 while line:
                     if line.startswith("<REUTERS"):
                         lewis_split = ""

@@ -301,7 +320,7 @@ class Reuters21578(datasets.GeneratorBasedBuilder):
                         old_id = line[4].split("=")[1]
                         new_id = line[5].split("=")[1][:-1]
                         has_topic = line[1].split("=")[1]
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
                         if (
                             (self.config.name == "ModHayes" and split not in cgis_split)
                             or (

@@ -325,7 +344,7 @@ class Reuters21578(datasets.GeneratorBasedBuilder):
                         ):  # skip example that are not in the current split
                             li = line
                             while li and not li.startswith("<REUTERS"):
-                                li = f.readline()
+                                li = f.readline().decode("utf-8", errors="ignore")
                             if li:
                                 line = li
                     elif line.startswith("<TOPICS>"):

@@ -333,65 +352,65 @@ class Reuters21578(datasets.GeneratorBasedBuilder):
                             line = line.split("<D>")
                             topics = [topic.replace("</D>", "") for topic in line[1:]]
                             topics = [topic.replace("</TOPICS>\n", "") for topic in topics]
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
                     elif line.startswith("<PLACES>"):
                         if line.replace("\n", "") != "<PLACES></PLACES>":
                             line = line.split("<D>")
                             places = [place.replace("</D>", "") for place in line[1:]]
                             places = [place.replace("</PLACES>\n", "") for place in places]
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
                     elif line.startswith("<PEOPLE>"):
                         if line.replace("\n", "") != "<PEOPLE></PEOPLE>":
                             line = line.split("<D>")
                             people = [p.replace("</D>", "") for p in line[1:]]
                             people = [p.replace("</PEOPLE>\n", "") for p in people]
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
                     elif line.startswith("<ORGS>"):
                         if line.replace("\n", "") != "<ORGS></ORGS>":
                             line = line.split("<D>")
                             orgs = [org.replace("</D>", "") for org in line[1:]]
                             orgs = [org.replace("</ORGS>\n", "") for org in orgs]
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
                     elif line.startswith("<EXCHANGES>"):
                         if line.replace("\n", "") != "<EXCHANGES></EXCHANGES>":
                             line = line.split("<D>")
                             exchanges = [ex.replace("</D>", "") for ex in line[1:]]
                             exchanges = [ex.replace("</EXCHANGES>\n", "") for ex in exchanges]
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
                     elif line.startswith("<DATE>"):
                         date = line.replace("\n", "")
                         date = line[6:-8]
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
                     elif line.startswith("<TITLE>"):
                         title = line[7:-9]
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
                     elif "*<TITLE>" in line:
                         # These lines start with a variable number of * chars
                         title = line.split("*<TITLE>")[1][:-1]
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
                         while "</TITLE>" not in line:
                             # Convert any \n in TYPE="BRIEF" text to spaces to match other titles
                             title += " " + line[:-1]
-                            line = f.readline()
+                            line = f.readline().decode("utf-8", errors="ignore")
                     elif "<BODY>" in line:
                         text = line.split("<BODY>")[1]
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
                         while "</BODY>" not in line:
                             text += line
-                            line = f.readline()
+                            line = f.readline().decode("utf-8", errors="ignore")
                     elif line.startswith('<TEXT TYPE="UNPROC">'):
                         text_type = '"UNPROC"'
                         text = line[20:]
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
                         while "</TEXT>" not in line:
                             text += line
-                            line = f.readline()
+                            line = f.readline().decode("utf-8", errors="ignore")
                     elif line.startswith('<TEXT TYPE="BRIEF">'):
                         text_type = '"BRIEF"'
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
                     elif line.startswith("<TEXT>"):
                         text_type = '"NORM"'
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
                     elif line.startswith("</REUTERS>"):
                         yield new_id, {
                             "lewis_split": lewis_split,

@@ -408,6 +427,6 @@ class Reuters21578(datasets.GeneratorBasedBuilder):
                             "text": text,
                             "text_type": text_type,
                         }
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
                     else:
-                        line = f.readline()
+                        line = f.readline().decode("utf-8", errors="ignore")
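Because the archive is now consumed lazily via iter_archive instead of being extracted up front, the updated script should also work in streaming mode, which appears to be the motivation for this 1.16.0 sync. An untested sketch of how the loader would be exercised (assuming a datasets version with streaming support):

from datasets import load_dataset

# "ModHayes" is one of the configs this script defines
# (alongside "ModApte" and "ModLewis").
ds = load_dataset("reuters21578", "ModHayes", split="train", streaming=True)
print(next(iter(ds)))  # first raw example: title, text, topics, places, ...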