import csv
import os

import datasets

csv.field_size_limit(int(1e6))  # to accommodate large CSV fields
_CITATION = """\
@mastersthesis{chumpolsathien_2020,
    title={Using Knowledge Distillation from Keyword Extraction to Improve the Informativeness of Neural Cross-lingual Summarization},
    author={Chumpolsathien, Nakhun},
    year={2020},
    school={Beijing Institute of Technology}
}
"""
_DESCRIPTION = """\
ThaiSum is a large-scale corpus for Thai text summarization obtained from several online news websites,
namely Thairath, ThaiPBS, Prachathai, and The Standard. This dataset consists of over 350,000 article
and summary pairs written by journalists.
"""

class ThaiSumConfig(datasets.BuilderConfig):
    """BuilderConfig for ThaiSum."""

    def __init__(self, **kwargs):
        """BuilderConfig for ThaiSum.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(ThaiSumConfig, self).__init__(**kwargs)

class Thaisum(datasets.GeneratorBasedBuilder):
    """ThaiSum: The largest dataset for Thai text summarization"""

    _DOWNLOAD_URL = "https://archive.org/download/thaisum_datasets/data.zip"
    _TRAIN_FILE = "train.csv"
    _VAL_FILE = "valid.csv"
    _TEST_FILE = "test.csv"

    BUILDER_CONFIGS = [
        ThaiSumConfig(
            name="thaisum",
            version=datasets.Version("1.0.0"),
            description="ThaiSum: The largest dataset for Thai text summarization",
        ),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "title": datasets.Value("string"),
                    "body": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "tags": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=("body", "summary"),
            homepage="https://github.com/nakhunchumpolsathien/ThaiSum",
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        arch_path = dl_manager.download_and_extract(self._DOWNLOAD_URL)
        data_dir = os.path.join(arch_path, "data")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(data_dir, self._TRAIN_FILE)},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": os.path.join(data_dir, self._VAL_FILE)},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": os.path.join(data_dir, self._TEST_FILE)},
            ),
        ]
    def _generate_examples(self, filepath):
        """Generate examples."""
        with open(filepath, encoding="utf-8") as f:
            csv_reader = csv.reader(f)
            next(csv_reader)  # skip header row
            # Column order in the CSVs: title, body, summary, type, tags, url
            for id_, row in enumerate(csv_reader):
                yield id_, {
                    "title": row[0],
                    "body": row[1],
                    "summary": row[2],
                    "type": row[3],
                    "tags": row[4],
                    "url": row[5],
                }
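

# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustration, not part of the original script):
# loading this script locally with `datasets.load_dataset`. The path
# "path/to/thaisum.py" is a placeholder for wherever this file is saved.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from datasets import load_dataset

    # Downloads data.zip from archive.org and builds the train/validation/test
    # splits defined above.
    thaisum = load_dataset("path/to/thaisum.py")
    print(thaisum["train"][0]["title"])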