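"""Hugging Face `datasets` loading script for IndicWikiBio, the multilingual
WikiBio dataset released as part of the IndicNLG Suite."""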
import json
import os
import datasets
_CITATION = """\
@inproceedings{Kumar2022IndicNLGSM,
    title={IndicNLG Suite: Multilingual Datasets for Diverse NLG Tasks in Indic Languages},
    author={Aman Kumar and Himani Shrotriya and Prachi Sahu and Raj Dabre and Ratish Puduppully and Anoop Kunchukuttan and Amogh Mishra and Mitesh M. Khapra and Pratyush Kumar},
    year={2022},
    url={https://arxiv.org/abs/2203.05437}
}
"""
_DESCRIPTION = """\
This is the WikiBio dataset released as part of the IndicNLG Suite. Each
example has four fields: id, infobox, serialized infobox and summary. The
dataset covers nine languages: as, bn, hi, kn, ml, or, pa, ta and te, and
contains 57,426 examples in total.
"""
_HOMEPAGE = "https://indicnlp.ai4bharat.org/indicnlg-suite"
_LICENSE = "Creative Commons Attribution-NonCommercial 4.0 International Public License"
_URL = "https://huggingface.co/datasets/ai4bharat/IndicWikiBio/resolve/main/data/{}_WikiBio_v{}.zip"
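# The archive name embeds the language code and a two-component version, e.g.
# https://huggingface.co/datasets/ai4bharat/IndicWikiBio/resolve/main/data/hi_WikiBio_v1.0.zip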
_LANGUAGES = [
    "as",
    "bn",
    "hi",
    "kn",
    "ml",
    "or",
    "pa",
    "ta",
    "te",
]

class WikiBio(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    # One config per language; the config name is the language code itself.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            version=datasets.Version("1.0.0"),
        )
        for lang in _LANGUAGES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "infobox": datasets.Value("string"),
                    "serialized_infobox": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train, test and validation splits."""
        lang = str(self.config.name)
        # Drop the patch component of the version ("1.0.0" -> "1.0") to match
        # the archive naming scheme, e.g. hi_WikiBio_v1.0.zip.
        url = _URL.format(lang, self.VERSION.version_str[:-2])
        data_dir = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"{lang}_train.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"{lang}_test.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, f"{lang}_val.jsonl"),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples, one per JSONL line."""
        with open(filepath, encoding="utf-8") as f:
            for idx_, row in enumerate(f):
                data = json.loads(row)
                yield idx_, {
                    "id": data["id"],
                    "infobox": data["infobox"],
                    "serialized_infobox": data["serialized_infobox"],
                    "summary": data["summary"],
                }
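
# A minimal usage sketch, assuming this script is hosted as
# ai4bharat/IndicWikiBio on the Hugging Face Hub (config names are the
# language codes defined in _LANGUAGES above):
#
#     from datasets import load_dataset
#
#     wikibio_hi = load_dataset("ai4bharat/IndicWikiBio", "hi")
#     print(wikibio_hi["train"][0]["summary"])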