""" |
|
This template serves as a starting point for contributing a dataset to the Nusantara Dataset repo. |
|
|
|
When modifying it for your dataset, look for TODO items that offer specific instructions. |
|
|
|
Full documentation on writing dataset loading scripts can be found here: |
|
https://huggingface.co/docs/datasets/add_dataset.html |
|
|
|
To create a dataset loading script you will create a class and implement 3 methods: |
|
* `_info`: Establishes the schema for the dataset, and returns a datasets.DatasetInfo object. |
|
* `_split_generators`: Downloads and extracts data for each split (e.g. train/val/test) or associate local data with each split. |
|
* `_generate_examples`: Creates examples from data on disk that conform to each schema defined in `_info`. |
|
|
|
TODO: Before submitting your script, delete this doc string and replace it with a description of your dataset. |
|
|
|
[nusantara_schema_name] = (kb, pairs, qa, text, t2t, entailment) |
|
""" |
|
import os
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import Tasks
|
_CITATION = """\ |
|
@article{nllb2022, |
|
author = {NLLB Team, Marta R. Costa-jussà, James Cross, Onur Çelebi, Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula, Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews, Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, Jeff Wang}, |
|
title = {No Language Left Behind: Scaling Human-Centered Machine Translation}, |
|
year = {2022} |
|
} |
|
""" |
|
|
|
|
|
|
|
_DATASETNAME = "nllb_seed" |
|
_LANGUAGES = ["ace", "bjn", "bug", "eng"] |
|
_LANGUAGE_MAP = {"ace": "Aceh", "bjn": "Banjar", "bug": "Bugis"} |
|
_LANG_CODE_MAP = {"Aceh": "ace", "Banjar": "bjn", "Bugis": "bug"} |
|
_LANGUAGE_PAIR = [("ace", "eng"), ("eng", "ace"), ("bjn", "eng"), ("eng", "bjn"), ("bug", "eng"), ("eng", "bug")] |
|
_DESCRIPTION = """\ |
|
No Language Left Behind Seed Data |
|
NLLB Seed is a set of professionally-translated sentences in the Wikipedia domain. Data for NLLB-Seed was sampled from Wikimedia’s List of articles every Wikipedia should have, a collection of topics in different fields of knowledge and human activity. NLLB-Seed consists of around six thousand sentences in 39 languages. NLLB-Seed is meant to be used for training rather than model evaluation. Due to this difference, NLLB-Seed does not go through the human quality assurance process present in FLORES-200. |
|
""" |
|
_HOMEPAGE = "https://github.com/facebookresearch/flores/tree/main/nllb_seed" |
|
_LICENSE = "CC-BY-SA 4.0" |
|
_URLS = {
    _DATASETNAME: "https://tinyurl.com/NLLBSeed",
}
|
_SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION] |
|
_SOURCE_VERSION = "1.0.0" |
|
_NUSANTARA_VERSION = "1.0.0" |
|
_LOCAL = False |
|
|
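# Builds one NusantaraConfig per local language in _LANGUAGE_MAP (ace, bjn, bug)
# and per schema ("source" or "nusantara_t2t"); config names follow the pattern
# "nllb_seed_{lang}_{schema}", e.g. "nllb_seed_ace_source".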
def nusantara_config_constructor(lang, schema, version): |
|
if lang == "": |
|
raise ValueError(f"Invalid lang {lang}") |
|
|
|
if schema != "source" and schema != "nusantara_t2t": |
|
raise ValueError(f"Invalid schema: {schema}") |
|
|
|
return NusantaraConfig( |
|
name="nllb_seed_{lang}_{schema}".format(lang=lang, schema=schema), |
|
version=datasets.Version(version), |
|
description="nllb_seed {schema} schema for {lang} language".format(lang=_LANGUAGE_MAP[lang], schema=schema), |
|
schema=schema, |
|
subset_id="nllb_seed", |
|
) |
|
|
class NLLBSeed(datasets.GeneratorBasedBuilder): |
|
"""TODO: Short description of my dataset.""" |
|
|
|
SOURCE_VERSION = datasets.Version(_SOURCE_VERSION) |
|
NUSANTARA_VERSION = datasets.Version(_NUSANTARA_VERSION) |
|
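    # One "source" and one "nusantara_t2t" config for each of the three local
    # languages in _LANGUAGE_MAP, i.e. six configs in total.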
BUILDER_CONFIGS = [nusantara_config_constructor(lang, "source", _SOURCE_VERSION) for lang in _LANGUAGE_MAP] + [nusantara_config_constructor(lang, "nusantara_t2t", _NUSANTARA_VERSION) for lang in _LANGUAGE_MAP] |
|
""" |
|
BUILDER_CONFIGS = [ |
|
NusantaraConfig( |
|
name="nllb_seed_source", |
|
version=SOURCE_VERSION, |
|
description="nllb_seed source schema", |
|
schema="source", |
|
subset_id="nllb_seed", |
|
), |
|
NusantaraConfig( |
|
name="nllb_seed_nusantara_t2t", |
|
version=NUSANTARA_VERSION, |
|
description="nllb_seed Nusantara schema", |
|
schema="nusantara_t2t", |
|
subset_id="nllb_seed", |
|
), |
|
] |
|
""" |
|
DEFAULT_CONFIG_NAME = "nllb_seed_ace_source" |
|
|
|
def _info(self) -> datasets.DatasetInfo: |
if self.config.schema == "source": |
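            # Source schema: each example carries the parallel pair as one-element
            # lists, mirroring the line-aligned NLLB-Seed text files.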
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "src": [datasets.Value("string")],
                    "tgt": [datasets.Value("string")],
                }
            )
        elif self.config.schema == "nusantara_t2t":
            features = schemas.text2text_features
|
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
|
|
|
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]: |
|
"""Returns SplitGenerators.""" |
urls = _URLS[_DATASETNAME] |
|
data_dir = Path(dl_manager.download_and_extract(urls)) / "NLLB-Seed" |
|
data_subdir = {"ace": os.path.join(data_dir, "ace_Latn-eng_Latn"), "bjn": os.path.join(data_dir, "bjn_Latn-eng_Latn"), "bug": os.path.join(data_dir, "bug_Latn-eng_Latn")} |
|
lang = self.config.name.split("_")[2] |
|
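        # Each language-pair folder of the extracted archive contains two
        # line-aligned plain-text files (e.g. ace_Latn and eng_Latn). Both paths
        # are passed to _generate_examples, keyed by "<lang>" and "<lang>_eng".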
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": {lang: os.path.join(data_subdir[lang], lang + "_Latn"), lang + "_eng": os.path.join(data_subdir[lang], "eng_Latn"), "split": "train"}},
            )
        ]
|
    def _generate_examples(self, filepath: Dict[str, str]) -> Tuple[int, Dict]:
|
"""Yields examples as (key, example) tuples.""" |
        lang_code = self.config.name.split("_")[2]
|
|
|
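        # The two files are line-aligned: line i of the English file is the
        # translation of line i of the local-language file.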
        with open(filepath[lang_code + "_eng"], "r", encoding="utf-8") as f:
            eng_text = f.readlines()
        with open(filepath[lang_code], "r", encoding="utf-8") as f:
            lang_text = f.readlines()
|
|
|
eng_text = list(map(str.strip, eng_text)) |
|
lang_text = list(map(str.strip, lang_text)) |
|
""" |
|
if self.config.schema == "source": |
|
# TODO: yield (key, example) tuples in the original dataset schema |
|
for key, example in thing: |
|
yield key, example |
|
|
|
elif self.config.schema == "nusantara_t2t": |
|
# TODO: yield (key, example) tuples in the nusantara schema |
|
for key, example in thing: |
|
yield key, example |
|
""" |
|
        if self.config.schema == "source":
            for idx, (src, tgt) in enumerate(zip(lang_text, eng_text)):
                row = {
                    "id": str(idx),
                    "src": [src],
                    "tgt": [tgt],
                }
                yield idx, row
        elif self.config.schema == "nusantara_t2t":
            for idx, (src, tgt) in enumerate(zip(lang_text, eng_text)):
                row = {
                    "id": str(idx),
                    "text_1": src,
                    "text_2": tgt,
                    "text_1_name": lang_code,
                    "text_2_name": "eng",
                }
                yield idx, row
        else:
            raise ValueError(f"Invalid config: {self.config.name}")
|
|
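# Minimal usage sketch (assuming this script is saved locally as nllb_seed.py);
# config names follow the "nllb_seed_{lang}_{schema}" pattern defined above:
#   datasets.load_dataset("nllb_seed.py", name="nllb_seed_ace_nusantara_t2t")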
if __name__ == "__main__": |
|
datasets.load_dataset(__file__) |
|
|