"""NKJP1M: The manually annotated subcorpus of the National Corpus of Polish"""
import conllu
import datasets
logger = datasets.logging.get_logger(__name__)
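# Example usage (a sketch, not part of the loader itself): the config name is
# one of the four generated in BUILDER_CONFIGS below; the script path and any
# trust_remote_code handling depend on your local setup and `datasets` version.
#
#     from datasets import load_dataset
#     ds = load_dataset("nlprepl.py", name="by_name-nkjp")
#     print(ds["train"][0]["tokens"])
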
_CITATION = ""
BY_NAME = "by_name"
BY_TYPE = "by_type"
TAGSET_NKJP = "nkjp"
TAGSET_UD = "ud"
_DESCRIPTION = {
    BY_NAME: {
        TAGSET_NKJP: "NLPrePL divided by document name for NKJP tagset",
        TAGSET_UD: "NLPrePL divided by document name for UD tagset",
    },
    BY_TYPE: {
        TAGSET_NKJP: "NLPrePL divided by document type for NKJP tagset",
        TAGSET_UD: "NLPrePL divided by document type for UD tagset",
    },
}
_TYPES = [BY_NAME, BY_TYPE]
_TAGSETS = [TAGSET_NKJP, TAGSET_UD]
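# The two split schemes and two tagsets are crossed to build the description
# and URL tables above and below, as well as the builder configs.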
_URLS = {
    BY_NAME: {
        TAGSET_NKJP: {
            "train": "nkjp_tagset/fair_by_document_name/train_nlprepl-nkjp.conllu.gz",
            "dev": "nkjp_tagset/fair_by_document_name/dev_nlprepl-nkjp.conllu.gz",
            "test": "nkjp_tagset/fair_by_document_name/test_nlprepl-nkjp.conllu.gz",
        },
        TAGSET_UD: {
            "train": "ud_tagset/fair_by_document_name/train_nlprepl-ud.conllu.gz",
            "dev": "ud_tagset/fair_by_document_name/dev_nlprepl-ud.conllu.gz",
            "test": "ud_tagset/fair_by_document_name/test_nlprepl-ud.conllu.gz",
        },
    },
    BY_TYPE: {
        TAGSET_NKJP: {
            "train": "nkjp_tagset/fair_by_document_type/train_nlprepl-nkjp.conllu.gz",
            "dev": "nkjp_tagset/fair_by_document_type/dev_nlprepl-nkjp.conllu.gz",
            "test": "nkjp_tagset/fair_by_document_type/test_nlprepl-nkjp.conllu.gz",
        },
        TAGSET_UD: {
            "train": "ud_tagset/fair_by_document_type/train_nlprepl-ud.conllu.gz",
            "dev": "ud_tagset/fair_by_document_type/dev_nlprepl-ud.conllu.gz",
            "test": "ud_tagset/fair_by_document_type/test_nlprepl-ud.conllu.gz",
        },
    },
}
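# The paths above are relative to the dataset repository root; the download
# manager in _split_generators resolves and decompresses them (assumption:
# the *.conllu.gz files ship alongside this script in the same repository).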


class NLPrePLConfig(datasets.BuilderConfig):
    """BuilderConfig for NLPrePL."""

    def __init__(self, tagset, **kwargs):
        """BuilderConfig for NLPrePL.

        Args:
            tagset: which tagset the config uses ("nkjp" or "ud").
            **kwargs: keyword arguments forwarded to super.
        """
        super(NLPrePLConfig, self).__init__(**kwargs)
        self.tagset = tagset


class NLPrePL(datasets.GeneratorBasedBuilder):
    """NLPrePL dataset."""

    BUILDER_CONFIGS = [
        NLPrePLConfig(
            name=t + "-" + tagset,
            version=datasets.Version("1.0.0"),
            tagset=tagset,
            description=_DESCRIPTION[t][tagset],
        )
        for t in _TYPES for tagset in _TAGSETS
    ]
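    # The grid above yields four configs: "by_name-nkjp", "by_name-ud",
    # "by_type-nkjp" and "by_type-ud"; _info and _split_generators recover
    # the (split scheme, tagset) pair by splitting the name on "-".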

    def _info(self):
        dataset, tagset = self.config.name.split("-")
        return datasets.DatasetInfo(
            description=_DESCRIPTION[dataset][tagset],
            features=datasets.Features(
                {
                    "sent_id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                    # Per-token CoNLL-U ids: a sequence, since the generator
                    # yields one id per token rather than a single value.
                    "id": datasets.Sequence(datasets.Value("string")),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "lemmas": datasets.Sequence(datasets.Value("string")),
                    "upos": datasets.Sequence(datasets.Value("string")),
                    "xpos": datasets.Sequence(datasets.Value("string")),
                    "feats": datasets.Sequence(datasets.Value("string")),
                    "head": datasets.Sequence(datasets.Value("string")),
                    "deprel": datasets.Sequence(datasets.Value("string")),
                    "deps": datasets.Sequence(datasets.Value("string")),
                    "misc": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
            homepage="http://nkjp.pl/",
            citation=_CITATION,
        )
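    # The features mirror the CoNLL-U columns; structured columns (feats,
    # head, deprel, deps, misc) are serialized to strings in
    # _generate_examples rather than modeled as nested features.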

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dataset, tagset = self.config.name.split("-")
        urls = _URLS[dataset][tagset]
        downloaded_files = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"]}),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        example_id = 0  # running example index; avoids shadowing the builtin `id`
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, "r", encoding="utf-8") as f:
            # Stream sentences instead of materializing the whole file in memory.
            for sent in conllu.parse_incr(f):
                if "sent_id" in sent.metadata:
                    idx = sent.metadata["sent_id"]
                else:
                    idx = example_id
                tokens = [token["form"] for token in sent]
                if "text" in sent.metadata:
                    txt = sent.metadata["text"]
                else:
                    txt = " ".join(tokens)
                yield example_id, {
                    "sent_id": str(idx),
                    "text": txt,
                    # CoNLL-U ids may be ints (or ranges for multiword
                    # tokens), so stringify them for the string feature.
                    "id": [str(token["id"]) for token in sent],
                    "tokens": tokens,
                    "lemmas": [token["lemma"] for token in sent],
                    "upos": [token["upos"] for token in sent],
                    "xpos": [token["xpos"] for token in sent],
                    "feats": [str(token["feats"]) for token in sent],
                    "head": [str(token["head"]) for token in sent],
                    "deprel": [str(token["deprel"]) for token in sent],
                    "deps": [str(token["deps"]) for token in sent],
                    "misc": [str(token["misc"]) for token in sent],
                }
                example_id += 1
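

if __name__ == "__main__":
    # Minimal local smoke test (a sketch, not part of the loader): assumes the
    # *.conllu.gz files from _URLS sit next to this script; recent `datasets`
    # versions may additionally require trust_remote_code=True.
    ds = datasets.load_dataset(__file__, name="by_name-nkjp")
    print(ds)
    print(ds["train"][0]["tokens"])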