"""TIS Daten aus Hamburg""" | |
import datasets | |
# Citation for the dataset (see the LI Hamburg website).
_CITATION = """\
@article{lif-15,
title = "LIF 15 LI Hamburg",
journal = "Data",
volume = "2",
number = "2",
year = "2023",
url = "https://li-hamburg.de",
pages = "313--330",
}
"""
# Description shown on the dataset page.
_DESCRIPTION = """\
Data from LIF 15 for the TIS continuing-education (Fortbildungen) system.
"""
_HOMEPAGE = "https://li-hamburg.de"
# TODO: Add the licence for the dataset here if you can find it
_LICENSE = "LDC User Agreement for Non-Members"
# The Hugging Face datasets library does not host the data itself; it only points to the original files.
# The URLs can be an arbitrary nested dict/list (see `_split_generators` below).
# Assumption: the raw text files live in the alexkueck/tis dataset repository on the Hugging Face Hub.
_URL = "https://huggingface.co/datasets/alexkueck/tis/resolve/main/"
_TRAINING_FILE = "tis.train.txt"
# _DEV_FILE = "tis.valid.txt"
_TEST_FILE = "tis.test.txt"
class TISConfig(datasets.BuilderConfig):
    """BuilderConfig for the TIS dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for TIS.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(TISConfig, self).__init__(**kwargs)


class TIS(datasets.GeneratorBasedBuilder):
    """Load the TIS dataset."""
    VERSION = datasets.Version("1.1.0")

    # Only a single configuration is defined. Additional TISConfig entries could be
    # added here and selected with datasets.load_dataset("alexkueck/tis", "<config_name>").
    BUILDER_CONFIGS = [
        TISConfig(
            name="tis",
            version=VERSION,
            description="Load the TIS dataset",
        ),
    ]
    def _info(self):
        features = datasets.Features({"sentence": datasets.Value("string")})
        return datasets.DatasetInfo(
            # Description shown on the dataset page.
            description=_DESCRIPTION,
            # Columns of the dataset and their types; here a single text column.
            features=features,
            # There is no canonical (input, target) pair, so no supervised keys are set
            # (they would only be used with as_supervised=True in builder.as_dataset).
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Downloads/extracts the data and defines the splits. If several configurations
        # were listed in BUILDER_CONFIGS, the one selected by the user would be available
        # as self.config.name.
        # dl_manager is a datasets.download.DownloadManager used to download and extract URLs.
        # It accepts any nested list/dict and returns the same structure with each URL
        # replaced by the path to the local (cached, extracted) file.
        my_urls = {
            "train": f"{_URL}{_TRAINING_FILE}",
            # "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        data_dir = dl_manager.download_and_extract(my_urls)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_dir["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_dir["test"]}),
            # datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_dir["dev"]}),
        ]
    def _generate_examples(self, filepath):
        """Yields examples."""
        # Receives the gen_kwargs defined in _split_generators, opens the given file and
        # yields (key, example) tuples. The key is only kept for legacy (TFDS) reasons.
        with open(filepath, encoding="utf-8") as f:
            for id_, line in enumerate(f):
                # One example per line; surrounding whitespace is stripped.
                line = line.strip()
                yield id_, {"sentence": line}
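

# Minimal usage sketch, not part of the loading script itself. It assumes the script
# and the tis.train.txt / tis.test.txt files are published in the alexkueck/tis
# dataset repository on the Hugging Face Hub (the repository id is taken from _URL above).
if __name__ == "__main__":
    # Load both splits defined above; each example carries a single "sentence" field.
    tis = datasets.load_dataset("alexkueck/tis", "tis")
    print(tis["train"][0]["sentence"])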