import os

import datasets
import pandas as pd

_CITATION = """"""

_DESCRIPTION = """\
This dataset is designed to be used in training models that restore punctuation marks
from the output of an Automatic Speech Recognition system for the Polish language.
"""

_HOMEPAGE = "https://github.com/poleval/2021-punctuation-restoration"

_URL = "https://huggingface.co/datasets/lruczu/2021-punctuation-restoration/resolve/main"

# Each split directory on the Hub holds an `in.tsv` (unpunctuated input)
# and an `expected.tsv` (punctuated reference), read in _generate_examples.
_PATHS = {
    "train": os.path.join(_URL, "train"),
    "test-A": os.path.join(_URL, "test-A"),
}


class PunctuationDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for PunctuationDataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for PunctuationDataset.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(PunctuationDatasetConfig, self).__init__(**kwargs)


class PunctuationDataset(datasets.GeneratorBasedBuilder):
    """Punctuation restoration dataset for Polish ASR output (PolEval 2021)."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        PunctuationDatasetConfig(
            name="punctuation_dataset",
            version=datasets.Version("1.0.0"),
            description="PunctuationDataset dataset",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text_in": datasets.Value("string"),
                    "text_out": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    # 'B-<mark>' means the punctuation mark <mark> directly follows
                    # the token in the punctuated text; 'O' means no punctuation.
                    "tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                'B-.',
                                'B-,',
                                'B--',
                                'B-!',
                                'B-?',
                                'B-:',
                                'B-;',
                                'O',
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": _PATHS["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": _PATHS["test-A"]},
            ),
        ]

    def _generate_examples(self, filepath):
        # pandas reads the TSV files directly from the Hub URLs built in _PATHS:
        # in.tsv carries the unpunctuated text in its second column,
        # expected.tsv carries the punctuated reference in its single column.
        in_df = pd.read_csv(os.path.join(filepath, "in.tsv"), sep='\t', header=None)
        out_df = pd.read_csv(os.path.join(filepath, 'expected.tsv'), sep='\t', header=None)

        for key, ((_, row_in), (_, row_out)) in enumerate(zip(in_df.iterrows(), out_df.iterrows()), 1):
            text_in = PunctuationDataset._clean_text(row_in[1])
            text_out = PunctuationDataset._clean_text(row_out[0])

            # Align the unpunctuated and punctuated texts token by token and
            # derive one tag per token from the trailing punctuation mark.
            tokens = []
            tags = []
            for token_in, token_out in zip(text_in.split(), text_out.split()):
                assert token_in.lower() in token_out.lower()

                tokens.append(token_in)
                if token_in.lower() == token_out.lower():
                    tags.append('O')
                else:
                    tags.append(f'B-{token_out[-1]}')

            yield key, {
                "text_in": text_in,
                "text_out": text_out,
                "tokens": tokens,
                "tags": tags,
            }

    @staticmethod
    def _clean_text(text: str, lower: bool = False) -> str:
        """Drop punctuation marks detached from the preceding token and normalise whitespace."""
        if lower:
            text = text.lower()
        text = text.replace(' -', '')
        text = text.replace(' .', '')
        text = text.replace(' ,', '')
        text = text.replace('  ', ' ')
        text = text.strip()
        return text
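

# ---------------------------------------------------------------------------
# Minimal usage sketch, not part of the loading script itself. It assumes this
# script is published in the lruczu/2021-punctuation-restoration dataset
# repository referenced in _URL above; adjust the identifier if the script
# lives elsewhere. Recent versions of `datasets` may additionally require
# passing trust_remote_code=True for script-based datasets.
if __name__ == "__main__":
    dataset = datasets.load_dataset("lruczu/2021-punctuation-restoration")
    example = dataset["train"][0]
    print(example["tokens"])  # unpunctuated tokens from the ASR-style input
    print(example["tags"])    # class-label indices, one 'B-<mark>'/'O' tag per token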