import os
import random
import re

import datasets

_DESCRIPTION = """\
Common Crawl - Malayalam.
"""

_CITATION = """\
@article{qburst,
  title={Common Crawl - Malayalam},
  author={n.d},
  year={2020},
  journal={n.d},
}
"""

_URLs = {
    "malayalam_wiki_1": "https://calicut.qburst.in/commoncrawl/malayalam/2020-10/malayalam_filtered_html_body.tar.gz",
    "malayalam_wiki_2": "https://calicut.qburst.in/commoncrawl/malayalam/2020-10/unfiltered_heading_and_para.tar.gz",
}


class MalayalamWikiConfig(datasets.BuilderConfig):
    """BuilderConfig for MalayalamWiki."""

    def __init__(self, **kwargs):
        """BuilderConfig for MalayalamWiki.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(MalayalamWikiConfig, self).__init__(**kwargs)


class MalayalamWiki(datasets.GeneratorBasedBuilder):
    """Common Crawl - Malayalam text corpus."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        MalayalamWikiConfig(
            name="malayalam_wiki",
            version=VERSION,
            description="Common Crawl - Malayalam.",
        ),
    ]

    def partition(self, list_in, n):
        # Shuffle the file list in place, then deal it into n roughly
        # equal buckets via round-robin slicing. Note: no fixed seed, so
        # the resulting splits differ from run to run.
        random.shuffle(list_in)
        return [list_in[i::n] for i in range(n)]

    def remove_special_characters(self, txt):
        # Punctuation, the U+FFFD replacement character, and a handful of
        # stray Latin letters left over from the HTML extraction.
        chars_to_ignore_regex = r'[\,\?\.\!\-\;\:\"\“\%\‘\”\�Utrnle\_]'
        # Zero-width non-joiner/joiner and the left-to-right mark.
        unicode_ignore_regex = r"[\u200e\u200c\u200d]"
        # Any remaining English letters.
        english_ignore_regex = r"[a-zA-Z]"
        txt = txt.strip()
        txt = re.sub(chars_to_ignore_regex, "", txt)
        txt = re.sub(unicode_ignore_regex, "", txt) + " "
        txt = re.sub(english_ignore_regex, "", txt) + " "
        return txt

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"text": datasets.Value("string")}),
            supervised_keys=None,
            homepage="https://github.com/qburst/common-crawl-malayalam",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # TODO: stream with dl_manager.iter_archive() instead of
        # download_and_extract to avoid unpacking the full archives.
        dl_path = dl_manager.download_and_extract(_URLs)
        files = sorted(os.listdir(os.path.join(dl_path["malayalam_wiki_1"], "malayalam_filtered_html_body")))
        file_paths = [
            os.path.join(dl_path["malayalam_wiki_1"], "malayalam_filtered_html_body", file)
            for file in files
        ]
        files = sorted(os.listdir(os.path.join(dl_path["malayalam_wiki_2"], "unfiltered_heading_and_para")))
        wiki_2 = [
            os.path.join(dl_path["malayalam_wiki_2"], "unfiltered_heading_and_para", file)
            for file in files
        ]
        file_paths.extend(wiki_2)
        # Randomly partition the combined file list three ways:
        # train / validation / test.
        filepaths_splice = self.partition(file_paths, 3)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": filepaths_splice[0],
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": filepaths_splice[1],
                    "split": "validation",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": filepaths_splice[2],
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        # `split` is passed through gen_kwargs but unused here; accepting
        # it keeps the signature in sync with _split_generators (the
        # original signature omitted it, which raises a TypeError).
        for file_id, file in enumerate(filepath):
            with open(file, encoding="utf-8") as f:
                for row_id, row in enumerate(f):
                    yield f"{file_id}_{row_id}", {"text": self.remove_special_characters(row).strip()}
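

# Usage sketch (illustrative, not part of the qburst release): with this file
# saved locally as malayalam_wiki.py, the corpus can be loaded through the
# standard `datasets` API. The script filename is an assumption, and recent
# `datasets` versions require trust_remote_code=True for script-based
# datasets.
if __name__ == "__main__":
    from datasets import load_dataset

    # Pointing load_dataset at this script downloads both tar.gz archives
    # and builds the three randomly partitioned splits defined above.
    ds = load_dataset("malayalam_wiki.py", trust_remote_code=True)
    print(ds)
    print(ds["train"][0]["text"])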