Dataset: kargaranamir/GlotSparse

Modalities: Text
Formats: csv
Libraries: Datasets, pandas
File size: 4,826 bytes

# coding=utf-8
# Copyright 2023 The GlotSparse Authors.
# Lint as: python3
"""GlotSparse dataset loading script.

This script is based on the Hugging Face tutorial and on the OSCAR-2301 and
CulturaX dataset scripts.
"""

import os
import collections

import pandas as pd

import datasets

logger = datasets.logging.get_logger(__name__)

_DESCRIPTION = """GlotSparse corpus."""

_URL = "https://huggingface.co/datasets/kargaranamir/GlotSparse"

_LICENSE = """
We do not own any of the text from which these data have been extracted.
We license the actual packaging, the metadata and the annotations of these data under CC BY 4.0.

If you are a website/dataset owner and do not want your data to be included in this corpus, please send us an email at [email protected] .
"""

_CITATION = r"""\
@misc{GlotSparse,
  author = {Kargaran, Amir Hossein},
  title = {GlotSparse Corpus},
  year = {2023},
  publisher = {Github},
  journal = {Github Repository},
  howpublished = {\url{https://github.com/kargaranamir/GlotSparse}},
}
"""

_BASE_DATA_PATH_FORMAT_STR = "{language}/{language}.csv"
_BASE_CHECKSUM_FILE_NAME = "checksum.sha256"


def _languages():
    """Create a sorted dictionary mapping language codes to language names.
    Returns:
      The sorted dictionary as an instance of `collections.OrderedDict`.
    """
    langs = {
        "Balochi_Arab": "bal_Arab",
        "Twi_Latn": "twi_Latn",
        "Fanti_Latn": "fat_Latn",
        "South-Azerbaijani_Arab": "azb_Arab",
        "Southern-Kurdish_Arab": "sdh_Arab",
        "Gurani-Arab": "hac_Arab",
        "Southern-Uzbek": "uzs_Arab",
        "Kirmanjki-Latn": "kiu-Latn",
        "Southern-Uzbek_Arab": "uzs_Arab",
        "Gilaki": "glk_Arab",
    }

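    # Invert the mapping so that keys become the language codes (e.g. "bal_Arab")
    # used as config names, and sort by code before returning.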
    langs = {v: k for k, v in langs.items()}
    return collections.OrderedDict(sorted(langs.items()))


class GlotConfig(datasets.BuilderConfig):
    """GlotSparse corpus."""

    def __init__(self, language: str, **kwargs):
        """BuilderConfig for GlotSparse.
        Args:
            language (str): A 3-letter ISO 639-3 code and the writing script, joined by an underscore. For example: "glk_Arab", "fat_Latn".
            **kwargs: Keyword arguments forwarded to super.
        """
        # Validate the language.
        if language not in _languages():
            raise ValueError("Invalid language: %s " % language)

        name = f"{language}"
        description = (
            f"Original {_languages()[language]} GlotSparse dataset from 2023"
        )
        super(GlotConfig, self).__init__(
            name=name, description=description, **kwargs
        )

        # Additional attributes
        self.language = language
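        # Path of the per-language CSV inside the dataset repository,
        # e.g. "glk_Arab/glk_Arab.csv".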
        self.base_data_path = _BASE_DATA_PATH_FORMAT_STR.format(language=language)


class Glot(datasets.GeneratorBasedBuilder):
    """GlotSparse."""

    BUILDER_CONFIGS = [
        GlotConfig(  # pylint: disable=g-complex-comprehension
            language=language,
            version=datasets.Version("1.0.0"),
        )
        for language in _languages()
    ]
    BUILDER_CONFIG_CLASS = GlotConfig

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "Source": datasets.Value("string"),
                    "Content": datasets.Value("string"),
                    "Length": datasets.Value("int64"),
                    "Script": datasets.Value("string"),
                    "ISO639-3": datasets.Value("string"),
                    "Language": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):

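        # base_data_path is relative to this dataset repository (e.g.
        # "bal_Arab/bal_Arab.csv"); the download manager fetches it from there.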
        data_urls = [self.config.base_data_path]
        doc_files = dl_manager.download(
            [url for url in data_urls if url.endswith(".csv")]
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"doc_files": doc_files}
            ),
        ]


    def _generate_examples(self, doc_files):
        """Yield (key, example) pairs by iterating over all the CSV files."""
        for doc_i, doc_path in enumerate(doc_files):
            df = pd.read_csv(doc_path)

            for index, row in df.iterrows():
                yield f"{doc_i}_{index}", {
                    "Source": row["Source"],
                    "Content": row["Content"],
                    "Length": row["Length"],
                    "Script": row["Script"],
                    "ISO639-3": row["ISO639-3"],
                    "Language": row["Language"],
                }
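

# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustration, not part of the loading logic).
# It assumes the dataset is hosted at
# https://huggingface.co/datasets/kargaranamir/GlotSparse and that the
# "bal_Arab" config exists. Depending on your `datasets` version you may need
# trust_remote_code=True, and very recent releases no longer run loading
# scripts at all.
if __name__ == "__main__":
    dataset = datasets.load_dataset(
        "kargaranamir/GlotSparse", "bal_Arab", split="train", trust_remote_code=True
    )
    print(dataset)
    print(dataset[0]["Content"])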