holylovenia committed
Commit f641853 · 1 Parent(s): cd0f166

Upload cc100.py with huggingface_hub

Files changed (1): cc100.py (+246, -0)
cc100.py ADDED
@@ -0,0 +1,246 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This corpus is an attempt to recreate the dataset used for training XLM-R. The
corpus comprises monolingual data for 100+ languages and also includes data
for romanized languages (indicated by *_rom). It was constructed using the
URLs and paragraph indices provided by the CC-Net repository by processing
January-December 2018 Common Crawl snapshots. Each file comprises documents
separated by double newlines and paragraphs within the same document separated
by a newline. The data is generated using the open-source CC-Net repository. No
claims of intellectual property are made on the work of preparation of the
corpus.

This script covers the Indonesian (ind), Javanese (jav), and Sundanese (sun) subsets.

[nusantara_schema_name] = ssp
"""

from typing import Dict, List, Tuple

import datasets

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import (DEFAULT_NUSANTARA_VIEW_NAME,
                                       DEFAULT_SOURCE_VIEW_NAME, Tasks)

_DATASETNAME = "cc100"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME

_LANGUAGES = ["ind", "jav", "sun"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
_LOCAL = False

_CITATION = """\
@inproceedings{conneau-etal-2020-unsupervised,
    title = "Unsupervised Cross-lingual Representation Learning at Scale",
    author = "Conneau, Alexis  and
      Khandelwal, Kartikay  and
      Goyal, Naman  and
      Chaudhary, Vishrav  and
      Wenzek, Guillaume  and
      Guzm{\\'a}n, Francisco  and
      Grave, Edouard  and
      Ott, Myle  and
      Zettlemoyer, Luke  and
      Stoyanov, Veselin",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.acl-main.747",
    doi = "10.18653/v1/2020.acl-main.747",
    pages = "8440--8451",
    abstract = "This paper shows that pretraining multilingual language models
    at scale leads to significant performance gains for a wide range of
    cross-lingual transfer tasks. We train a Transformer-based masked language
    model on one hundred languages, using more than two terabytes of filtered
    CommonCrawl data. Our model, dubbed XLM-R, significantly outperforms
    multilingual BERT (mBERT) on a variety of cross-lingual benchmarks,
    including +14.6{%} average accuracy on XNLI, +13{%} average F1 score on
    MLQA, and +2.4{%} F1 score on NER. XLM-R performs particularly well on
    low-resource languages, improving 15.7{%} in XNLI accuracy for Swahili and
    11.4{%} for Urdu over previous XLM models. We also present a detailed
    empirical analysis of the key factors that are required to achieve these
    gains, including the trade-offs between (1) positive transfer and capacity
    dilution and (2) the performance of high and low resource languages at
    scale. Finally, we show, for the first time, the possibility of
    multilingual modeling without sacrificing per-language performance; XLM-R
    is very competitive with strong monolingual models on the GLUE and XNLI
    benchmarks. We will make our code and models publicly available.",
}

@inproceedings{wenzek-etal-2020-ccnet,
    title = "{CCN}et: Extracting High Quality Monolingual Datasets from Web Crawl Data",
    author = "Wenzek, Guillaume  and
      Lachaux, Marie-Anne  and
      Conneau, Alexis  and
      Chaudhary, Vishrav  and
      Guzm{\\'a}n, Francisco  and
      Joulin, Armand  and
      Grave, Edouard",
    booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference",
    month = may,
    year = "2020",
    address = "Marseille, France",
    publisher = "European Language Resources Association",
    url = "https://www.aclweb.org/anthology/2020.lrec-1.494",
    pages = "4003--4012",
    abstract = "Pre-training text representations have led to significant
    improvements in many areas of natural language processing. The quality of
    these models benefits greatly from the size of the pretraining corpora as
    long as its quality is preserved. In this paper, we describe an automatic
    pipeline to extract massive high-quality monolingual datasets from Common
    Crawl for a variety of languages. Our pipeline follows the data processing
    introduced in fastText (Mikolov et al., 2017; Grave et al., 2018), that
    deduplicates documents and identifies their language. We augment this
    pipeline with a filtering step to select documents that are close to high
    quality corpora like Wikipedia.",
    language = "English",
    ISBN = "979-10-95546-34-4",
}
"""

_DESCRIPTION = """\
This corpus is an attempt to recreate the dataset used for training
XLM-R. The corpus comprises monolingual data for 100+ languages and
also includes data for romanized languages (indicated by *_rom). It
was constructed using the URLs and paragraph indices provided by the
CC-Net repository by processing January-December 2018 Common Crawl
snapshots. Each file comprises documents separated by double newlines
and paragraphs within the same document separated by a newline. The
data is generated using the open-source CC-Net repository. No claims
of intellectual property are made on the work of preparation of the
corpus.
"""

_HOMEPAGE = "https://data.statmt.org/cc-100/"

_LICENSE = "MIT"

# Mapping from ISO 639-3 codes to the two-letter codes used in CC-100 file names.
_LANGUAGES_MAP = {
    "ind": "id",
    "jav": "jv",
    "sun": "su",
}

_URLS = {
    "train": "https://data.statmt.org/cc-100/{lang}.txt.xz",
}
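# For example, the Indonesian subset resolves to
#   _URLS["train"].format(lang=_LANGUAGES_MAP["ind"])
#   == "https://data.statmt.org/cc-100/id.txt.xz"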

_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]

_SOURCE_VERSION = "2018.12.01"

_NUSANTARA_VERSION = "1.0.0"

# Fallback language when a config name carries no language part;
# matches DEFAULT_CONFIG_NAME ("cc100_jav_source") below.
_DEFAULT_LANGUAGE = "jav"

def nusantara_config_constructor(lang, schema, version):
    """Construct NusantaraConfig with cc100_{lang}_{schema} as the name format."""
    if schema not in ("source", "nusantara_ssp"):
        raise ValueError(f"Invalid schema: {schema}")

    if lang == "":
        raise ValueError(f"Language is required. Choose one of these languages: {_LANGUAGES}.")
    elif lang in _LANGUAGES:
        return NusantaraConfig(
            name=f"cc100_{lang}_{schema}",
            version=datasets.Version(version),
            description=f"CC100 with {schema} schema for {lang} language",
            schema=schema,
            subset_id="cc100",
        )
    else:
        raise ValueError(f"Invalid language: {lang}. Choose one of these languages: {_LANGUAGES}.")

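# With the three entries in _LANGUAGES_MAP and two schemas, the constructor
# above yields six configs, e.g. "cc100_ind_source" and "cc100_ind_nusantara_ssp".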
class CC100(datasets.GeneratorBasedBuilder):
    """Monolingual Datasets from Web Crawl Data."""

    DEFAULT_CONFIG_NAME = "cc100_jav_source"

    BUILDER_CONFIGS = [
        nusantara_config_constructor(lang, "source", _SOURCE_VERSION) for lang in _LANGUAGES_MAP
    ] + [
        nusantara_config_constructor(lang, "nusantara_ssp", _NUSANTARA_VERSION) for lang in _LANGUAGES_MAP
    ]

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_ssp":
            features = schemas.self_supervised_pretraining.features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""

        # Config names follow "cc100_{lang}_{schema}"; fall back to the
        # default language when the name carries no language part.
        split_name = self.config.name.split("_")
        if split_name[1] in ("source", "nusantara"):
            lang = _DEFAULT_LANGUAGE
        else:
            lang = split_name[1]
        url = _URLS["train"].format(lang=_LANGUAGES_MAP[lang])
        path = dl_manager.download_and_extract(url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": path,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""

        # The "source" and "nusantara_ssp" schemas share the same {id, text}
        # structure, so a single loop serves both: every non-empty line of
        # the dump becomes one example.
        with open(filepath, encoding="utf-8") as f:
            for counter, row in enumerate(f):
                if row.strip() != "":
                    yield counter, {
                        "id": str(counter),
                        "text": row.strip(),
                    }
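
For reference, a minimal usage sketch, assuming the script above is saved locally as cc100.py (recent datasets releases may additionally require trust_remote_code=True when loading a local script):

import datasets

# Config names follow the cc100_{lang}_{schema} pattern produced by
# nusantara_config_constructor, e.g. the Javanese source subset.
dataset = datasets.load_dataset("cc100.py", name="cc100_jav_source", split="train")

# Each example is one non-empty line of the CC-100 dump.
print(dataset[0])  # {"id": "0", "text": "..."}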