MichaelR207 committed
Commit 0b99a0b
Parent: d220f97

Fix HuggingFace Download

Files changed (1): MultilingualSimplification.py (+85 -64)
MultilingualSimplification.py CHANGED
@@ -17,6 +17,8 @@
 import pandas as pd
 import os
 from collections import defaultdict
+import urllib.parse
+
 
 import datasets
 
@@ -70,116 +72,124 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE."""
 
 _SUBCORPORA = {
-    "NewselaEN": {
-        "path": "./data/English/Newsela EN",
-        "language": "en"
-    },
+    # "NewselaEN": {
+    #     "path": "data/English/Newsela EN",
+    #     "language": "en"
+    # },
     "WikiAutoEN": {
-        "path": "./data/English/WikiAuto",
+        "path": "data/English/WikiAuto",
         "language": "en"
     },
     "ASSET": {
-        "path": "./data/English/ASSET",
+        "path": "data/English/ASSET",
         "language": "en"
     },
-    "Simplext": {
-        "path": "./data/Spanish/Simplext",
-        "language": "es"
-    },
-    "NewselaES": {
-        "path": "./data/Spanish/Newsela ES",
-        "language": "es"
-    },
+    # "Simplext": {
+    #     "path": "data/Spanish/Simplext",
+    #     "language": "es"
+    # },
+    # "NewselaES": {
+    #     "path": "data/Spanish/Newsela ES",
+    #     "language": "es"
+    # },
     "Terence": {
-        "path" : "./data/Italian/Terence",
+        "path" : "data/Italian/Terence",
         "language": "it"
     },
     "Teacher": {
-        "path": "./data/Italian/Teacher",
+        "path": "data/Italian/Teacher",
         "language": "it"
     },
     "SimpitikiWiki": {
-        "path": "./data/Italian/Simpitiki Italian Wikipedia",
+        "path": "data/Italian/Simpitiki Italian Wikipedia",
        "language": "it"
     },
     "AdminIt": {
-        "path": "./data/Italian/AdminIT",
+        "path": "data/Italian/AdminIT",
         "language": "it"
     },
     "PaCCSS-IT": {
-        "path": "./data/Italian/PaCCSS-IT Corpus",
+        "path": "data/Italian/PaCCSS-IT Corpus",
         "language": "it"
     },
     "CLEAR" : {
-        "path" : "./data/French/CLEAR Corpus",
+        "path" : "data/French/CLEAR Corpus",
         "language": "fr"
     },
     "WikiLargeFR": {
-        "path" : "./data/French/WikiLargeFR Corpus",
+        "path" : "data/French/WikiLargeFR Corpus",
         "language": "fr"
     },
     "EasyJapanese": {
-        "path": "./data/Japanese/Easy Japanese Corpus",
+        "path": "data/Japanese/Easy Japanese Corpus",
         "language": "ja"
     },
     "EasyJapaneseExtended": {
-        "path": "./data/Japanese/Easy Japanese Extended",
+        "path": "data/Japanese/Easy Japanese Extended",
         "language": "ja"
     },
     "PorSimples" : {
-        "path": "./data/Brazilian Portuguese/PorSimples",
+        "path": "data/Brazilian Portuguese/PorSimples",
         "language": "pt-br"
     },
     "TextComplexityDE" : {
-        "path": "./data/German/TextComplexityDE Parallel Corpus",
+        "path": "data/German/TextComplexityDE Parallel Corpus",
         "language": "de"
     },
     "GEOLinoTest" : {
-        "path" : "./data/German/GEOLino Corpus",
+        "path" : "data/German/GEOLino Corpus",
         "language": "de"
     },
-    "GermanNews" : {
-        "path" : "./data/German/German News",
-        "language": "de"
-    },
-    "CBST": {
-        "path" : "./data/Basque/CBST",
-        "language": "eu"
-    },
-    "DSim": {
-        "path": "./data/Danish/DSim Corpus",
-        "language": "da"
-    },
-    "SimplifyUR": {
-        "path": "./data/Urdu/SimplifyUR",
-        "language": "ur"
-    },
+    # "GermanNews" : {
+    #     "path" : "data/German/German News",
+    #     "language": "de"
+    # },
+    # "CBST": {
+    #     "path" : "data/Basque/CBST",
+    #     "language": "eu"
+    # },
+    # "DSim": {
+    #     "path": "data/Danish/DSim Corpus",
+    #     "language": "da"
+    # },
+    # "SimplifyUR": {
+    #     "path": "data/Urdu/SimplifyUR",
+    #     "language": "ur"
+    # },
     "RuWikiLarge": {
-        "path" : "./data/Russian/RuWikiLarge",
+        "path" : "data/Russian/RuWikiLarge",
         "language": "ru"
     },
     "RSSE" : {
-        "path": "./data/Russian/RSSE Corpus",
-        "language": "ru"
-    },
-    "RuAdaptLit" : {
-        "path": "./data/Russian/RuAdapt Literature",
+        "path": "data/Russian/RSSE Corpus",
         "language": "ru"
     },
+    # "RuAdaptLit" : {
+    #     "path": "data/Russian/RuAdapt Literature",
+    #     "language": "ru"
+    # },
     "RuAdaptFairytales" : {
-        "path": "./data/Russian/RuAdapt Fairytales",
+        "path": "data/Russian/RuAdapt Fairytales",
         "language": "ru"
     },
     "RuAdaptEncy" : {
-        "path" : "./data/Russian/RuAdapt Ency",
+        "path" : "data/Russian/RuAdapt Ency",
         "language": "ru"
     },
     "TSSlovene" : {
-        "path" : "./data/Slovene/Text Simplification Slovene",
+        "path" : "data/Slovene/Text Simplification Slovene",
         "language": "sl"
     }
 }
 
+_URL = "https://huggingface.co/datasets/MichaelR207/MultiSim/raw/main"
+
+_URLS = {
+    dataset+"-"+split: urllib.parse.quote(os.path.join(_URL, _SUBCORPORA[dataset]["path"] + "_" + split + ".csv"), safe=':/')
+    for split in ["train", "val", "test"]
+    for dataset in _SUBCORPORA.keys()
+}
+
 _LANGUAGES = {
     "English":'en',
     "Spanish":'es',
@@ -290,25 +300,34 @@ class MultilingualSimplification(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        filepaths = []
+        download_urls = {}
         if (self.config.name == 'all'):
             for subcorpus in _SUBCORPORA:
-                filepaths.append(_SUBCORPORA[subcorpus]['path'])
+                download_urls[subcorpus + "-train"] = _URLS[subcorpus+"-train"]
+                download_urls[subcorpus + "-test"] = _URLS[subcorpus+"-test"]
+                download_urls[subcorpus + "-val"] = _URLS[subcorpus+"-val"]
         elif (self.config.name in _LANGUAGES):
             lang_code = _LANGUAGES[self.config.name]
             for subcorpus in _SUBCORPORA:
                 if _SUBCORPORA[subcorpus]['language'] == lang_code:
-                    filepaths.append(_SUBCORPORA[subcorpus]['path'])
+                    download_urls[subcorpus + "-train"] = _URLS[subcorpus+"-train"]
+                    download_urls[subcorpus + "-test"] = _URLS[subcorpus+"-test"]
+                    download_urls[subcorpus + "-val"] = _URLS[subcorpus+"-val"]
         elif (self.config.name in _SUBCORPORA):
-            filepaths = [_SUBCORPORA[self.config.name]['path']]
+            download_urls[self.config.name + "-train"] = _URLS[self.config.name+"-train"]
+            download_urls[self.config.name + "-test"] = _URLS[self.config.name+"-test"]
+            download_urls[self.config.name + "-val"] = _URLS[self.config.name+"-val"]
         else:
             print("Invalid configuration name: " + self.config.name + ". Try 'all', 'English', 'ASSET', etc.")
+
+        downloaded_files = dl_manager.download_and_extract(download_urls)
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepaths": filepaths,
+                    "filepaths": downloaded_files,
                     "split": "train",
                 },
             ),
@@ -316,7 +335,7 @@ class MultilingualSimplification(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepaths": filepaths,
+                    "filepaths": downloaded_files,
                     "split": "val",
                 },
             ),
@@ -324,7 +343,7 @@ class MultilingualSimplification(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepaths": filepaths,
+                    "filepaths": downloaded_files,
                     "split": "test"
                 },
             ),
@@ -336,16 +355,18 @@ class MultilingualSimplification(datasets.GeneratorBasedBuilder):
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         df = pd.DataFrame()
 
-        if (len(filepaths) > 1):
-            for filepath in filepaths:
-                if os.path.exists(filepath + "_" + split + ".csv"):
-                    df = pd.concat([df, pd.read_csv(filepath + "_" + split + ".csv")])
+        if (len(filepaths.keys()) > 1):
+            for dataset, path in filepaths.items():
+                if os.path.exists(path):
+                    if dataset.endswith("-"+split):
+                        df = pd.concat([df, pd.read_csv(path)])
 
             # shuffle the combined dataset
             df = df.sample(frac=1, random_state=3600).reset_index(drop=True)
         else:
-            if os.path.exists(filepaths[0] + "_" + split + ".csv"):
-                df = pd.read_csv(filepaths[0] + "_" + split + ".csv")
+            dataset = list(filepaths.keys())[0]
+            if os.path.exists(filepaths[dataset]):
+                df = pd.read_csv(filepaths[dataset])
 
         if len(df) > 0:
             for key, row in df.iterrows():
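Why the quoting matters: several subcorpus folders ("Newsela EN", "Easy Japanese Corpus", "PaCCSS-IT Corpus") contain spaces, so the raw-file URLs built from _SUBCORPORA must be percent-encoded before dl_manager.download_and_extract can fetch them. That is what the new _URLS comprehension does. A minimal standalone sketch of the URL it produces, using one entry copied from _SUBCORPORA:

import os
import urllib.parse

_URL = "https://huggingface.co/datasets/MichaelR207/MultiSim/raw/main"

# One entry from _SUBCORPORA; the space in the folder name is what made
# the unquoted download fail before this commit.
path = "data/Japanese/Easy Japanese Corpus"

# quote() percent-encodes the space, while safe=':/' keeps the URL scheme
# and path separators intact.
url = urllib.parse.quote(os.path.join(_URL, path + "_train.csv"), safe=':/')
print(url)
# https://huggingface.co/datasets/MichaelR207/MultiSim/raw/main/data/Japanese/Easy%20Japanese%20Corpus_train.csv

Note that building the URL with os.path.join only behaves this way on POSIX; on Windows it would insert backslashes, so plain string concatenation with '/' would be the portable alternative.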
 
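With the CSVs fetched from the Hub instead of resolved against the local working directory, the dataset should now load directly by repo id. A usage sketch, assuming the script is hosted in the MichaelR207/MultiSim dataset repo as _URL indicates (newer releases of datasets may additionally require trust_remote_code=True for script-based datasets, and the newest no longer run loading scripts at all):

from datasets import load_dataset

# Config names follow _split_generators: 'all', a language name such as
# 'English', or a single subcorpus such as 'ASSET'.
multisim = load_dataset("MichaelR207/MultiSim", "ASSET", trust_remote_code=True)
print(multisim["train"][0])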