joelniklaus committed
Commit
13d54f1
1 Parent(s): fd5d204

fixed bug in data loader for de, en, and es
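
The diff below points at the likely bug: for de, en, and es the old _split_generators appended the whole list of chunk URLs as one nested element of data_urls, while the fix flattens the list with extend (and swaps the hand-built _DATA_URL links for hf_hub_url). A minimal sketch of the append/extend difference, with made-up chunk names, assuming the nesting was the issue:

    # Hypothetical chunk names, only to illustrate the list handling.
    chunks = ["en_0.jsonl.xz", "en_1.jsonl.xz"]

    nested = []
    nested.append(chunks)  # [["en_0.jsonl.xz", "en_1.jsonl.xz"]] - one nested element
    flat = []
    flat.extend(chunks)    # ["en_0.jsonl.xz", "en_1.jsonl.xz"] - two flat paths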

Files changed (1)
  1. mc4_legal.py +13 -7
mc4_legal.py CHANGED
@@ -3,6 +3,7 @@ import ast
 import json
 
 import datasets
+from huggingface_hub.file_download import hf_hub_url
 
 try:
     import lzma as xz
@@ -19,7 +20,6 @@ _CITATION = """
 """
 
 _URL = "https://huggingface.co/datasets/joelito/mc4_legal"
-_DATA_URL = f"{_URL}/resolve/main/data"
 
 _LANGUAGES = [
     "bg",
@@ -85,13 +85,16 @@ class MC4Legal(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
+        def get_url(file_name):
+            return hf_hub_url(repo_id="joelito/mc4_legal", filename=f"data/{file_name}.jsonl.xz", repo_type="dataset")
+
         data_urls = []
         languages = _LANGUAGES if self.config.name == "all" else [self.config.name]
         for language in languages:
             if language in ["de", "en", "es"]:  # here we need to chunk because the files are too large
-                data_urls.append([f"{_DATA_URL}/{language}_{idx}.jsonl.xz" for idx in [0, 1]])
+                data_urls.extend([get_url(f"{language}_{idx}") for idx in [0, 1]])
             else:
-                data_urls.append(f"{_DATA_URL}/{language}.jsonl.xz")
+                data_urls.append(get_url(language))
 
         downloaded_files = dl_manager.download(data_urls)
         return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_files})]
@@ -107,14 +110,17 @@ class MC4Legal(datasets.GeneratorBasedBuilder):
                     if line:
                         example = json.loads(line)
                         if example is not None and isinstance(example, dict):
+                            timestamp = example.get("timestamp", "")
+                            # remove the Z at the end (time zone)
+                            if isinstance(timestamp, str) and timestamp.endswith("Z"):
+                                timestamp = timestamp[:-1]
                             yield id_, {
                                 "index": example.get("index", ""),
                                 "url": example.get("url", ""),
-                                # remove the Z at the end (time zone)
-                                "timestamp": example.get("timestamp", "")[:-1],
+                                "timestamp": timestamp,
                                 "matches": ast.literal_eval(example.get("matches", "")),
                                 "text": example.get("text", ""),
                             }
                             id_ += 1
-        except:
-            print("Error reading file:", filepath)
+        except Exception:
+            logger.exception("Error while processing file %s", filepath)
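
For reference, the new get_url helper builds its links with hf_hub_url from huggingface_hub, which resolves files inside the dataset repo. The sketch below shows the kind of URL it should produce and a plain load_dataset call against one of the previously affected configs; the "en" config name and the exact URL shown are assumptions based on the script, not part of this commit:

    from huggingface_hub.file_download import hf_hub_url
    from datasets import load_dataset

    # Resolve one of the chunked data files, as get_url does in the diff above.
    url = hf_hub_url(repo_id="joelito/mc4_legal", filename="data/en_0.jsonl.xz", repo_type="dataset")
    # Expected to look like:
    # https://huggingface.co/datasets/joelito/mc4_legal/resolve/main/data/en_0.jsonl.xz
    print(url)

    # Load one of the previously broken chunked-language configs (assumed usage).
    dataset = load_dataset("joelito/mc4_legal", "en", split="train")
    print(dataset[0])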