albertvillanova (HF staff) committed
Commit 0bad575 · verified · 1 Parent(s): ce078cf

Refactor script

Files changed (1):
  1. wikipedia.py +50 -50
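In short: the nested `_base_url(lang)` closure becomes a `Wikipedia._get_base_url()` method, and the `_extract_content` / `_clean_content` helpers move out of `_generate_examples` to module level; behavior is unchanged. As a hedged illustration of what `_get_base_url` computes, here is a minimal standalone Python sketch; the `_BASE_URL_TMPL` value and the language/date arguments are placeholders for the example, not values copied from wikipedia.py:

    # Placeholder template for illustration; the real constant lives in wikipedia.py.
    _BASE_URL_TMPL = "https://dumps.wikimedia.org/{lang}wiki/{date}/"

    def get_base_url(language: str, date: str) -> str:
        # Mirrors Wikipedia._get_base_url: dashes in language codes become underscores.
        return _BASE_URL_TMPL.format(lang=language.replace("-", "_"), date=date)

    print(get_base_url("be-x-old", "20231101"))
    # -> https://dumps.wikimedia.org/be_x_oldwiki/20231101/

This also explains the two smaller hunks below: every former `_base_url(lang)` call site becomes `self._get_base_url()`, so the language code no longer needs to be threaded through as a local variable.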
wikipedia.py CHANGED
@@ -913,13 +913,11 @@ class Wikipedia(datasets.GeneratorBasedBuilder):
             citation=_CITATION,
         )
 
-    def _split_generators(self, dl_manager):
-        def _base_url(lang):
-            return _BASE_URL_TMPL.format(lang=lang.replace("-", "_"), date=self.config.date)
-
-        lang = self.config.language
+    def _get_base_url(self):
+        return _BASE_URL_TMPL.format(lang=self.config.language.replace("-", "_"), date=self.config.date)
 
-        info_url = _base_url(lang) + _INFO_FILE
+    def _split_generators(self, dl_manager):
+        info_url = self._get_base_url() + _INFO_FILE
         # Use dictionary since testing mock always returns the same result.
         downloaded_files = dl_manager.download_and_extract({"info": info_url})
 
@@ -931,7 +929,7 @@ class Wikipedia(datasets.GeneratorBasedBuilder):
         assert (
             multistream_dump_info["status"] == "done"
         ), "Specified dump (%s) multistream status is not 'done': %s" % (
-            _base_url(lang),
+            self._get_base_url(),
             multistream_dump_info["status"],
         )
 
@@ -939,7 +937,7 @@ class Wikipedia(datasets.GeneratorBasedBuilder):
             if ".xml" not in fname:
                 continue
             total_bytes += info["size"]
-            xml_urls.append(_base_url(lang) + fname)
+            xml_urls.append(self._get_base_url() + fname)
 
         # Use dictionary since testing mock always returns the same result.
         downloaded_files = dl_manager.download({"xml": xml_urls})
@@ -951,54 +949,56 @@ class Wikipedia(datasets.GeneratorBasedBuilder):
         ]
 
     def _generate_examples(self, filepaths):
-        def _extract_content(filepath):
-            """Extracts article content from a single WikiMedia XML file."""
-            logger.info("generating examples from = %s", filepath)
-            with open(filepath, "rb") as f:
-                f = bz2.BZ2File(filename=f)
-                # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
-                utf_f = codecs.getreader("utf-8")(f)
-                context = etree.iterparse(utf_f, events=("end",))
-                for unused_event, elem in context:
-                    if not elem.tag.endswith("page"):
-                        continue
-                    namespace = elem.tag[:-4]
-                    title = elem.find(f"./{namespace}title").text
-                    ns = elem.find(f"./{namespace}ns").text
-                    id_ = elem.find(f"./{namespace}id").text
-                    red_ = elem.find(f"./{namespace}redirect")
+        # No shuffle anymore
+        for filepath in filepaths:
+            for id_, title, raw_content in _extract_content(filepath):
+                yield from _clean_content((id_, title, raw_content), self.config.language)
+
 
-                    # Filter pages that are not in the "main" namespace.
-                    if ns != "0":
-                        elem.clear()
-                        continue
+def _extract_content(filepath):
+    """Extracts article content from a single WikiMedia XML file."""
+    logger.info("generating examples from = %s", filepath)
+    with open(filepath, "rb") as f:
+        f = bz2.BZ2File(filename=f)
+        # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
+        utf_f = codecs.getreader("utf-8")(f)
+        context = etree.iterparse(utf_f, events=("end",))
+        for unused_event, elem in context:
+            if not elem.tag.endswith("page"):
+                continue
+            namespace = elem.tag[:-4]
+            title = elem.find(f"./{namespace}title").text
+            ns = elem.find(f"./{namespace}ns").text
+            id_ = elem.find(f"./{namespace}id").text
+            red_ = elem.find(f"./{namespace}redirect")
 
-                    raw_content = elem.find(f"./{namespace}revision/{namespace}text").text
-                    elem.clear()
+            # Filter pages that are not in the "main" namespace.
+            if ns != "0":
+                elem.clear()
+                continue
 
-                    # Filter redirects.
-                    if raw_content is None or red_ is not None:
-                        continue
+            raw_content = elem.find(f"./{namespace}revision/{namespace}text").text
+            elem.clear()
 
-                    yield id_, title, raw_content
+            # Filter redirects.
+            if raw_content is None or red_ is not None:
+                continue
 
-        def _clean_content(inputs, language):
-            """Cleans raw wikicode to extract text."""
-            id_, title, raw_content = inputs
-            try:
-                text = _parse_and_clean_wikicode(raw_content, parser=mwparserfromhell, language=language)
-            except mwparserfromhell.parser.ParserError as e:
-                logger.error("mwparserfromhell ParseError: %s", e)
-                return
-            if not text:
-                return
-            url = _construct_url(title, language)
-            yield id_, {"id": id_, "url": url, "title": title, "text": text}
+            yield id_, title, raw_content
 
-        # No shuffle anymore
-        for filepath in filepaths:
-            for id_, title, raw_content in _extract_content(filepath):
-                yield from _clean_content((id_, title, raw_content), self.config.language)
+
+def _clean_content(inputs, language):
+    """Cleans raw wikicode to extract text."""
+    id_, title, raw_content = inputs
+    try:
+        text = _parse_and_clean_wikicode(raw_content, parser=mwparserfromhell, language=language)
+    except mwparserfromhell.parser.ParserError as e:
+        logger.error("mwparserfromhell ParseError: %s", e)
+        return
+    if not text:
+        return
+    url = _construct_url(title, language)
+    yield id_, {"id": id_, "url": url, "title": title, "text": text}
 
 
 def _parse_and_clean_wikicode(raw_content, parser, language):
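For reviewers who want to exercise the now module-level helpers in isolation, here is a self-contained sketch of the streaming-parse pattern `_extract_content` relies on: bz2 decompression plus incremental `etree.iterparse`, with `elem.clear()` after each page so memory stays bounded. Assumptions: `etree` is Python's `xml.etree.ElementTree`, and the dump filename is a placeholder; the UTF-8 reader wrapper is kept from the script (its TensorFlow workaround):

    import bz2
    import codecs
    import xml.etree.ElementTree as etree  # assumed equivalent of the script's etree import

    def iter_main_namespace_pages(filepath):
        """Yield (id, title, raw_wikicode) for non-redirect pages in namespace 0."""
        with open(filepath, "rb") as f:
            f = bz2.BZ2File(filename=f)
            # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563
            utf_f = codecs.getreader("utf-8")(f)
            for _event, elem in etree.iterparse(utf_f, events=("end",)):
                if not elem.tag.endswith("page"):
                    continue
                namespace = elem.tag[:-4]  # keep the "{...}" XML-namespace prefix
                title = elem.find(f"./{namespace}title").text
                ns = elem.find(f"./{namespace}ns").text
                id_ = elem.find(f"./{namespace}id").text
                redirect = elem.find(f"./{namespace}redirect")
                raw = elem.find(f"./{namespace}revision/{namespace}text").text
                elem.clear()  # free the parsed subtree; crucial on multi-GB dumps
                if ns == "0" and raw is not None and redirect is None:
                    yield id_, title, raw

    # Placeholder filename; real dumps are named like
    # <lang>wiki-<date>-pages-articles-multistream.xml.bz2
    for id_, title, _raw in iter_main_namespace_pages("enwiki-test.xml.bz2"):
        print(id_, title)
        break

The `elem.clear()` call is the load-bearing detail: without it, `iterparse` keeps every parsed element attached to the growing tree, and a full Wikipedia dump would not fit in memory.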