holylovenia committed
Commit 46c7c7a • 1 Parent(s): da75515

Upload nllb_seed.py with huggingface_hub

nllb_seed.py  +33  -43
nllb_seed.py
CHANGED
@@ -28,7 +28,7 @@ To create a dataset loading script you will create a class and implement 3 methods
 
 TODO: Before submitting your script, delete this doc string and replace it with a description of your dataset.
 
-[
+[seacrowd_schema_name] = (kb, pairs, qa, text, t2t, entailment)
 """
 import os
 from pathlib import Path
@@ -36,9 +36,9 @@ from typing import Dict, List, Tuple
 
 import datasets
 
-from
-from
-from
+from seacrowd.utils import schemas
+from seacrowd.utils.configs import SEACrowdConfig
+from seacrowd.utils.constants import Tasks
 
 # TODO: Add BibTeX citation
 _CITATION = """\
@@ -77,7 +77,7 @@ _LICENSE = "CC-BY-SA 4.0"
 # For local datasets, this variable can be an empty dictionary.
 
 # For publicly available datasets you will most likely end up passing these URLs to dl_manager in _split_generators.
-# In most cases the URLs will be the same for the source and
+# In most cases the URLs will be the same for the source and seacrowd config.
 # However, if you need to access different files for each config you can have multiple entries in this dict.
 # This can be an arbitrarily nested dict/list of URLs (see below in `_split_generators` method)
 _URLS = {
@@ -91,18 +91,18 @@ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]  # example: [Tasks.TRANSLATION, T
 # This version doesn't have to be consistent with semantic versioning. Anything that is
 # provided by the original dataset as a version goes.
 _SOURCE_VERSION = "1.0.0"
-
+_SEACROWD_VERSION = "2024.06.20"
 _LOCAL = False
 
 
-def
+def seacrowd_config_constructor(lang, schema, version):
     if lang == "":
         raise ValueError(f"Invalid lang {lang}")
 
-    if schema != "source" and schema != "
+    if schema != "source" and schema != "seacrowd_t2t":
         raise ValueError(f"Invalid schema: {schema}")
 
-    return
+    return SEACrowdConfig(
         name="nllb_seed_{lang}_{schema}".format(lang=lang, schema=schema),
         version=datasets.Version(version),
         description="nllb_seed {schema} schema for {lang} language".format(lang=_LANGUAGE_MAP[lang], schema=schema),
@@ -116,42 +116,42 @@ class NLLBSeed(datasets.GeneratorBasedBuilder):
     """TODO: Short description of my dataset."""
 
     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
-
+    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
 
-    # You will be able to load the "source" or "
+    # You will be able to load the "source" or "seacrowd" configurations with
     # ds_source = datasets.load_dataset('my_dataset', name='source')
-    #
+    # ds_seacrowd = datasets.load_dataset('my_dataset', name='seacrowd')
 
     # For local datasets you can make use of the `data_dir` and `data_files` kwargs
     # https://huggingface.co/docs/datasets/add_dataset.html#downloading-data-files-and-organizing-splits
     # ds_source = datasets.load_dataset('my_dataset', name='source', data_dir="/path/to/data/files")
-    #
+    # ds_seacrowd = datasets.load_dataset('my_dataset', name='seacrowd', data_dir="/path/to/data/files")
 
     # TODO: For each dataset, implement Config for Source and Nusantara;
-    # If dataset contains more than one subset (see
+    # If dataset contains more than one subset (see seacrowd/sea_datasets/smsa.py) implement for EACH of them.
     # Each of them should contain:
-    # - name: should be unique for each dataset config eg. smsa_(source|
+    # - name: should be unique for each dataset config eg. smsa_(source|seacrowd)_[seacrowd_schema_name]
-    # - version: option = (SOURCE_VERSION|
+    # - version: option = (SOURCE_VERSION|SEACROWD_VERSION)
     # - description: one line description for the dataset
-    # - schema: options = (source|
+    # - schema: options = (source|seacrowd_[seacrowd_schema_name])
     # - subset_id: subset id is the canonical name for the dataset (eg. smsa)
-    # where [
+    # where [seacrowd_schema_name] = (kb, pairs, qa, text, t2t)
 
-    BUILDER_CONFIGS = [
+    BUILDER_CONFIGS = [seacrowd_config_constructor(lang, "source", _SOURCE_VERSION) for lang in _LANGUAGE_MAP] + [seacrowd_config_constructor(lang, "seacrowd_t2t", _SEACROWD_VERSION) for lang in _LANGUAGE_MAP]
     """
     BUILDER_CONFIGS = [
-
+        SEACrowdConfig(
             name="nllb_seed_source",
             version=SOURCE_VERSION,
             description="nllb_seed source schema",
             schema="source",
             subset_id="nllb_seed",
         ),
-
+        SEACrowdConfig(
-            name="
+            name="nllb_seed_seacrowd_t2t",
-            version=
+            version=SEACROWD_VERSION,
             description="nllb_seed Nusantara schema",
-            schema="
+            schema="seacrowd_t2t",
             subset_id="nllb_seed",
         ),
     ]
@@ -192,15 +192,15 @@ class NLLBSeed(datasets.GeneratorBasedBuilder):
                 }
             )
 
-        # Choose the appropriate
+        # Choose the appropriate seacrowd schema for your task and copy it here. You can find information on the schemas in the CONTRIBUTING guide.
 
-        # In rare cases you may get a dataset that supports multiple tasks requiring multiple schemas. In that case you can define multiple
+        # In rare cases you may get a dataset that supports multiple tasks requiring multiple schemas. In that case you can define multiple seacrowd configs with a seacrowd_[seacrowd_schema_name] format.
 
-        # For example
+        # For example seacrowd_kb, seacrowd_t2t
-        elif self.config.schema == "
+        elif self.config.schema == "seacrowd_t2t":
             # e.g. features = schemas.kb_features
             features = schemas.text2text_features
-            # TODO: Choose your
+            # TODO: Choose your seacrowd schema here
             # raise NotImplementedError()
 
         return datasets.DatasetInfo(
@@ -215,7 +215,7 @@ class NLLBSeed(datasets.GeneratorBasedBuilder):
         """Returns SplitGenerators."""
         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
 
-        # If you need to access the "source" or "
+        # If you need to access the "source" or "seacrowd" config choice, that will be in self.config.name
 
         # LOCAL DATASETS: You do not need the dl_manager; you can ignore this argument. Make sure `gen_kwargs` in the return gets passed the right filepath
 
@@ -275,8 +275,8 @@ class NLLBSeed(datasets.GeneratorBasedBuilder):
             for key, example in thing:
                 yield key, example
 
-        elif self.config.schema == "
+        elif self.config.schema == "seacrowd_t2t":
-            # TODO: yield (key, example) tuples in the
+            # TODO: yield (key, example) tuples in the seacrowd schema
             for key, example in thing:
                 yield key, example
        """
@@ -289,7 +289,7 @@ class NLLBSeed(datasets.GeneratorBasedBuilder):
                 }
                 yield id, row
 
-        elif self.config.schema == "
+        elif self.config.schema == "seacrowd_t2t":
             for id, (src, tgt) in enumerate(zip(lang_text, eng_text)):
                 row = {
                     "id": str(id),
@@ -301,13 +301,3 @@ class NLLBSeed(datasets.GeneratorBasedBuilder):
                 yield id, row
         else:
            raise ValueError(f"Invalid config: {self.config.name}")
-
-
-# This template is based on the following template from the datasets package:
-# https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
-
-
-# This allows you to run your dataloader with `python [dataset_name].py` during development
-# TODO: Remove this before making your PR
-if __name__ == "__main__":
-    datasets.load_dataset(__file__)
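After this change the script builds one config per entry of _LANGUAGE_MAP, named "nllb_seed_{lang}_{schema}" by seacrowd_config_constructor, with schema either "source" or "seacrowd_t2t". A minimal usage sketch, assuming the uploaded nllb_seed.py is in the working directory; the language code below is only a placeholder, since _LANGUAGE_MAP itself is not shown in this diff, and recent versions of the datasets library may additionally require trust_remote_code=True when loading a script:

import datasets

# Placeholder language code; replace with an actual key of _LANGUAGE_MAP.
lang = "bug_Latn"

# Source schema: fields as provided by the original NLLB Seed release.
ds_source = datasets.load_dataset("nllb_seed.py", name=f"nllb_seed_{lang}_source")

# SEACrowd text-to-text schema (schemas.text2text_features).
ds_t2t = datasets.load_dataset("nllb_seed.py", name=f"nllb_seed_{lang}_seacrowd_t2t")

print(ds_source)
print(ds_t2t)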