Datasets:

ArXiv:
License:
holylovenia committed on
Commit
fd45515
1 Parent(s): 8132af1

Upload aya_collection_translated.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. aya_collection_translated.py +195 -0
aya_collection_translated.py ADDED
@@ -0,0 +1,195 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pathlib import Path
2
+ from typing import Dict, List, Tuple
3
+
4
+ import datasets
5
+ import pandas as pd
6
+
7
+ from seacrowd.utils import schemas
8
+ from seacrowd.utils.configs import SEACrowdConfig
9
+ from seacrowd.utils.constants import Licenses, Tasks
10
+
11
# BibTeX citation for the Aya Collection paper (arXiv:2402.06619).
_CITATION = """
@misc{singh2024aya,
title={Aya Dataset: An Open-Access Collection for Multilingual Instruction Tuning},
author={Shivalika Singh and Freddie Vargus and Daniel Dsouza and Börje F. Karlsson and
Abinaya Mahendiran and Wei-Yin Ko and Herumb Shandilya and Jay Patel and Deividas
Mataciunas and Laura OMahony and Mike Zhang and Ramith Hettiarachchi and Joseph
Wilson and Marina Machado and Luisa Souza Moura and Dominik Krzemiński and Hakimeh
Fadaei and Irem Ergün and Ifeoma Okoh and Aisha Alaagib and Oshan Mudannayake and
Zaid Alyafeai and Vu Minh Chien and Sebastian Ruder and Surya Guthikonda and Emad A.
Alghamdi and Sebastian Gehrmann and Niklas Muennighoff and Max Bartolo and Julia Kreutzer
and Ahmet Üstün and Marzieh Fadaee and Sara Hooker},
year={2024},
eprint={2402.06619},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""

_DATASETNAME = "aya_collection_translated"

_DESCRIPTION = """
The Aya Collection is a massive multilingual collection consisting of 513 million instances of prompts and
completions covering a wide range of tasks. This dataset covers the translated prompts of the Aya Collection.
"""

_HOMEPAGE = "https://huggingface.co/datasets/CohereForAI/aya_collection_language_split"

# ISO 639-3 codes of the Southeast Asian languages covered by this loader.
_LANGUAGES = ["ceb", "tha", "mya", "zsm", "jav", "ind", "vie", "sun", "ace", "bjn", "khm", "lao", "min"]

_LICENSE = Licenses.APACHE_2_0.value

_LOCAL = False

# Common prefix of every per-language directory on the Hugging Face Hub.
_BASE_URL = "https://huggingface.co/datasets/CohereForAI/aya_collection_language_split/resolve/main"

# Maps each ISO 639-3 code to the Hub directory holding that language's parquet files.
_URLS = {
    "ceb": f"{_BASE_URL}/cebuano",
    "tha": f"{_BASE_URL}/thai",
    "mya": f"{_BASE_URL}/burmese",
    # FIX: zsm (Standard Malay) previously pointed at the "malayalam" folder.
    # Malayalam is a Dravidian language of India, unrelated to Malay; the
    # upstream repository's folder for Standard Malay is "standard_malay".
    "zsm": f"{_BASE_URL}/standard_malay",
    "jav": f"{_BASE_URL}/javanese",
    "ind": f"{_BASE_URL}/indonesian",
    "vie": f"{_BASE_URL}/vietnamese",
    "sun": f"{_BASE_URL}/sundanese",
    "ace": f"{_BASE_URL}/achinese",
    "bjn": f"{_BASE_URL}/banjar",
    "khm": f"{_BASE_URL}/central_khmer",
    "lao": f"{_BASE_URL}/lao",
    "min": f"{_BASE_URL}/minangkabau",
}

_SUPPORTED_TASKS = [Tasks.INSTRUCTION_TUNING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"
67
class AyaCollectionTranslatedDataset(datasets.GeneratorBasedBuilder):
    """SEACrowd dataloader for the translated prompts of the Aya Collection.

    The Aya Collection is a massive multilingual collection consisting of 513
    million instances of prompts and completions covering a wide range of
    tasks. This loader exposes the per-language parquet files from the
    CohereForAI/aya_collection_language_split Hub repository in both the
    upstream ("source") schema and the SEACrowd text-to-text schema.
    """

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    # One "source" and one "seacrowd_t2t" config per supported language.
    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{lang}_source",
            version=datasets.Version(_SOURCE_VERSION),
            description=f"{_DATASETNAME} {lang} source schema",
            schema="source",
            subset_id=f"{_DATASETNAME}_{lang}",
        )
        for lang in _LANGUAGES
    ] + [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{lang}_seacrowd_t2t",
            version=datasets.Version(_SEACROWD_VERSION),
            description=f"{_DATASETNAME} {lang} SEACrowd schema",
            schema="seacrowd_t2t",
            subset_id=f"{_DATASETNAME}_{lang}",
        )
        for lang in _LANGUAGES
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_ind_source"

    def _info(self) -> datasets.DatasetInfo:
        """Return dataset metadata with the features of the selected schema.

        Raises:
            ValueError: if the config carries an unrecognized schema name.
        """
        if self.config.schema == "source":
            # Mirror of the upstream parquet columns.
            features = datasets.Features(
                {
                    "id": datasets.Value("int64"),
                    "inputs": datasets.Value("string"),
                    "targets": datasets.Value("string"),
                    "dataset_name": datasets.Value("string"),
                    "sub_dataset_name": datasets.Value("string"),
                    "task_type": datasets.Value("string"),
                    "template_id": datasets.Value("int64"),
                    "language": datasets.Value("string"),
                    "script": datasets.Value("string"),
                    "split": datasets.Value("string"),
                }
            )
        elif self.config.schema == "seacrowd_t2t":
            features = schemas.text2text_features
        else:
            # FIX: previously an unknown schema fell through and surfaced as an
            # opaque NameError on `features`; fail with a clear message instead.
            raise ValueError(f"Unsupported schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Download the per-language parquet files and return SplitGenerators.

        Raises:
            ValueError: if the language embedded in the config name is not in
                `_LANGUAGES` (previously this case silently returned None).
        """
        # Config names look like "aya_collection_translated_<lang>_<schema>",
        # so index 3 of the "_"-split is the language code.
        language = self.config.name.split("_")[3]
        if language not in _LANGUAGES:
            raise ValueError(f"Unsupported language: {language}")

        # The number of train shards varies per language, so probe every
        # plausible "train-000XX-of-000YY.parquet" name and keep the shards
        # that exist; a missing shard fails to download and is skipped.
        # FIX: the inner loop variable was named `all`, shadowing the builtin.
        data_train_paths = []
        for shard in (0, 1, 2):  # shard index
            for total in (1, 2, 3):  # total shard count
                if shard >= total:
                    continue
                url = f"{_URLS[language]}/train-0000{shard}-of-0000{total}.parquet?download=true"
                try:
                    data_train_paths.append(Path(dl_manager.download_and_extract(url)))
                except Exception:
                    # Deliberate best-effort: this shard/total combination
                    # does not exist upstream for this language.
                    continue

        data_validation_path = Path(dl_manager.download_and_extract(f"{_URLS[language]}/validation-00000-of-00001.parquet?download=true"))
        data_test_path = Path(dl_manager.download_and_extract(f"{_URLS[language]}/test-00000-of-00001.parquet?download=true"))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_train_paths,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_test_path,
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_validation_path,
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath, split: str) -> Tuple[int, Dict]:
        """Yield (key, example) tuples from the downloaded parquet file(s).

        Args:
            filepath: a single Path (test/validation) or a list of shard
                Paths (train).
            split: split name — unused here, kept for the builder contract.

        Raises:
            ValueError: if the config carries an unrecognized schema name.
        """
        paths = [filepath] if isinstance(filepath, Path) else list(filepath)
        df = pd.concat((pd.read_parquet(path) for path in paths), ignore_index=True)

        for index, row in df.iterrows():
            if self.config.schema == "source":
                example = row.to_dict()
            elif self.config.schema == "seacrowd_t2t":
                example = {
                    "id": str(index),
                    "text_1": row["inputs"],
                    "text_2": row["targets"],
                    "text_1_name": "inputs",
                    "text_2_name": "targets",
                }
            else:
                # FIX: previously an unknown schema fell through and raised an
                # opaque NameError on `example`; fail with a clear message.
                raise ValueError(f"Unsupported schema: {self.config.schema}")

            yield index, example