holylovenia committed fd85fe0 (verified) · 1 parent: d9111ea

Upload uit_viwikiqa.py with huggingface_hub

Files changed (1)
  1. uit_viwikiqa.py +199 -0
uit_viwikiqa.py ADDED
@@ -0,0 +1,199 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import (SCHEMA_TO_FEATURES, TASK_TO_SCHEMA,
                                      Licenses, Tasks)

_CITATION = """\
@misc{do2021sentence,
    title={Sentence Extraction-Based Machine Reading Comprehension for Vietnamese},
    author={Phong Nguyen-Thuan Do and Nhat Duy Nguyen and Tin Van Huynh and Kiet Van Nguyen and Anh Gia-Tuan Nguyen and Ngan Luu-Thuy Nguyen},
    year={2021},
    eprint={2105.09043},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_DATASETNAME = "uit_viwikiqa"

_DESCRIPTION = """
UIT-ViWikiQA is a Vietnamese sentence extraction-based machine reading comprehension
dataset. It is created from the UIT-ViQuAD dataset. It comprises 23,074 question-answer
pairs based on 5,109 passages from 174 Vietnamese Wikipedia articles.
"""

_HOMEPAGE = "https://sites.google.com/uit.edu.vn/kietnv/datasets#h.bp2c6hj2hb5q"

_LANGUAGES = ["vie"]

_LICENSE = f"""{Licenses.OTHERS.value} |
The user of UIT-ViWikiQA developed by the NLP@UIT research group must respect the
following terms and conditions:
1. The dataset is only used for non-profit research for image captioning.
2. The dataset is not allowed to be used in commercial systems.
3. Do not redistribute the dataset. This dataset may be modified or improved to serve a
research purpose better, but the edited dataset may not be distributed.
4. Summaries, analyses, and interpretations of the properties of the dataset may be
derived and published, provided it is not possible to reconstruct the information from
these summaries.
5. Published research works that use the dataset must cite the following paper: Do,
P.N.T., Nguyen, N.D., Van Huynh, T., Van Nguyen, K., Nguyen, A.G.T. and Nguyen, N.L.T.,
2021. Sentence Extraction-Based Machine Reading Comprehension for Vietnamese. arXiv
preprint arXiv:2105.09043.
"""

_LOCAL = True  # users need to sign a user agreement; see _HOMEPAGE

_URLS = {}  # local dataset

_SUPPORTED_TASKS = [Tasks.QUESTION_ANSWERING]
_SEACROWD_SCHEMA = f"seacrowd_{TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]].lower()}"  # qa

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class UITViWikiQADataset(datasets.GeneratorBasedBuilder):
    """Vietnamese sentence extraction-based machine reading comprehension dataset derived from the UIT-ViQuAD dataset."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name=f"{_DATASETNAME}_source",
            version=SOURCE_VERSION,
            description=f"{_DATASETNAME} source schema",
            schema="source",
            subset_id=_DATASETNAME,
        ),
        SEACrowdConfig(
            name=f"{_DATASETNAME}_{_SEACROWD_SCHEMA}",
            version=SEACROWD_VERSION,
            description=f"{_DATASETNAME} SEACrowd schema",
            schema=_SEACROWD_SCHEMA,
            subset_id=_DATASETNAME,
        ),
    ]

    DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "title": datasets.Value("string"),
                    "context": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answers": datasets.Sequence(
                        {
                            "text": datasets.Value("string"),
                            "answer_start": datasets.Value("int32"),
                        }
                    ),
                }
            )
        elif self.config.schema == _SEACROWD_SCHEMA:
            features = SCHEMA_TO_FEATURES[TASK_TO_SCHEMA[_SUPPORTED_TASKS[0]]]  # qa_features
            features["meta"] = {
                "title": datasets.Value("string"),
                "answers_start": datasets.Sequence(datasets.Value("int32")),
            }

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        if self.config.data_dir is None:
            raise ValueError("This is a local dataset. Please pass the `data_dir` kwarg (where the .json files are located) to load_dataset.")
        else:
            data_dir = Path(self.config.data_dir)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # Whatever is put in gen_kwargs will be passed to _generate_examples
                gen_kwargs={
                    "file_path": data_dir / "train_ViWikiQA.json",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "file_path": data_dir / "dev_ViWikiQA.json",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "file_path": data_dir / "test_ViWikiQA.json",
                },
            ),
        ]

    def _generate_examples(self, file_path: Path) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        with open(file_path, "r", encoding="utf-8") as file:
            data = json.load(file)

        key = 0
        for example in data["data"]:

            if self.config.schema == "source":
                for paragraph in example["paragraphs"]:
                    for qa in paragraph["qas"]:
                        yield key, {
                            "id": qa["id"],
                            "title": example["title"],
                            "context": paragraph["context"],
                            "question": qa["question"],
                            "answers": qa["answers"],
                        }
                        key += 1

            elif self.config.schema == _SEACROWD_SCHEMA:
                for paragraph in example["paragraphs"]:
                    for qa in paragraph["qas"]:
                        yield key, {
                            "id": str(key),
                            "question_id": qa["id"],
                            "document_id": None,
                            "question": qa["question"],
                            "type": None,
                            "choices": [],  # left empty to satisfy the SEACrowd multiple-choice QA schema check
                            "context": paragraph["context"],
                            "answer": [answer["text"] for answer in qa["answers"]],
                            "meta": {
                                "title": example["title"],
                                "answers_start": [answer["answer_start"] for answer in qa["answers"]],
                            },
                        }
                        key += 1
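
For orientation, `_generate_examples` walks the SQuAD-style nesting `data` → `paragraphs` → `qas` → `answers`, so the three local JSON files must follow that shape. A minimal sketch of one input file as a Python literal; all field values are illustrative placeholders, not real dataset entries:

# Hypothetical shape of train_ViWikiQA.json as read by _generate_examples;
# every value below is a placeholder.
viwikiqa_file = {
    "data": [
        {
            "title": "<article title>",
            "paragraphs": [
                {
                    "context": "<passage text>",
                    "qas": [
                        {
                            "id": "<question id>",
                            "question": "<question text>",
                            "answers": [
                                {"text": "<answer sentence>", "answer_start": 0}
                            ],
                        }
                    ],
                }
            ],
        }
    ]
}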
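Because `_LOCAL = True`, the script refuses to run without a `data_dir`. A minimal usage sketch, not part of the commit: it assumes the `seacrowd` package is installed (the script imports it), that the three JSON files obtained from the homepage sit in `./viwikiqa_data`, and that your version of `datasets` accepts `trust_remote_code` (newer releases require it for script-based datasets).

from datasets import load_dataset

ds = load_dataset(
    "uit_viwikiqa.py",           # path to the script uploaded in this commit
    name="uit_viwikiqa_source",  # or "uit_viwikiqa_seacrowd_qa" for the SEACrowd schema
    data_dir="./viwikiqa_data",  # must contain {train,dev,test}_ViWikiQA.json
    trust_remote_code=True,
)
print(ds)             # DatasetDict with train/validation/test splits
print(ds["train"][0]) # one question-answer record in the source schema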