Datasets:

Modalities:
Image
Text
Formats:
parquet
DOI:
Libraries:
Datasets
Dask
License:
abumafrim committed on
Commit
33a9c88
·
1 Parent(s): e0ba032

Uploading data builder

Browse files
Files changed (2) hide show
  1. HausaVQA.py +128 -0
  2. README.md +26 -0
HausaVQA.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ """HaVQA: A Dataset for Visual Question Answering and Multimodal Research in Hausa Language."""
16
+
17
+
18
+ import os
19
+ import datasets
20
+ from tqdm import tqdm
21
+
22
+ _CITATION = """\
23
+ @inproceedings{parida-etal-2023-havqa,
24
+ title = "{H}a{VQA}: A Dataset for Visual Question Answering and Multimodal Research in {H}ausa Language",
25
+ author = "Parida, Shantipriya and
26
+ Abdulmumin, Idris and
27
+ Muhammad, Shamsuddeen Hassan and
28
+ Bose, Aneesh and
29
+ Kohli, Guneet Singh and
30
+ Ahmad, Ibrahim Said and
31
+ Kotwal, Ketan and
32
+ Deb Sarkar, Sayan and
33
+ Bojar, Ond{\v{r}}ej and
34
+ Kakudi, Habeebah",
35
+ editor = "Rogers, Anna and
36
+ Boyd-Graber, Jordan and
37
+ Okazaki, Naoaki",
38
+ booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
39
+ month = jul,
40
+ year = "2023",
41
+ address = "Toronto, Canada",
42
+ publisher = "Association for Computational Linguistics",
43
+ url = "https://aclanthology.org/2023.findings-acl.646",
44
+ doi = "10.18653/v1/2023.findings-acl.646",
45
+ pages = "10162--10183"
46
+ }
47
+ """
48
+
49
+
50
+ _DESCRIPTION = """\
51
+ This paper presents “HaVQA”, the first multimodal dataset for visual question answering (VQA) tasks in the Hausa language. The dataset was created by manually translating 6,022 English question-answer pairs, which are associated with 1,555 unique images from the Visual Genome dataset. As a result, the dataset provides 12,044 gold standard English-Hausa parallel sentences that were translated in a fashion that guarantees their semantic match with the corresponding visual information. We conducted several baseline experiments on the dataset, including visual question answering, visual question elicitation, text-only and multimodal machine translation.
52
+ """
53
+
54
+ _HOMEPAGE = "https://github.com/abumafrim/HausaVQA"
55
+ _LICENSE = "https://creativecommons.org/licenses/by-nc-sa/4.0/"
56
+ _URL = "https://raw.githubusercontent.com/abumafrim/HausaVQA/main/"
57
+
58
class HausaVQAClass(datasets.GeneratorBasedBuilder):
    """Dataset builder for HaVQA: visual question answering in Hausa.

    Each example pairs a Visual Genome image with an English/Hausa
    question-answer pair, read from tab-separated split files hosted on
    the project's GitHub repository.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata (description, features, license, citation)."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "qa_id": datasets.Value("int32"),
                "ques_en": datasets.Value("string"),
                "ans_en": datasets.Value("string"),
                "ques_ha": datasets.Value("string"),
                "ans_ha": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three split files plus every referenced image.

        URLs are built with plain string concatenation (``_URL`` ends with
        "/") instead of ``os.path.join``, which would insert backslashes on
        Windows and produce invalid URLs.
        """
        train_data = dl_manager.download(_URL + "hausa-visual-question-answer-train.txt")
        valid_data = dl_manager.download(_URL + "hausa-visual-question-answer-dev.txt")
        test_data = dl_manager.download(_URL + "hausa-visual-question-answer-test.txt")

        # Collect the unique image ids referenced by any split so each image
        # is downloaded exactly once even when shared across splits.
        image_ids = set()
        for filepath in (train_data, valid_data, test_data):
            with open(filepath, encoding="utf-8") as f:
                for line in f.read().splitlines():
                    # Row layout: image_id \t qa_id \t ques_en \t ans_en \t ques_ha \t ans_ha
                    image_ids.add(line.split("\t")[0])

        # Map image_id -> local path of the downloaded .jpg. (Renamed from
        # ``id``, which shadowed the builtin.)
        images_path = {}
        for image_id in tqdm(image_ids, desc="Downloading images"):
            images_path[image_id] = dl_manager.download(
                _URL + "hausa-visual-question-answer-images/" + image_id + ".jpg"
            )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_data, "image_paths": images_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": valid_data, "image_paths": images_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_data, "image_paths": images_path},
            ),
        ]

    def _generate_examples(self, filepath, image_paths):
        """Yield ``(row_index, example)`` pairs from one tab-separated split file.

        Args:
            filepath: local path of a downloaded split file.
            image_paths: mapping of image_id -> local image path.
        """
        with open(filepath, encoding="utf-8") as f:
            for row, line in enumerate(f.read().splitlines()):
                image_id, qa_id, ques_en, ans_en, ques_ha, ans_ha = line.split("\t")
                yield row, {
                    "image": image_paths[image_id],
                    # Cast so the value matches the declared int32 feature
                    # instead of relying on implicit string->int coercion.
                    "qa_id": int(qa_id),
                    "ques_en": ques_en,
                    "ans_en": ans_en,
                    "ques_ha": ques_ha,
                    "ans_ha": ans_ha,
                }
README.md CHANGED
@@ -1,3 +1,29 @@
1
  ---
2
  license: cc-by-sa-4.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  ---
 
1
  ---
2
 license: cc-by-nc-sa-4.0
3
+ dataset_info:
4
+ features:
5
+ - name: image
6
+ dtype: image
7
+ - name: qa_id
8
+ dtype: int32
9
+ - name: ques_en
10
+ dtype: string
11
+ - name: ans_en
12
+ dtype: string
13
+ - name: ques_ha
14
+ dtype: string
15
+ - name: ans_ha
16
+ dtype: string
17
+ splits:
18
+ - name: train
19
+ num_bytes: 1068111
20
+ num_examples: 4816
21
+ - name: validation
22
+ num_bytes: 132120
23
+ num_examples: 602
24
+ - name: test
25
+ num_bytes: 131229
26
+ num_examples: 602
27
+ download_size: 205165688
28
+ dataset_size: 1331460
29
  ---