Datasets:

Modalities:
Image
Text
Formats:
parquet
DOI:
Libraries:
Datasets
Dask
License:
abumafrim committed on
Commit
d3eba4a
·
1 Parent(s): 33a9c88

uploaded data

Browse files
HausaVQA.py DELETED
@@ -1,128 +0,0 @@
1
- # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- """HaVQA: A Dataset for Visual Question Answering and Multimodal Research in Hausa Language."""
16
-
17
-
18
- import os
19
- import datasets
20
- from tqdm import tqdm
21
-
22
# BibTeX entry for the HaVQA paper (Parida et al., Findings of ACL 2023),
# surfaced by `datasets` as DatasetInfo.citation.
_CITATION = """\
@inproceedings{parida-etal-2023-havqa,
    title = "{H}a{VQA}: A Dataset for Visual Question Answering and Multimodal Research in {H}ausa Language",
    author = "Parida, Shantipriya and
      Abdulmumin, Idris and
      Muhammad, Shamsuddeen Hassan and
      Bose, Aneesh and
      Kohli, Guneet Singh and
      Ahmad, Ibrahim Said and
      Kotwal, Ketan and
      Deb Sarkar, Sayan and
      Bojar, Ond{\v{r}}ej and
      Kakudi, Habeebah",
    editor = "Rogers, Anna and
      Boyd-Graber, Jordan and
      Okazaki, Naoaki",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
    month = jul,
    year = "2023",
    address = "Toronto, Canada",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-acl.646",
    doi = "10.18653/v1/2023.findings-acl.646",
    pages = "10162--10183"
}
"""


# Human-readable summary shown on the dataset card (DatasetInfo.description).
_DESCRIPTION = """\
This paper presents “HaVQA”, the first multimodal dataset for visual question answering (VQA) tasks in the Hausa language. The dataset was created by manually translating 6,022 English question-answer pairs, which are associated with 1,555 unique images from the Visual Genome dataset. As a result, the dataset provides 12,044 gold standard English-Hausa parallel sentences that were translated in a fashion that guarantees their semantic match with the corresponding visual information. We conducted several baseline experiments on the dataset, including visual question answering, visual question elicitation, text-only and multimodal machine translation.
"""

# Project homepage and license URL surfaced in DatasetInfo.
_HOMEPAGE = "https://github.com/abumafrim/HausaVQA"
_LICENSE = "https://creativecommons.org/licenses/by-nc-sa/4.0/"
# Base raw-GitHub URL (note the trailing slash) under which the split files
# and the image directory live.
_URL = "https://raw.githubusercontent.com/abumafrim/HausaVQA/main/"
58
class HausaVQAClass(datasets.GeneratorBasedBuilder):
    """Builder for HaVQA, a Hausa-language visual question answering dataset.

    Each example pairs a Visual Genome image with a question/answer
    quadruple given in both English and Hausa. Split files are
    tab-separated with six columns:
    image_id, qa_id, ques_en, ans_en, ques_ha, ans_ha.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset schema and card metadata."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "qa_id": datasets.Value("int32"),
                "ques_en": datasets.Value("string"),
                "ans_en": datasets.Value("string"),
                "ques_ha": datasets.Value("string"),
                "ans_ha": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three split files plus every referenced image.

        URLs are built by plain string concatenation (``_URL`` ends in a
        slash) instead of ``os.path.join``, which would insert backslash
        separators on Windows and corrupt the URLs.
        """
        split_files = {
            datasets.Split.TRAIN: dl_manager.download(
                _URL + "hausa-visual-question-answer-train.txt"
            ),
            datasets.Split.VALIDATION: dl_manager.download(
                _URL + "hausa-visual-question-answer-dev.txt"
            ),
            datasets.Split.TEST: dl_manager.download(
                _URL + "hausa-visual-question-answer-test.txt"
            ),
        }

        # Collect the set of unique image ids referenced by any split so
        # each image is downloaded exactly once even when shared.
        image_ids = set()
        for filepath in split_files.values():
            with open(filepath, encoding="utf-8") as f:
                for line in f.read().splitlines():
                    # First tab-separated column is the image id.
                    image_ids.add(line.split("\t")[0])

        images_path = {
            image_id: dl_manager.download(
                _URL + "hausa-visual-question-answer-images/" + image_id + ".jpg"
            )
            for image_id in tqdm(image_ids, desc="Downloading images")
        }

        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": filepath, "image_paths": images_path},
            )
            for split_name, filepath in split_files.items()
        ]

    def _generate_examples(self, filepath, image_paths):
        """Yield ``(key, example)`` pairs from one tab-separated split file."""
        with open(filepath, encoding="utf-8") as f:
            lines = f.read().splitlines()
        for key, line in enumerate(lines):
            image_id, qa_id, ques_en, ans_en, ques_ha, ans_ha = line.split("\t")
            yield key, {
                "image": image_paths[image_id],
                # Cast explicitly so the value matches the declared int32
                # feature instead of relying on downstream coercion.
                "qa_id": int(qa_id),
                "ques_en": ques_en,
                "ans_en": ans_en,
                "ques_ha": ques_ha,
                "ans_ha": ans_ha,
            }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data/hausa_vqa-test.arrow ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:00641f91af739e69b450c4781ef786d85c2c45f6768dd066237736904e134a1e
3
+ size 132672
data/hausa_vqa-train.arrow ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:26fecae317af8056d7dbf147421a414e346c4527f69e2c6e793d92c272300127
3
+ size 1072064
data/hausa_vqa-validation.arrow ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f3ad146603bd52fe4eb40a8ddbf1fcbf78163857390eef9f3813c5ef3c1ae7a
3
+ size 133552
dataset_info.json ADDED
The diff for this file is too large to render. See raw diff