Modalities: Text · Formats: parquet · Libraries: Datasets, pandas
Commit 619386f (verified) by baber · Parent(s): e748e2b

Delete drop.py
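With the loading script gone, the repository is served from its parquet files (see the metadata above), so the Datasets library can load it directly without executing remote code. A minimal sketch of the post-deletion loading path, assuming a placeholder repo id of "user/drop" (the actual id is whatever this repository is named) and the train/validation splits the deleted script defined:

from datasets import load_dataset

# "user/drop" is a placeholder; substitute this repository's actual id.
# Once the script-based loader is removed, no trust_remote_code is needed:
# the library reads the repo's parquet files directly.
drop = load_dataset("user/drop")
print(drop["train"][0]["question"])
print(drop["validation"][0]["answer"]["spans"])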

Files changed (1): drop.py (+0 / -192)
drop.py DELETED
@@ -1,192 +0,0 @@
-# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Custom DROP dataset that, unlike HF, keeps all question-answer pairs
-# even if there are multiple types of answers for the same question.
-"""DROP dataset."""
-
-
-import json
-import os
-
-import datasets
-
-
-_CITATION = """\
-@misc{dua2019drop,
-    title={DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs},
-    author={Dheeru Dua and Yizhong Wang and Pradeep Dasigi and Gabriel Stanovsky and Sameer Singh and Matt Gardner},
-    year={2019},
-    eprint={1903.00161},
-    archivePrefix={arXiv},
-    primaryClass={cs.CL}
-}
-"""
-
-_DESCRIPTION = """\
-DROP is a QA dataset which tests comprehensive understanding of paragraphs. In
-this crowdsourced, adversarially-created, 96k question-answering benchmark, a
-system must resolve multiple references in a question, map them onto a paragraph,
-and perform discrete operations over them (such as addition, counting, or sorting).
-"""
-
-_HOMEPAGE = "https://allenai.org/data/drop"
-
-# License declared at https://allenai.org/data/drop
-_LICENSE = "CC BY 4.0"
-
-_URLS = {
-    "drop": "https://s3-us-west-2.amazonaws.com/allennlp/datasets/drop/drop_dataset.zip",
-}
-
-_EMPTY_VALIDATED_ANSWER = [
-    {
-        "number": "",
-        "date": {
-            "day": "",
-            "month": "",
-            "year": "",
-        },
-        "spans": [],
-        "worker_id": "",
-        "hit_id": "",
-    }
-]
-
-
-class Drop(datasets.GeneratorBasedBuilder):
-    """DROP is a QA dataset which tests comprehensive understanding of paragraphs."""
-
-    VERSION = datasets.Version("0.0.1")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(
-            name="drop", version=VERSION, description="The DROP dataset."
-        ),
-    ]
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "section_id": datasets.Value("string"),
-                "passage": datasets.Value("string"),
-                "question": datasets.Value("string"),
-                "query_id": datasets.Value("string"),
-                "answer": {
-                    "number": datasets.Value("string"),
-                    "date": {
-                        "day": datasets.Value("string"),
-                        "month": datasets.Value("string"),
-                        "year": datasets.Value("string"),
-                    },
-                    "spans": datasets.features.Sequence(datasets.Value("string")),
-                    "worker_id": datasets.Value("string"),
-                    "hit_id": datasets.Value("string"),
-                },
-                "validated_answers": datasets.features.Sequence(
-                    {
-                        "number": datasets.Value("string"),
-                        "date": {
-                            "day": datasets.Value("string"),
-                            "month": datasets.Value("string"),
-                            "year": datasets.Value("string"),
-                        },
-                        "spans": datasets.features.Sequence(datasets.Value("string")),
-                        "worker_id": datasets.Value("string"),
-                        "hit_id": datasets.Value("string"),
-                    }
-                ),
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        urls = _URLS[self.config.name]
-        data_dir = dl_manager.download_and_extract(urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "drop_dataset", "drop_dataset_train.json"
-                    ),
-                    "split": "train",
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={
-                    "filepath": os.path.join(
-                        data_dir, "drop_dataset", "drop_dataset_dev.json"
-                    ),
-                    "split": "validation",
-                },
-            ),
-        ]
-
-    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-    def _generate_examples(self, filepath, split):
-        with open(filepath, encoding="utf-8") as f:
-            data = json.load(f)
-            key = 0
-            for section_id, example in data.items():
-                # Each example (passage) has multiple sub-question-answer pairs.
-                for qa in example["qa_pairs"]:
-                    # Build answer.
-                    answer = qa["answer"]
-                    answer = {
-                        "number": answer["number"],
-                        "date": {
-                            "day": answer["date"].get("day", ""),
-                            "month": answer["date"].get("month", ""),
-                            "year": answer["date"].get("year", ""),
-                        },
-                        "spans": answer["spans"],
-                        "worker_id": answer.get("worker_id", ""),
-                        "hit_id": answer.get("hit_id", ""),
-                    }
-                    validated_answers = []
-                    if "validated_answers" in qa:
-                        for validated_answer in qa["validated_answers"]:
-                            va = {
-                                "number": validated_answer.get("number", ""),
-                                "date": {
-                                    "day": validated_answer["date"].get("day", ""),
-                                    "month": validated_answer["date"].get("month", ""),
-                                    "year": validated_answer["date"].get("year", ""),
-                                },
-                                "spans": validated_answer.get("spans", ""),
-                                "worker_id": validated_answer.get("worker_id", ""),
-                                "hit_id": validated_answer.get("hit_id", ""),
-                            }
-                            validated_answers.append(va)
-                    else:
-                        validated_answers = _EMPTY_VALIDATED_ANSWER
-                    yield key, {
-                        "section_id": section_id,
-                        "passage": example["passage"],
-                        "question": qa["question"],
-                        "query_id": qa["query_id"],
-                        "answer": answer,
-                        "validated_answers": validated_answers,
-                    }
-                    key += 1
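
For reference, the flattening the deleted builder performed is easy to reproduce without the datasets scaffolding. A minimal standalone sketch of the same logic, assuming the raw drop_dataset_*.json layout the script downloaded (passages keyed by section id, each carrying a qa_pairs list); iter_drop_examples is an illustrative helper name, not part of any library:

import json

def iter_drop_examples(filepath):
    # Yield one flat record per question-answer pair, mirroring the deleted
    # _generate_examples: every QA pair is kept, even when a question has
    # several answer types.
    with open(filepath, encoding="utf-8") as f:
        data = json.load(f)
    for section_id, example in data.items():
        for qa in example["qa_pairs"]:
            answer = qa["answer"]
            yield {
                "section_id": section_id,
                "passage": example["passage"],
                "question": qa["question"],
                "query_id": qa["query_id"],
                "answer_number": answer["number"],
                "answer_spans": answer["spans"],
                "num_validated_answers": len(qa.get("validated_answers", [])),
            }

# Usage, assuming drop_dataset.zip has been extracted locally:
# for row in iter_drop_examples("drop_dataset/drop_dataset_train.json"):
#     print(row["query_id"], row["answer_spans"])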