Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas

Committed by albertvillanova (HF staff)
Commit: 7603457 (1 parent: 210a4d2)

Convert dataset to Parquet (#2)

- Convert dataset to Parquet (ffd96bdfa1890b305a8f6d10b8bad0aadac5fc1c)
- Delete loading script (1251ea30cab7d811a89a9810b280dd0ab2c9f9fd)
- Delete legacy dataset_infos.json (c9e8b5f6725ede38f0fc0c9a36cb85d46a9fe168)
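
The dataset is now served directly from the hosted Parquet files, so no loading script runs at load time. A minimal sketch of what loading looks like after this commit, assuming the dataset is addressed by its Hub name `drop`:

```python
from datasets import load_dataset

# Loads the Parquet-backed splits directly; the deleted drop.py script is no longer executed.
ds = load_dataset("drop")

print(ds["train"].num_rows)       # expected: 77400 examples
print(ds["validation"].num_rows)  # expected: 9535 examples
print(ds["train"][0]["question"])
```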

README.md CHANGED
@@ -1,5 +1,4 @@
  ---
- pretty_name: DROP
  annotations_creators:
  - crowdsourced
  language_creators:
@@ -21,6 +20,7 @@ task_ids:
  - extractive-qa
  - abstractive-qa
  paperswithcode_id: drop
+ pretty_name: DROP
  dataset_info:
    features:
    - name: section_id
@@ -39,13 +39,20 @@ dataset_info:
      dtype: string
    splits:
    - name: train
-     num_bytes: 105572762
+     num_bytes: 105572506
      num_examples: 77400
    - name: validation
-     num_bytes: 11737787
+     num_bytes: 11737755
      num_examples: 9535
-   download_size: 8308692
-   dataset_size: 117310549
+   download_size: 11538387
+   dataset_size: 117310261
+ configs:
+ - config_name: default
+   data_files:
+   - split: train
+     path: data/train-*
+   - split: validation
+     path: data/validation-*
  ---

  # Dataset Card for "drop"
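
The new `configs` block maps each split to its Parquet shard(s) via the `data_files` patterns, so a split can also be read directly with pandas. A sketch under the assumption that the files live in a Hub dataset repository named `drop` and that the `huggingface_hub` fsspec integration is available:

```python
import pandas as pd

# Read one split's Parquet shard straight from the Hub through the hf:// filesystem.
# The path mirrors the data_files pattern declared in the README's configs block.
val = pd.read_parquet("hf://datasets/drop/data/validation-00000-of-00001.parquet")

print(val.shape)          # expected: (9535, 5)
print(list(val.columns))  # section_id, query_id, passage, question, answers_spans
```
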
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abb5e578c2156a61f83b56c066a922ec1c7c5140638a3f0f2a7c348fafe1cb35
+ size 10333127
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f9a3bdbb1b5909abfa25cbab693f89f47568c98e6e03473500d604f044c8f68
+ size 1205260
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"default": {"description": "DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs.\n. DROP is a crowdsourced, adversarially-created, 96k-question benchmark, in which a system must resolve references in a\nquestion, perhaps to multiple input positions, and perform discrete operations over them (such as addition, counting, or\n sorting). These operations require a much more comprehensive understanding of the content of paragraphs than what was\n necessary for prior datasets.\n", "citation": "@inproceedings{Dua2019DROP,\n author={Dheeru Dua and Yizhong Wang and Pradeep Dasigi and Gabriel Stanovsky and Sameer Singh and Matt Gardner},\n title={DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs},\n booktitle={Proc. of NAACL},\n year={2019}\n}\n", "homepage": "https://allennlp.org/drop", "license": "", "features": {"section_id": {"dtype": "string", "id": null, "_type": "Value"}, "query_id": {"dtype": "string", "id": null, "_type": "Value"}, "passage": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers_spans": {"feature": {"spans": {"dtype": "string", "id": null, "_type": "Value"}, "types": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "builder_name": "drop", "config_name": "default", "version": {"version_str": "0.1.0", "description": null, "major": 0, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 105572762, "num_examples": 77400, "dataset_name": "drop"}, "validation": {"name": "validation", "num_bytes": 11737787, "num_examples": 9535, "dataset_name": "drop"}}, "download_checksums": {"https://s3-us-west-2.amazonaws.com/allennlp/datasets/drop/drop_dataset.zip": {"num_bytes": 8308692, "checksum": "39d2278a29fd729de301b111a45f434c24834f40df8f4ff116d864589e3249d6"}}, "download_size": 8308692, "post_processing_size": null, "dataset_size": 117310549, "size_in_bytes": 125619241}}
 
 
drop.py DELETED
@@ -1,202 +0,0 @@
- """TODO(drop): Add a description here."""
-
-
- import json
- import os
-
- import datasets
-
-
- _CITATION = """\
- @inproceedings{Dua2019DROP,
-  author={Dheeru Dua and Yizhong Wang and Pradeep Dasigi and Gabriel Stanovsky and Sameer Singh and Matt Gardner},
-  title={DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs},
-  booktitle={Proc. of NAACL},
-  year={2019}
- }
- """
-
- _DESCRIPTION = """\
- DROP: A Reading Comprehension Benchmark Requiring Discrete Reasoning Over Paragraphs.
- . DROP is a crowdsourced, adversarially-created, 96k-question benchmark, in which a system must resolve references in a
- question, perhaps to multiple input positions, and perform discrete operations over them (such as addition, counting, or
-  sorting). These operations require a much more comprehensive understanding of the content of paragraphs than what was
-  necessary for prior datasets.
- """
- _URL = "https://s3-us-west-2.amazonaws.com/allennlp/datasets/drop/drop_dataset.zip"
-
-
- class AnswerParsingError(Exception):
-     pass
-
-
- class DropDateObject:
-     """
-     Custom parser for date answers in DROP.
-     A date answer is a dict <date> with at least one of day|month|year.
-
-     Example: date == {
-         'day': '9',
-         'month': 'March',
-         'year': '2021'
-     }
-
-     This dict is parsed and flattened to '{day} {month} {year}', not including
-     blank values.
-
-     Example: str(DropDateObject(date)) == '9 March 2021'
-     """
-
-     def __init__(self, dict_date):
-         self.year = dict_date.get("year", "")
-         self.month = dict_date.get("month", "")
-         self.day = dict_date.get("day", "")
-
-     def __iter__(self):
-         yield from [self.day, self.month, self.year]
-
-     def __bool__(self):
-         return any(self)
-
-     def __repr__(self):
-         return " ".join(self).strip()
-
-
- class Drop(datasets.GeneratorBasedBuilder):
-     """TODO(drop): Short description of my dataset."""
-
-     # TODO(drop): Set up version.
-     VERSION = datasets.Version("0.1.0")
-
-     def _info(self):
-         # TODO(drop): Specifies the datasets.DatasetInfo object
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # datasets.features.FeatureConnectors
-             features=datasets.Features(
-                 {
-                     "section_id": datasets.Value("string"),
-                     "query_id": datasets.Value("string"),
-                     "passage": datasets.Value("string"),
-                     "question": datasets.Value("string"),
-                     "answers_spans": datasets.features.Sequence(
-                         {"spans": datasets.Value("string"), "types": datasets.Value("string")}
-                     )
-                     # These are the features of your dataset like images, labels ...
-                 }
-             ),
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage="https://allennlp.org/drop",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # TODO(drop): Downloads the data and defines the splits
-         # dl_manager is a datasets.download.DownloadManager that can be used to
-         # download and extract URLs
-         dl_dir = dl_manager.download_and_extract(_URL)
-         data_dir = os.path.join(dl_dir, "drop_dataset")
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={"filepath": os.path.join(data_dir, "drop_dataset_train.json"), "split": "train"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={"filepath": os.path.join(data_dir, "drop_dataset_dev.json"), "split": "validation"},
-             ),
-         ]
-
-     def _generate_examples(self, filepath, split):
-         """Yields examples."""
-         # TODO(drop): Yields (key, example) tuples from the dataset
-         with open(filepath, mode="r", encoding="utf-8") as f:
-             data = json.load(f)
-             id_ = 0
-             for i, (section_id, section) in enumerate(data.items()):
-                 for j, qa in enumerate(section["qa_pairs"]):
-
-                     example = {
-                         "section_id": section_id,
-                         "query_id": qa["query_id"],
-                         "passage": section["passage"],
-                         "question": qa["question"],
-                     }
-
-                     if split == "train":
-                         answers = [qa["answer"]]
-                     else:
-                         answers = qa["validated_answers"]
-
-                     try:
-                         example["answers_spans"] = self.build_answers(answers)
-                         yield id_, example
-                         id_ += 1
-                     except AnswerParsingError:
-                         # This is expected for 9 examples of train
-                         # and 1 of validation.
-                         continue
-
-     @staticmethod
-     def _raise(message):
-         """
-         Raise a custom AnswerParsingError, to be sure to only catch our own
-         errors. Messages are irrelevant for this script, but are written to
-         ease understanding the code.
-         """
-         raise AnswerParsingError(message)
-
-     def build_answers(self, answers):
-
-         returned_answers = {
-             "spans": list(),
-             "types": list(),
-         }
-         for answer in answers:
-             date = DropDateObject(answer["date"])
-
-             if answer["number"] != "":
-                 # sanity checks
-                 if date:
-                     self._raise("This answer is both number and date!")
-                 if len(answer["spans"]):
-                     self._raise("This answer is both number and text!")
-
-                 returned_answers["spans"].append(answer["number"])
-                 returned_answers["types"].append("number")
-
-             elif date:
-                 # sanity check
-                 if len(answer["spans"]):
-                     self._raise("This answer is both date and text!")
-
-                 returned_answers["spans"].append(str(date))
-                 returned_answers["types"].append("date")
-
-             # won't trigger if len(answer['spans']) == 0
-             for span in answer["spans"]:
-                 # sanity checks
-                 if answer["number"] != "":
-                     self._raise("This answer is both text and number!")
-                 if date:
-                     self._raise("This answer is both text and date!")
-
-                 returned_answers["spans"].append(span)
-                 returned_answers["types"].append("span")
-
-         # sanity check
-         _len = len(returned_answers["spans"])
-         if not _len:
-             self._raise("Empty answer.")
-         if any(len(l) != _len for _, l in returned_answers.items()):
-             self._raise("Something went wrong while parsing answer values/types")
-
-         return returned_answers
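
The answer normalization implemented above (numbers, dates flattened by DropDateObject, and text spans, all emitted as parallel `spans`/`types` lists) is already materialized in the converted Parquet data, so it can be inspected without the script. A small sketch, assuming the Hub name `drop`:

```python
from collections import Counter

from datasets import load_dataset

# Tally the answer types that build_answers() used to produce; they are stored
# in the answers_spans.types column of the Parquet-backed dataset.
val = load_dataset("drop", split="validation")
type_counts = Counter(t for ex in val for t in ex["answers_spans"]["types"])
print(type_counts)  # counts of "span", "number", and "date" answers
```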