Datasets:
Tasks:
Question Answering
Modalities:
Text
Formats:
parquet
Languages:
English
Size:
10K - 100K
ArXiv:
License:
Update files from the datasets library (from 1.8.0)
Browse files
Release notes: https://github.com/huggingface/datasets/releases/tag/1.8.0
- README.md +4 -2
- adversarial_qa.py +5 -2
README.md
CHANGED
@@ -139,7 +139,9 @@ Data is provided in the same format as SQuAD 1.1. An example is shown below:
|
|
139 |
- title: the title of the Wikipedia page from which the context is sourced
|
140 |
- context: the context/passage
|
141 |
- id: a string identifier for each question
|
142 |
-
- answers: a list of all provided answers (one per question in our case, but multiple may exist in SQuAD) with an `answer_start` field which is the character index of the start of the answer span, and a `text` field which is the answer text
|
|
|
|
|
143 |
|
144 |
### Data Splits
|
145 |
|
@@ -223,4 +225,4 @@ This dataset is distributed under [CC BY-SA 3.0](https://creativecommons.org/lic
|
|
223 |
```
|
224 |
### Contributions
|
225 |
|
226 |
-
Thanks to [@maxbartolo](https://github.com/maxbartolo) for adding this dataset.
|
|
|
139 |
- title: the title of the Wikipedia page from which the context is sourced
|
140 |
- context: the context/passage
|
141 |
- id: a string identifier for each question
|
142 |
+
- answers: a list of all provided answers (one per question in our case, but multiple may exist in SQuAD) with an `answer_start` field which is the character index of the start of the answer span, and a `text` field which is the answer text.
|
143 |
+
|
144 |
+
Note that no answers are provided in the test set. Indeed, this dataset is part of the DynaBench benchmark, for which you can submit your predictions on the [website](https://dynabench.org/tasks/2#1).
|
145 |
|
146 |
### Data Splits
|
147 |
|
|
|
225 |
```
|
226 |
### Contributions
|
227 |
|
228 |
+
Thanks to [@maxbartolo](https://github.com/maxbartolo) for adding this dataset.
|
adversarial_qa.py
CHANGED
@@ -169,13 +169,14 @@ class AdversarialQA(datasets.GeneratorBasedBuilder):
|
|
169 |
logger.info("generating examples from = %s", filepath)
|
170 |
with open(filepath, encoding="utf-8") as f:
|
171 |
squad = json.load(f)
|
|
|
172 |
for article in squad["data"]:
|
173 |
title = article.get("title", "").strip()
|
174 |
for paragraph in article["paragraphs"]:
|
175 |
context = paragraph["context"].strip()
|
176 |
for qa in paragraph["qas"]:
|
177 |
question = qa["question"].strip()
|
178 |
-
|
179 |
|
180 |
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
|
181 |
answers = [answer["text"].strip() for answer in qa["answers"]]
|
@@ -188,10 +189,12 @@ class AdversarialQA(datasets.GeneratorBasedBuilder):
|
|
188 |
"title": title,
|
189 |
"context": context,
|
190 |
"question": question,
|
191 |
-
"id":
|
192 |
"answers": {
|
193 |
"answer_start": answer_starts,
|
194 |
"text": answers,
|
195 |
},
|
196 |
"metadata": {"split": split, "model_in_the_loop": model_in_the_loop},
|
197 |
}
|
|
|
|
|
|
169 |
logger.info("generating examples from = %s", filepath)
|
170 |
with open(filepath, encoding="utf-8") as f:
|
171 |
squad = json.load(f)
|
172 |
+
id_ = 0
|
173 |
for article in squad["data"]:
|
174 |
title = article.get("title", "").strip()
|
175 |
for paragraph in article["paragraphs"]:
|
176 |
context = paragraph["context"].strip()
|
177 |
for qa in paragraph["qas"]:
|
178 |
question = qa["question"].strip()
|
179 |
+
qid = qa["id"]
|
180 |
|
181 |
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
|
182 |
answers = [answer["text"].strip() for answer in qa["answers"]]
|
|
|
189 |
"title": title,
|
190 |
"context": context,
|
191 |
"question": question,
|
192 |
+
"id": qid,
|
193 |
"answers": {
|
194 |
"answer_start": answer_starts,
|
195 |
"text": answers,
|
196 |
},
|
197 |
"metadata": {"split": split, "model_in_the_loop": model_in_the_loop},
|
198 |
}
|
199 |
+
|
200 |
+
id_ += 1
|