Dataset card metadata for bfattori/race: multiple-choice QA, text modality, English, 1K-10K rows.

The dataset loading script:
import json

import datasets

_CITATION = """\
@article{lai2017large,
    title={RACE: Large-scale ReAding Comprehension Dataset From Examinations},
    author={Lai, Guokun and Xie, Qizhe and Liu, Hanxiao and Yang, Yiming and Hovy, Eduard},
    journal={arXiv preprint arXiv:1704.04683},
    year={2017}
}
"""

_DESCRIPTION = """\
RACE is a large-scale reading comprehension dataset with more than 28,000 passages and nearly 100,000 questions. The
dataset is collected from English examinations in China, which are designed for middle school and high school students.
The dataset serves as training and test sets for machine comprehension.
"""

_BASE_URL = "https://huggingface.co/datasets/bfattori/race/raw/main"
_URLS = {
    "high": f"{_BASE_URL}/race_high_test.jsonl",
}

class Race(datasets.GeneratorBasedBuilder):
    """RACE: ReAding Comprehension dataset from Examinations, collected by CMU."""

    VERSION = datasets.Version("0.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="high", description="Exams designed for high school students", version=VERSION),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "article": datasets.Value("string"),
                # "problems" is kept as a single raw string per passage;
                # see the decoding sketch after this script.
                "problems": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=f"{_DESCRIPTION}\n{self.config.description}",
            features=features,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        # download_and_extract returns the local cached path of the single
        # downloaded JSONL file, so name it accordingly.
        data_file = dl_manager.download_and_extract(urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "filepath": data_file,
                    "split": datasets.Split.TEST,
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        with open(filepath, encoding="utf-8") as f:
            # One JSON object per line; the line index serves as the example key
            for key, row in enumerate(f):
                data = json.loads(row)
                yield key, {"article": data["article"], "problems": data["problems"]}
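
A minimal usage sketch. The dataset can be loaded through datasets.load_dataset, either from the hosted repo or from a local copy of this script; the local filename race.py is an assumption, and recent datasets releases may require trust_remote_code=True for script-backed datasets.

import datasets

# Load the "high" config's test split defined above. "race.py" is an assumed
# local filename; "bfattori/race" would load the hosted copy instead. Newer
# `datasets` releases may also need trust_remote_code=True here.
race = datasets.load_dataset("race.py", "high", split="test")
print(race)                      # features: ['article', 'problems']
print(race[0]["article"][:200])  # start of the first passage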
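
The "problems" field comes back as one serialized string per passage. Below is a sketch, reusing the race object from the previous snippet, that flattens it into one row per question for multiple-choice evaluation. It assumes each entry carries "question", "options", and "answer" keys in the standard RACE layout; the exact serialization (JSON or Python literal) is also an assumption, so both decoders are tried.

import ast
import json

def decode_problems(raw):
    # The serialization format is an assumption: try JSON first, then a
    # Python literal (the data is only strings and lists, so literal_eval
    # is a safe fallback).
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        return ast.literal_eval(raw)

def iter_questions(example):
    # Flatten one passage into one record per question.
    for problem in decode_problems(example["problems"]):
        yield {
            "article": example["article"],
            "question": problem["question"],  # assumed key names, per the
            "options": problem["options"],    # standard RACE question layout
            "answer": problem["answer"],      # gold label, e.g. "A"
        }

flat = [q for ex in race for q in iter_questions(ex)]
print(len(flat), "questions from", len(race), "passages")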
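
For debugging _split_generators and _generate_examples without going through load_dataset, the builder class can also be driven directly. This is a sketch assuming a datasets 2.x-style constructor that accepts config_name:

# Direct builder invocation, equivalent to load_dataset for this script.
# The config_name keyword is an assumption about the installed `datasets`
# version (older releases spelled it `name`).
builder = Race(config_name="high")
builder.download_and_prepare()  # runs _split_generators and caches the split
test_ds = builder.as_dataset(split=datasets.Split.TEST)
print(test_ds.features)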