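"""Hugging Face `datasets` loading script for the JesseLiu/MedQA_Maze dataset."""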
import os
import json
import datasets
import logging
logger = logging.getLogger(__name__)
_CITATION = ""
_DESCRIPTION = ""
_BASE_URL = "https://huggingface.co/datasets/JesseLiu/MedQA_Maze/resolve/main"
class MedQaMazeConfig(datasets.BuilderConfig):
def __init__(self, **kwargs):
super().__init__(version=datasets.Version("1.0.0"), **kwargs)
class MedQaMaze(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
MedQaMazeConfig(name="default", description="A default config"),
MedQaMazeConfig(name="advance", description="Advanced-level test data"),
MedQaMazeConfig(name="all", description="Full dataset with train and test"),
MedQaMazeConfig(name="basic", description="Basic-level test data"),
MedQaMazeConfig(name="challenge", description="Challenge-level test data"),
]
DEFAULT_CONFIG_NAME = "all"
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({
"context": datasets.Value("string"),
"question": datasets.Value("string"),
"prerequisit": datasets.Value("string"),
"groundtruth_zoo": datasets.Sequence(datasets.Value("string")),
"answer": datasets.Value("string"),
}),
supervised_keys=None,
homepage="https://huggingface.co/datasets/JesseLiu/MedQA_Maze",
citation=_CITATION,
)
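
    # Sketch of the record layout each JSONL line is expected to follow, inferred
    # from the features above and the parsing in _generate_examples; the values
    # shown are placeholders, not real data:
    #
    #   {"context": "...", "question": "...", "prerequisit": "...",
    #    "groundtruth_zoo": ["...", "..."], "answer": "..."}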
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
config_name = self.config.name if self.config.name != "default" else "all"
urls = {}
if config_name == "all":
urls = {
"train": f"{_BASE_URL}/all/train.jsonl",
"test": f"{_BASE_URL}/all/test.jsonl"
}
elif config_name in ["basic", "advance", "challenge"]:
urls = {
"test": f"{_BASE_URL}/{config_name}/test.jsonl"
}
else:
raise ValueError(f"Unsupported config: {config_name}")
try:
data_files = dl_manager.download(urls)
logger.info(f"Downloaded files: {data_files}")
        except Exception as e:
            raise ValueError(f"Failed to download files: {e}") from e
splits = []
if "train" in data_files:
splits.append(
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": data_files["train"]}
)
)
if "test" in data_files:
splits.append(
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"filepath": data_files["test"]}
)
)
if not splits:
raise ValueError(f"No valid splits found for config {config_name}")
return splits
def _generate_examples(self, filepath):
"""Yields examples."""
logger.info(f"Generating examples from {filepath}")
        if not os.path.exists(filepath):
            raise FileNotFoundError(f"File not found: {filepath}")
with open(filepath, 'r', encoding='utf-8') as f:
content = f.read().strip()
# Split by newlines and filter out empty lines
lines = [line.strip() for line in content.split('\n') if line.strip()]
for idx, line in enumerate(lines):
try:
data = json.loads(line)
example = {
"context": str(data.get("context", "")),
"question": str(data.get("question", "")),
"prerequisit": str(data.get("prerequisit", "")),
"groundtruth_zoo": [str(x) for x in data.get("groundtruth_zoo", [])],
"answer": str(data.get("answer", "")),
}
yield idx, example
except json.JSONDecodeError as e:
logger.error(f"Error parsing JSON at line {idx} in {filepath}: {e}\nLine content: {line[:100]}...")
continue
except Exception as e:
logger.error(f"Unexpected error processing line {idx} in {filepath}: {e}")
continue
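

# --- Usage sketch (illustrative, not part of the loading script itself) ---
# Assumes a `datasets` release that still executes repository loading scripts;
# recent 2.x releases additionally require trust_remote_code=True.
if __name__ == "__main__":
    from datasets import load_dataset

    # "all" (also the default config) exposes both train and test splits.
    ds_all = load_dataset("JesseLiu/MedQA_Maze", "all", trust_remote_code=True)
    print(ds_all)

    # "basic", "advance" and "challenge" only expose a test split.
    ds_challenge = load_dataset("JesseLiu/MedQA_Maze", "challenge", trust_remote_code=True)
    print(ds_challenge["test"][0]["question"])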