import json
import logging
import os

import datasets

_CITATION = """Your citation here"""

_DESCRIPTION = """Description of your medical QA dataset"""

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
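
# Expected on-disk layout (assumed from the paths built in _split_generators;
# the "basic"/"challenge"/"default" directories mirror the "advance" pattern):
#
#   <directory containing this script>/
#       all/train.jsonl
#       all/test.jsonl
#       advance/test.jsonl
#       basic/test.jsonl
#       challenge/test.jsonl
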

class MedQaMazeConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)


class MedQaMaze(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        MedQaMazeConfig(name="default", description="A default config"),
        MedQaMazeConfig(name="advance", description="Advanced-level test data"),
        MedQaMazeConfig(name="all", description="Full dataset with train and test"),
        MedQaMazeConfig(name="basic", description="Basic-level test data"),
        MedQaMazeConfig(name="challenge", description="Challenge-level test data"),
    ]

    DEFAULT_CONFIG_NAME = "all"

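    # Hypothetical shape of one JSONL record (keys and types follow the
    # features declared in _info() below; the values are placeholders):
    #   {"context": "...", "question": "...", "prerequisit": "...",
    #    "groundtruth_zoo": ["..."], "answer": "..."}
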
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            features=datasets.Features({
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                # Spelled "prerequisit" to match the field name in the JSONL files.
                "prerequisit": datasets.Value("string"),
                "groundtruth_zoo": datasets.Sequence(datasets.Value("string")),
                "answer": datasets.Value("string"),
            }),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        base_path = os.path.dirname(os.path.abspath(__file__))
        logger.info(f"Base path: {base_path}")

        config_name = self.config.name
        logger.info(f"Using config: {config_name}")

        if config_name == "all":
            train_path = os.path.join(base_path, "all", "train.jsonl")
            test_path = os.path.join(base_path, "all", "test.jsonl")

            logger.info(f"Looking for train file at: {train_path}")
            logger.info(f"Looking for test file at: {test_path}")

            if not os.path.exists(train_path):
                raise ValueError(f"Train file not found: {train_path}")
            if not os.path.exists(test_path):
                raise ValueError(f"Test file not found: {test_path}")

            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": train_path},
                ),
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": test_path},
                ),
            ]

        # Every other config ("advance", "basic", "challenge", "default") is
        # assumed to ship a single test split at <config_name>/test.jsonl,
        # mirroring the layout used for "advance".
        filepath = os.path.join(base_path, config_name, "test.jsonl")
        logger.info(f"Looking for {config_name} test file at: {filepath}")
        if not os.path.exists(filepath):
            raise ValueError(f"File not found: {filepath}")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": filepath},
            )
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        logger.info(f"Processing file: {filepath}")

        try:
            with open(filepath, "r", encoding="utf-8") as f:
                for idx, line in enumerate(f):
                    try:
                        data = json.loads(line.strip())
                        example = {
                            "context": data.get("context", ""),
                            "question": data.get("question", ""),
                            "prerequisit": data.get("prerequisit", ""),
                            "groundtruth_zoo": data.get("groundtruth_zoo", []),
                            "answer": data.get("answer", ""),
                        }
                        yield idx, example
                    except json.JSONDecodeError as e:
                        logger.error(f"Error parsing JSON at line {idx} in {filepath}: {e}")
                        continue
        except Exception as e:
            logger.error(f"Error reading file {filepath}: {e}")
            raise
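

# A minimal usage sketch (assumes this script sits next to the data
# directories described above; the path below is a placeholder):
#
#   from datasets import load_dataset
#   ds = load_dataset("path/to/this/script", name="all")
#   print(ds["train"][0]["question"])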