import os
import json
import datasets
import logging

logger = logging.getLogger(__name__)

_CITATION = ""
_DESCRIPTION = ""

_BASE_URL = "https://huggingface.co/datasets/JesseLiu/MedQA_Maze/resolve/main"

class MedQaMazeConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)

class MedQaMaze(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        MedQaMazeConfig(name="default", description="A default config"),
        MedQaMazeConfig(name="advance", description="Advanced-level test data"),
        MedQaMazeConfig(name="all",     description="Full dataset with train and test"),
        MedQaMazeConfig(name="basic",   description="Basic-level test data"),
        MedQaMazeConfig(name="challenge", description="Challenge-level test data"),
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "context": datasets.Value("string"),
                "question": datasets.Value("string"),
                "prerequisit": datasets.Value("string"),
                "groundtruth_zoo": datasets.Sequence(datasets.Value("string")),
                "answer": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/JesseLiu/MedQA_Maze",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        config_name = self.config.name if self.config.name != "default" else "all"

        urls = {}
        if config_name == "all":
            urls = {
                "train": f"{_BASE_URL}/all/train.jsonl",
                "test": f"{_BASE_URL}/all/test.jsonl"
            }
        elif config_name in ["basic", "advance", "challenge"]:
            urls = {
                "test": f"{_BASE_URL}/{config_name}/test.jsonl"
            }
        else:
            raise ValueError(f"Unsupported config: {config_name}")

        try:
            data_files = dl_manager.download(urls)
            logger.info(f"Downloaded files: {data_files}")
        except Exception as e:
            # Chain the original exception so the download failure's traceback is preserved.
            raise ValueError(f"Failed to download files: {e}") from e

        splits = []
        if "train" in data_files:
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={"filepath": data_files["train"]}
                )
            )
        if "test" in data_files:
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": data_files["test"]}
                )
            )

        if not splits:
            raise ValueError(f"No valid splits found for config {config_name}")

        return splits

    def _generate_examples(self, filepath):
        """Yields examples."""
        logger.info(f"Generating examples from {filepath}")
        
        if not os.path.exists(filepath):
            raise ValueError(f"File not found: {filepath}")
            
        with open(filepath, 'r', encoding='utf-8') as f:
            # Iterate over the JSONL file line by line, skipping blank lines.
            idx = -1
            for raw_line in f:
                line = raw_line.strip()
                if not line:
                    continue
                idx += 1
                try:
                    data = json.loads(line)
                    example = {
                        "context": str(data.get("context", "")),
                        "question": str(data.get("question", "")),
                        "prerequisit": str(data.get("prerequisit", "")),
                        "groundtruth_zoo": [str(x) for x in data.get("groundtruth_zoo", [])],
                        "answer": str(data.get("answer", "")),
                    }
                    yield idx, example
                except json.JSONDecodeError as e:
                    logger.error(f"Error parsing JSON at line {idx} in {filepath}: {e}\nLine content: {line[:100]}...")
                    continue
                except Exception as e:
                    logger.error(f"Unexpected error processing line {idx} in {filepath}: {e}")
                    continue
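

# --- Usage sketch (illustrative, not part of the original script) ---
# A minimal example of loading this dataset through the `datasets` library.
# The config names ("all", "basic", "advance", "challenge") come from
# BUILDER_CONFIGS above; `trust_remote_code=True` is assumed to be required
# because this repository ships a loading script.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        "JesseLiu/MedQA_Maze",
        "basic",  # any of: "all", "basic", "advance", "challenge"
        trust_remote_code=True,
    )
    print(ds)
    print(ds["test"][0]["question"])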