JesseLiu
committed on
Commit
·
f48e76f
1
Parent(s):
05cc00f
update py
Browse files- medqa_maze.py → MedQA_Maze.py +57 -33
- __init__.py +2 -0
- advance/test.jsonl +2 -2
- basic/test.jsonl +2 -2
- challenge/test.jsonl +2 -2
- test_dataset.py +54 -0
medqa_maze.py → MedQA_Maze.py
RENAMED
@@ -1,16 +1,19 @@
|
|
1 |
import os
|
2 |
import json
|
3 |
import datasets
|
|
|
4 |
|
5 |
-
_CITATION = ""
|
6 |
-
_DESCRIPTION = ""
|
7 |
|
8 |
|
|
|
|
|
|
|
9 |
class MedQaMazeConfig(datasets.BuilderConfig):
|
10 |
def __init__(self, **kwargs):
|
11 |
super().__init__(version=datasets.Version("1.0.0"), **kwargs)
|
12 |
|
13 |
-
|
14 |
class MedQaMaze(datasets.GeneratorBasedBuilder):
|
15 |
BUILDER_CONFIGS = [
|
16 |
MedQaMazeConfig(name="default", description="A default config"),
|
@@ -20,10 +23,11 @@ class MedQaMaze(datasets.GeneratorBasedBuilder):
|
|
20 |
MedQaMazeConfig(name="challenge", description="Challenge-level test data"),
|
21 |
]
|
22 |
|
|
|
|
|
23 |
def _info(self):
|
24 |
return datasets.DatasetInfo(
|
25 |
description=_DESCRIPTION,
|
26 |
-
citation=_CITATION,
|
27 |
features=datasets.Features({
|
28 |
"context": datasets.Value("string"),
|
29 |
"question": datasets.Value("string"),
|
@@ -31,55 +35,75 @@ class MedQaMaze(datasets.GeneratorBasedBuilder):
|
|
31 |
"groundtruth_zoo": datasets.Sequence(datasets.Value("string")),
|
32 |
"answer": datasets.Value("string"),
|
33 |
}),
|
|
|
34 |
)
|
35 |
|
36 |
def _split_generators(self, dl_manager):
|
37 |
-
|
|
|
|
|
|
|
|
|
38 |
config_name = self.config.name
|
|
|
39 |
|
|
|
40 |
if config_name == "advance":
|
|
|
|
|
|
|
|
|
|
|
41 |
return [
|
42 |
datasets.SplitGenerator(
|
43 |
name=datasets.Split.TEST,
|
44 |
-
gen_kwargs={"filepath":
|
45 |
)
|
46 |
]
|
47 |
elif config_name == "all":
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
48 |
return [
|
49 |
datasets.SplitGenerator(
|
50 |
name=datasets.Split.TRAIN,
|
51 |
-
gen_kwargs={"filepath":
|
52 |
),
|
53 |
datasets.SplitGenerator(
|
54 |
name=datasets.Split.TEST,
|
55 |
-
gen_kwargs={"filepath":
|
56 |
-
)
|
57 |
-
]
|
58 |
-
elif config_name == "basic":
|
59 |
-
return [
|
60 |
-
datasets.SplitGenerator(
|
61 |
-
name=datasets.Split.TEST,
|
62 |
-
gen_kwargs={"filepath": os.path.join(base_path, "basic", "test.jsonl")}
|
63 |
-
)
|
64 |
-
]
|
65 |
-
elif config_name == "challenge":
|
66 |
-
return [
|
67 |
-
datasets.SplitGenerator(
|
68 |
-
name=datasets.Split.TEST,
|
69 |
-
gen_kwargs={"filepath": os.path.join(base_path, "challenge", "test.jsonl")}
|
70 |
)
|
71 |
]
|
|
|
72 |
|
73 |
def _generate_examples(self, filepath):
|
74 |
"""Yields examples."""
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import os
|
2 |
import json
|
3 |
import datasets
|
4 |
+
import logging
|
5 |
|
6 |
+
_CITATION = """Your citation here"""
|
7 |
+
_DESCRIPTION = """Description of your medical QA dataset"""
|
8 |
|
9 |
|
10 |
+
logging.basicConfig(level=logging.INFO)
|
11 |
+
logger = logging.getLogger(__name__)
|
12 |
+
|
13 |
class MedQaMazeConfig(datasets.BuilderConfig):
    """BuilderConfig for MedQA-Maze; every config is pinned to version 1.0.0."""

    def __init__(self, **kwargs):
        # All keyword options (name, description, ...) pass straight through;
        # only the dataset version is fixed here.
        pinned = datasets.Version("1.0.0")
        super().__init__(version=pinned, **kwargs)
16 |
|
|
|
17 |
class MedQaMaze(datasets.GeneratorBasedBuilder):
|
18 |
BUILDER_CONFIGS = [
|
19 |
MedQaMazeConfig(name="default", description="A default config"),
|
|
|
23 |
MedQaMazeConfig(name="challenge", description="Challenge-level test data"),
|
24 |
]
|
25 |
|
26 |
+
DEFAULT_CONFIG_NAME = "all"
|
27 |
+
|
28 |
def _info(self):
    """Return the DatasetInfo describing the feature schema of every split."""
    # NOTE: "prerequisit" [sic] mirrors the key name used in the raw jsonl
    # files and emitted by _generate_examples.
    features = datasets.Features(
        {
            "context": datasets.Value("string"),
            "question": datasets.Value("string"),
            "prerequisit": datasets.Value("string"),
            "groundtruth_zoo": datasets.Sequence(datasets.Value("string")),
            "answer": datasets.Value("string"),
        }
    )
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=features,
        supervised_keys=None,
    )
40 |
|
41 |
def _split_generators(self, dl_manager):
    """Returns SplitGenerators for the selected config.

    Data files are expected next to this script:
    all/{train,test}.jsonl, advance/test.jsonl, basic/test.jsonl,
    challenge/test.jsonl. Raises ValueError when a required file is
    missing or the config name is not recognized.
    """
    # Resolve data files relative to this script's location.
    base_path = os.path.dirname(os.path.abspath(__file__))
    logger.info(f"Base path: {base_path}")

    config_name = self.config.name
    logger.info(f"Using config: {config_name}")

    if config_name == "advance":
        filepath = os.path.join(base_path, "advance", "test.jsonl")
        logger.info(f"Looking for advance test file at: {filepath}")
        if not os.path.exists(filepath):
            raise ValueError(f"File not found: {filepath}")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": filepath},
            )
        ]
    elif config_name == "all":
        train_path = os.path.join(base_path, "all", "train.jsonl")
        test_path = os.path.join(base_path, "all", "test.jsonl")

        logger.info(f"Looking for train file at: {train_path}")
        logger.info(f"Looking for test file at: {test_path}")

        if not os.path.exists(train_path):
            raise ValueError(f"Train file not found: {train_path}")
        if not os.path.exists(test_path):
            raise ValueError(f"Test file not found: {test_path}")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_path},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_path},
            ),
        ]
    elif config_name in ("basic", "challenge"):
        # BUG FIX: these declared configs previously fell through to an
        # implicit `return None`, which crashes the datasets library later.
        filepath = os.path.join(base_path, config_name, "test.jsonl")
        logger.info(f"Looking for {config_name} test file at: {filepath}")
        if not os.path.exists(filepath):
            raise ValueError(f"File not found: {filepath}")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": filepath},
            )
        ]
    # Any other name (including "default") is an explicit error rather than
    # a silent None return.
    raise ValueError(f"Unsupported config: {config_name}")
|
86 |
|
87 |
def _generate_examples(self, filepath):
|
88 |
"""Yields examples."""
|
89 |
+
logger.info(f"Processing file: {filepath}")
|
90 |
+
|
91 |
+
try:
|
92 |
+
with open(filepath, "r", encoding="utf-8") as f:
|
93 |
+
for idx, line in enumerate(f):
|
94 |
+
try:
|
95 |
+
data = json.loads(line.strip())
|
96 |
+
example = {
|
97 |
+
"context": data.get("context", ""),
|
98 |
+
"question": data.get("question", ""),
|
99 |
+
"prerequisit": data.get("prerequisit", ""),
|
100 |
+
"groundtruth_zoo": data.get("groundtruth_zoo", []),
|
101 |
+
"answer": data.get("answer", ""),
|
102 |
+
}
|
103 |
+
yield idx, example
|
104 |
+
except json.JSONDecodeError as e:
|
105 |
+
logger.error(f"Error parsing JSON at line {idx} in {filepath}: {e}")
|
106 |
+
continue
|
107 |
+
except Exception as e:
|
108 |
+
logger.error(f"Error reading file {filepath}: {e}")
|
109 |
+
raise
|
__init__.py
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
# __init__.py
|
2 |
+
from .MedQA_Maze import MedQaMaze
|
advance/test.jsonl
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:47210b33fcdb34870968a74700394089280acebf3243be406c50229e5609c2f7
|
3 |
+
size 4638956
|
basic/test.jsonl
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:e8da4550f23f04c0f8224aba503abb91ce5e0e3fca4a10a7adef19a919664bb0
|
3 |
+
size 1913572
|
challenge/test.jsonl
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:219431ff3bb766f23fe826fdab30c8471b31397b2f882338b0420d0b223e0dcc
|
3 |
+
size 9799682
|
test_dataset.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
+
from pathlib import Path
|
4 |
+
|
5 |
+
def test_jsonl_file(filepath):
|
6 |
+
print(f"\nTesting file: {filepath}")
|
7 |
+
try:
|
8 |
+
if not os.path.exists(filepath):
|
9 |
+
print(f"ERROR: File does not exist: {filepath}")
|
10 |
+
return False
|
11 |
+
|
12 |
+
with open(filepath, 'r', encoding='utf-8') as f:
|
13 |
+
for idx, line in enumerate(f, 1):
|
14 |
+
try:
|
15 |
+
data = json.loads(line.strip())
|
16 |
+
# Verify required fields
|
17 |
+
required_fields = ["context", "question", "prerequisit", "groundtruth_zoo", "answer"]
|
18 |
+
missing_fields = [field for field in required_fields if field not in data]
|
19 |
+
if missing_fields:
|
20 |
+
print(f"Warning: Line {idx} is missing fields: {missing_fields}")
|
21 |
+
except json.JSONDecodeError as e:
|
22 |
+
print(f"ERROR: Invalid JSON at line {idx}: {e}")
|
23 |
+
return False
|
24 |
+
print("File is valid!")
|
25 |
+
return True
|
26 |
+
except Exception as e:
|
27 |
+
print(f"ERROR: Failed to process file: {e}")
|
28 |
+
return False
|
29 |
+
|
30 |
+
def main():
    """Validate every dataset split file and report an overall verdict."""
    # Resolve data files relative to this script.
    base_dir = Path(__file__).parent

    # Every data file the loader expects to exist.
    candidates = [
        base_dir / "all" / "train.jsonl",
        base_dir / "all" / "test.jsonl",
        base_dir / "basic" / "test.jsonl",
        base_dir / "advance" / "test.jsonl",
        base_dir / "challenge" / "test.jsonl",
    ]

    # Check every file (no short-circuit) so all problems get reported.
    results = [test_jsonl_file(path) for path in candidates]

    if all(results):
        print("\nAll files are valid!")
    else:
        print("\nSome files have errors. Please fix them before proceeding.")

if __name__ == "__main__":
    main()
|