# brain-structure/brain-structure.py
import os
import json
import datasets
import logging
logger = logging.getLogger(__name__)
_DESCRIPTION = """
This dataset contains T1-weighted .nii.gz structural MRI scans in a BIDS-like arrangement.
Each scan has an associated JSON sidecar with metadata, including fields such as subject
demographics, scanner information, and a 'split' field indicating train/validation/test.
"""
_CITATION = """
@dataset{Radiata-Brain-Structure,
author = {Jesse Brown and Clayton Young},
title = {Brain-Structure: A Collection of Processed Structural MRI Scans},
year = {2025},
url = {https://huggingface.co/datasets/radiata-ai/brain-structure},
note = {Version 1.0},
publisher = {Hugging Face}
}
"""
_HOMEPAGE = "https://huggingface.co/datasets/radiata-ai/brain-structure"
_LICENSE = "ODC-By v1.0"


class BrainStructureConfig(datasets.BuilderConfig):
"""
Configuration class for the Brain-Structure dataset.
    Multiple configurations (e.g., different subsets) can be defined if needed.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)


class BrainStructure(datasets.GeneratorBasedBuilder):
"""
A dataset loader for T1 .nii.gz files plus JSON sidecars.
Each sidecar includes a 'split' field identifying whether the scan
belongs to the train, validation, or test set.
    Usage Example:

        from datasets import load_dataset

        ds = load_dataset(
            "radiata-ai/brain-structure",
            name="all",
            split="train",
            trust_remote_code=True,
        )
"""
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
BrainStructureConfig(
name="all",
version=VERSION,
description=(
"All structural MRI data in a BIDS-like arrangement, labeled "
"with train/validation/test splits."
),
),
]
DEFAULT_CONFIG_NAME = "all"
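
    # Only the single "all" configuration is defined above. As a sketch (not
    # part of this dataset script), an additional subset configuration could be
    # appended to BUILDER_CONFIGS, e.g.:
    #
    #     BrainStructureConfig(
    #         name="t1_3T",  # hypothetical subset name
    #         version=VERSION,
    #         description="Hypothetical subset, e.g. only 3T scans.",
    #     ),
    #
    # with the corresponding filtering keyed off self.config.name in
    # _generate_examples.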

    def _info(self):
"""
Provides metadata about the dataset, including feature types
and general dataset information.
"""
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"nii_filepath": datasets.Value("string"),
"metadata": {
"split": datasets.Value("string"),
"participant_id": datasets.Value("string"),
"session_id": datasets.Value("string"),
"study": datasets.Value("string"),
# Additional fields from the JSON sidecar
"age": datasets.Value("int32"),
"sex": datasets.Value("string"),
"clinical_diagnosis": datasets.Value("string"),
"scanner_manufacturer": datasets.Value("string"),
"scanner_model": datasets.Value("string"),
"field_strength": datasets.Value("string"),
"image_quality_rating": datasets.Value("float"),
"total_intracranial_volume": datasets.Value("float"),
"license": datasets.Value("string"),
"website": datasets.Value("string"),
"citation": datasets.Value("string"),
"t1_file_name": datasets.Value("string"),
"radiata_id": datasets.Value("int32"),
},
}
),
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
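
    # For illustration, a single record yielded by this builder is expected to
    # look roughly like the following (values are hypothetical):
    #
    #   {"id": "0",
    #    "nii_filepath": ".../sub-xxx_ses-xxx_T1w.nii.gz",
    #    "metadata": {"split": "train", "participant_id": "sub-xxx", ...}}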

    def _split_generators(self, dl_manager: datasets.DownloadManager):
"""
Returns SplitGenerators for 'train', 'validation', and 'test'.
Each split is identified by matching the 'split' field in the JSON sidecar.
"""
data_dir = dl_manager.dataset_dir
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"data_dir": data_dir, "desired_split": "train"},
),
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"data_dir": data_dir, "desired_split": "validation"},
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={"data_dir": data_dir, "desired_split": "test"},
),
]

    def _generate_examples(self, data_dir, desired_split):
"""
Recursively scan the data_dir, locate JSON sidecar files, and yield
examples whose 'split' field matches desired_split.
Each yielded example includes:
- 'nii_filepath': pointing to the corresponding .nii.gz file
- 'metadata': dictionary of subject and scan information
"""
id_ = 0
for root, dirs, files in os.walk(data_dir):
for fname in files:
if fname.endswith("_scandata.json"):
sidecar_path = os.path.join(root, fname)
with open(sidecar_path, "r") as f:
sidecar = json.load(f)
# Only yield if 'split' matches the desired split
if sidecar.get("split") == desired_split:
# Attempt to locate the matching .nii.gz file
# Typically the sidecar is named sub-xxx_ses-xxx_scandata.json
# and the NIfTI file: sub-xxx_ses-xxx_T1w.nii.gz
possible_nii_prefix = fname.replace("_scandata.json", "_T1w")
nii_filepath = None
for potential_file in files:
if (potential_file.startswith(possible_nii_prefix)
and potential_file.endswith(".nii.gz")):
nii_filepath = os.path.join(root, potential_file)
break
if not nii_filepath:
logger.warning(
f"No corresponding .nii.gz file found for {sidecar_path}"
)
continue
# Build the example
yield id_, {
"id": str(id_),
"nii_filepath": nii_filepath,
"metadata": {
"split": sidecar.get("split", ""),
"participant_id": sidecar.get("participant_id", ""),
"session_id": sidecar.get("session_id", ""),
"study": sidecar.get("study", ""),
"age": sidecar.get("age", 0), # default to 0 if missing
"sex": sidecar.get("sex", ""),
"clinical_diagnosis": sidecar.get("clinical_diagnosis", ""),
"scanner_manufacturer": sidecar.get("scanner_manufacturer", ""),
"scanner_model": sidecar.get("scanner_model", ""),
"field_strength": sidecar.get("field_strength", ""),
"image_quality_rating": float(sidecar.get("image_quality_rating", 0.0)),
"total_intracranial_volume": float(sidecar.get("total_intracranial_volume", 0.0)),
"license": sidecar.get("license", ""),
"website": sidecar.get("website", ""),
"citation": sidecar.get("citation", ""),
"t1_file_name": sidecar.get("t1_file_name", ""),
"radiata_id": sidecar.get("radiata_id", 0),
},
}
id_ += 1
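

if __name__ == "__main__":
    # Minimal usage sketch, mirroring the example in the class docstring.
    # Assumes the `datasets` library is installed and the Hugging Face dataset
    # is reachable; nibabel (commented out below) is an assumed optional
    # dependency for reading the NIfTI volumes and is not required by the
    # loader itself.
    from datasets import load_dataset

    ds = load_dataset(
        "radiata-ai/brain-structure",
        name="all",
        split="train",
        trust_remote_code=True,
    )
    print(ds)
    print(ds[0]["metadata"])

    # To load the image data for the first example:
    # import nibabel as nib
    # img = nib.load(ds[0]["nii_filepath"])
    # print(img.shape)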