import os

import pandas as pd
import nibabel as nib
import datasets
from datasets import (
    GeneratorBasedBuilder,
    SplitGenerator,
    Split,
    DatasetInfo,
    Features,
    Array3D,
    Value,
)

logger = datasets.logging.get_logger(__name__)

# Every image and mask volume in the dataset is expected to be on this voxel grid.
EXPECTED_SHAPE = (160, 384, 384)
|
_CITATION = """\ |
|
@article{YAO2024103035, |
|
title = {CartiMorph: A framework for automated knee articular cartilage morphometrics}, |
|
journal = {Medical Image Analysis}, |
|
author = {Yongcheng Yao and Junru Zhong and Liping Zhang and Sheheryar Khan and Weitian Chen}, |
|
volume = {91}, |
|
pages = {103035}, |
|
year = {2024}, |
|
issn = {1361-8415}, |
|
doi = {https://doi.org/10.1016/j.media.2023.103035} |
|
} |
|
|
|
@InProceedings{10.1007/978-3-031-82007-6_16, |
|
author = {Yao, Yongcheng and Chen, Weitian}, |
|
editor = {Wu, Shandong and Shabestari, Behrouz and Xing, Lei}, |
|
title = {Quantifying Knee Cartilage Shape and Lesion: From Image to Metrics}, |
|
booktitle = {Applications of Medical Artificial Intelligence}, |
|
year = {2025}, |
|
publisher = {Springer Nature Switzerland}, |
|
address = {Cham}, |
|
pages = {162--172} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
This is the official release of the OAIZIB-CM dataset. |
|
|
|
OAIZIB-CM is based on the OAIZIB dataset. |
|
OAIZIB paper: Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge |
|
and Convolutional Neural Networks: Data from the Osteoarthritis Initiative. |
|
In OAIZIB-CM, tibial cartilage is split into medial and lateral tibial cartilages. |
|
OAIZIB-CM includes CLAIR-Knee-103R, consisting of: |
|
- a template image learned from 103 MR images of subjects without radiographic OA, |
|
- a corresponding 5-ROI segmentation mask for cartilages and bones, and |
|
- a corresponding 20-ROI atlas for articular cartilages. |
|
|
|
This dataset is released under the CC BY-NC 4.0 license. |
|
""" |
|
|
|
_HOMEPAGE_URL = "https://github.com/YongchengYAO/CartiMorph" |
|
|
|
|
|
class OAIZIBCMDataset(GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")
|
    def _info(self):
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features({
                # MRI volume and segmentation mask as fixed-shape float32 arrays,
                # plus the on-disk paths of the source NIfTI files.
                "image": Array3D(shape=EXPECTED_SHAPE, dtype="float32"),
                "mask": Array3D(shape=EXPECTED_SHAPE, dtype="float32"),
                "image_path": Value("string"),
                "mask_path": Value("string"),
            }),
            homepage=_HOMEPAGE_URL,
            citation=_CITATION,
        )
|
    def _split_generators(self, dl_manager):
        # Download the split manifests (CSV files listing image and mask filenames).
        base_url = "https://huggingface.co/datasets/YongchengYAO/OAIZIB-CM/resolve/load_dataset-support"
        train_csv_url = f"{base_url}/train.csv"
        test_csv_url = f"{base_url}/test.csv"
        csv_paths = dl_manager.download({"train": train_csv_url, "test": test_csv_url})
        logger.info(f"Downloaded CSV paths: {csv_paths}")

        # Download and extract the archive holding all NIfTI images and masks.
        data_root_dir = dl_manager.download_and_extract(
            "https://huggingface.co/datasets/YongchengYAO/OAIZIB-CM/resolve/load_dataset-support/data/OAIZIB-CM.zip"
        )
        data_dir = os.path.join(data_root_dir, "OAIZIB-CM")
        logger.info(f"Data directory set to {data_dir}")

        train_df = pd.read_csv(csv_paths["train"])
        test_df = pd.read_csv(csv_paths["test"])

        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={"df": train_df, "split": "train", "data_dir": data_dir},
            ),
            SplitGenerator(
                name=Split.TEST,
                gen_kwargs={"df": test_df, "split": "test", "data_dir": data_dir},
            ),
        ]
|
    def _generate_examples(self, df, split, data_dir):
        # Resolve the image and mask directories for the requested split.
        if split == "train":
            img_dir = os.path.join(data_dir, "imagesTr")
            mask_dir = os.path.join(data_dir, "labelsTr")
        elif split == "test":
            img_dir = os.path.join(data_dir, "imagesTs")
            mask_dir = os.path.join(data_dir, "labelsTs")
        else:
            raise ValueError(f"Unknown split: {split}")

        logger.info(f"Looking for {split} images in: {img_dir}")
        logger.info(f"Looking for {split} masks in: {mask_dir}")
        os.makedirs(img_dir, exist_ok=True)
        os.makedirs(mask_dir, exist_ok=True)

        count = 0
        skipped = 0
        for idx, row in df.iterrows():
            img_file = row["image"]
            mask_file = row["mask"]
            img_path = os.path.join(img_dir, img_file)
            mask_path = os.path.join(mask_dir, mask_file)

            # Skip manifest entries whose files are missing on disk.
            if not os.path.exists(img_path):
                logger.warning(f"Image not found: {img_path}")
                skipped += 1
                continue
            if not os.path.exists(mask_path):
                logger.warning(f"Mask not found: {mask_path}")
                skipped += 1
                continue

            try:
                # Load the NIfTI volumes and cast to float32 to match the feature schema.
                img_nib = nib.load(img_path)
                image = img_nib.get_fdata().astype("float32")
                mask_nib = nib.load(mask_path)
                mask = mask_nib.get_fdata().astype("float32")

                yield idx, {
                    "image": image,
                    "mask": mask,
                    "image_path": img_path,
                    "mask_path": mask_path,
                }
                count += 1
            except Exception as e:
                logger.error(f"Error processing {img_path} and {mask_path}: {e}")
                skipped += 1

        logger.info(f"Successfully yielded {count} examples for {split} split, skipped {skipped}")
|
|