YongchengYAO committed on
Commit f5475ad · 1 Parent(s): 4e9fcba
Files changed (1)
  1. OAIZIB-CM.py +139 -85
OAIZIB-CM.py CHANGED
@@ -1,114 +1,168 @@
import os
import pandas as pd
import nibabel as nib
- from datasets import GeneratorBasedBuilder, SplitGenerator, Split, DatasetInfo, Features, Array3D, Value


class OAIZIBCMDataset(GeneratorBasedBuilder):
-     VERSION = "1.0.0"

    def _info(self):
        return DatasetInfo(
-             description=(
-                 "This is the official release of the OAIZIB-CM dataset.\n\n"
-                 "OAIZIB-CM is based on the OAIZIB dataset.\n"
-                 "OAIZIB paper: Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge "
-                 "and Convolutional Neural Networks: Data from the Osteoarthritis Initiative.\n"
-                 "In OAIZIB-CM, tibial cartilage is split into medial and lateral tibial cartilages.\n"
-                 "OAIZIB-CM includes CLAIR-Knee-103R, consisting of:\n"
-                 " - a template image learned from 103 MR images of subjects without radiographic OA,\n"
-                 " - a corresponding 5-ROI segmentation mask for cartilages and bones, and\n"
-                 " - a corresponding 20-ROI atlas for articular cartilages.\n\n"
-                 "Papers:\n"
-                 "The dataset originates from these projects:\n"
-                 " - CartiMorph: https://github.com/YongchengYAO/CartiMorph\n"
-                 " - CartiMorph Toolbox: https://github.com/YongchengYAO/CartiMorph-Toolbox\n"
-                 " - https://github.com/YongchengYAO/CMT-AMAI24paper\n\n"
-                 "Please cite the following if you use the dataset:\n\n"
-                 "@article{YAO2024103035,\n"
-                 " title = {CartiMorph: A framework for automated knee articular cartilage morphometrics},\n"
-                 " journal = {Medical Image Analysis},\n"
-                 " author = {Yongcheng Yao and Junru Zhong and Liping Zhang and Sheheryar Khan and Weitian Chen},\n"
-                 " volume = {91},\n"
-                 " pages = {103035},\n"
-                 " year = {2024},\n"
-                 " issn = {1361-8415},\n"
-                 " doi = {https://doi.org/10.1016/j.media.2023.103035}\n"
-                 "}\n\n"
-                 "@InProceedings{10.1007/978-3-031-82007-6_16,\n"
-                 " author = {Yao, Yongcheng and Chen, Weitian},\n"
-                 " editor = {Wu, Shandong and Shabestari, Behrouz and Xing, Lei},\n"
-                 " title = {Quantifying Knee Cartilage Shape and Lesion: From Image to Metrics},\n"
-                 " booktitle = {Applications of Medical Artificial Intelligence},\n"
-                 " year = {2025},\n"
-                 " publisher = {Springer Nature Switzerland},\n"
-                 " address = {Cham},\n"
-                 " pages = {162--172}\n"
-                 "}\n\n"
-                 "License:\n"
-                 "This dataset is released under the CC BY-NC 4.0 license. It is compulsory to cite the above papers if you use the dataset.\n"
            ),
-             features=Features({
-                 "image": Array3D(dtype="float32", shape=(None, None, None)),
-                 "mask": Array3D(dtype="float32", shape=(None, None, None)),
-                 "image_path": Value("string"),
-                 "mask_path": Value("string"),
-             }),
        )

    def _split_generators(self, dl_manager):
-         # Define the base URL where the CSV files are hosted on the HF Hub.
-         # This URL is constructed to point to the files from the desired revision.
        base_url = "https://huggingface.co/datasets/YongchengYAO/OAIZIB-CM/resolve/load_dataset-support"
        train_csv_url = f"{base_url}/train.csv"
        test_csv_url = f"{base_url}/test.csv"
-
-         # Use dl_manager to download the CSV files
-         train_csv_path = dl_manager.download(train_csv_url)
-         test_csv_path = dl_manager.download(test_csv_url)
-
-         # Debug: print the downloaded paths (optional)
-         print("Downloaded Train CSV Path:", train_csv_path)
-         print("Downloaded Test CSV Path:", test_csv_path)
-
-         train_df = pd.read_csv(train_csv_path)
-         test_df = pd.read_csv(test_csv_path)
-
        return [
-             SplitGenerator(name=Split.TRAIN, gen_kwargs={"df": train_df, "split": "train"}),
-             SplitGenerator(name=Split.TEST, gen_kwargs={"df": test_df, "split": "test"}),
        ]

-     def _generate_examples(self, df, split):
-         # Determine base directories for images and masks based on the split
        if split == "train":
-             base_img_dir = os.path.join("data", "imagesTr")
-             base_mask_dir = os.path.join("data", "labelsTr")
        elif split == "test":
-             base_img_dir = os.path.join("data", "imagesTs")
-             base_mask_dir = os.path.join("data", "labelsTs")
        else:
-             base_img_dir = ""
-             base_mask_dir = ""

        for idx, row in df.iterrows():
            img_file = row["image"]
            mask_file = row["mask"]
-             img_path = os.path.join(base_img_dir, img_file)
-             mask_path = os.path.join(base_mask_dir, mask_file)
-             try:
-                 image = nib.load(img_path).get_fdata().astype("float32")
-             except Exception as e:
-                 print(f"Error loading image from {img_path}: {e}")
                continue
            try:
-                 mask = nib.load(mask_path).get_fdata().astype("float32")
            except Exception as e:
-                 print(f"Error loading mask from {mask_path}: {e}")
-                 continue

-             yield idx, {
-                 "image": image,
-                 "mask": mask,
-                 "image_path": img_path,
-                 "mask_path": mask_path,
-             }
 
import os
import pandas as pd
import nibabel as nib
+ import datasets
+ from datasets import (
+     GeneratorBasedBuilder,
+     SplitGenerator,
+     Split,
+     DatasetInfo,
+     Features,
+     Array3D,
+     Value,
+ )
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _CITATION = """\
+ @article{YAO2024103035,
+ title = {CartiMorph: A framework for automated knee articular cartilage morphometrics},
+ journal = {Medical Image Analysis},
+ author = {Yongcheng Yao and Junru Zhong and Liping Zhang and Sheheryar Khan and Weitian Chen},
+ volume = {91},
+ pages = {103035},
+ year = {2024},
+ issn = {1361-8415},
+ doi = {https://doi.org/10.1016/j.media.2023.103035}
+ }
+
+ @InProceedings{10.1007/978-3-031-82007-6_16,
+ author = {Yao, Yongcheng and Chen, Weitian},
+ editor = {Wu, Shandong and Shabestari, Behrouz and Xing, Lei},
+ title = {Quantifying Knee Cartilage Shape and Lesion: From Image to Metrics},
+ booktitle = {Applications of Medical Artificial Intelligence},
+ year = {2025},
+ publisher = {Springer Nature Switzerland},
+ address = {Cham},
+ pages = {162--172}
+ }
+ """
+
+ _DESCRIPTION = """\
+ This is the official release of the OAIZIB-CM dataset.
+
+ OAIZIB-CM is based on the OAIZIB dataset.
+ OAIZIB paper: Automated Segmentation of Knee Bone and Cartilage combining Statistical Shape Knowledge
+ and Convolutional Neural Networks: Data from the Osteoarthritis Initiative.
+ In OAIZIB-CM, tibial cartilage is split into medial and lateral tibial cartilages.
+ OAIZIB-CM includes CLAIR-Knee-103R, consisting of:
+ - a template image learned from 103 MR images of subjects without radiographic OA,
+ - a corresponding 5-ROI segmentation mask for cartilages and bones, and
+ - a corresponding 20-ROI atlas for articular cartilages.
+
+ This dataset is released under the CC BY-NC 4.0 license.
+ """
+
+ _HOMEPAGE_URL = "https://github.com/YongchengYAO/CartiMorph"
+

class OAIZIBCMDataset(GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")

    def _info(self):
        return DatasetInfo(
+             description=_DESCRIPTION,
+             features=Features(
+                 {
+                     # Let datasets library infer the shape dimensions
+                     "image": Array3D(dtype="float32"),
+                     "mask": Array3D(dtype="float32"),
+                     "image_path": Value("string"),
+                     "mask_path": Value("string"),
+                 }
            ),
+             homepage=_HOMEPAGE_URL,
+             citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
+         # Download metadata files (CSV) that describe the dataset
        base_url = "https://huggingface.co/datasets/YongchengYAO/OAIZIB-CM/resolve/load_dataset-support"
        train_csv_url = f"{base_url}/train.csv"
        test_csv_url = f"{base_url}/test.csv"
+
+         # Download CSV files containing dataset metadata
+         csv_paths = dl_manager.download({"train": train_csv_url, "test": test_csv_url})
+
+         logger.info(f"Downloaded CSV paths: {csv_paths}")
+
+         # Check if local data directory exists
+         data_dir = os.path.join(os.getcwd(), "data")
+
+         train_df = pd.read_csv(csv_paths["train"])
+         test_df = pd.read_csv(csv_paths["test"])
+
        return [
+             SplitGenerator(
+                 name=Split.TRAIN,
+                 gen_kwargs={"df": train_df, "split": "train", "data_dir": data_dir},
+             ),
+             SplitGenerator(
+                 name=Split.TEST,
+                 gen_kwargs={"df": test_df, "split": "test", "data_dir": data_dir},
+             ),
        ]

+     def _generate_examples(self, df, split, data_dir):
+         # Set up directory paths based on the split
        if split == "train":
+             img_dir = os.path.join(data_dir, "imagesTr")
+             mask_dir = os.path.join(data_dir, "labelsTr")
        elif split == "test":
+             img_dir = os.path.join(data_dir, "imagesTs")
+             mask_dir = os.path.join(data_dir, "labelsTs")
        else:
+             raise ValueError(f"Unknown split: {split}")
+
+         # Log directories for debugging
+         logger.info(f"Looking for {split} images in: {img_dir}")
+         logger.info(f"Looking for {split} masks in: {mask_dir}")
+
+         # Verify directories exist
+         os.makedirs(img_dir, exist_ok=True)
+         os.makedirs(mask_dir, exist_ok=True)
+
+         # Track yield count for debugging
+         count = 0
+         skipped = 0

        for idx, row in df.iterrows():
            img_file = row["image"]
            mask_file = row["mask"]
+             img_path = os.path.join(img_dir, img_file)
+             mask_path = os.path.join(mask_dir, mask_file)
+
+             # Check if files exist
+             if not os.path.exists(img_path):
+                 logger.warning(f"Image not found: {img_path}")
+                 skipped += 1
+                 continue
+
+             if not os.path.exists(mask_path):
+                 logger.warning(f"Mask not found: {mask_path}")
+                 skipped += 1
                continue
+
            try:
+                 # Load and convert the image and mask data
+                 img_nib = nib.load(img_path)
+                 image = img_nib.get_fdata().astype("float32")
+
+                 mask_nib = nib.load(mask_path)
+                 mask = mask_nib.get_fdata().astype("float32")
+
+                 yield idx, {
+                     "image": image,
+                     "mask": mask,
+                     "image_path": img_path,
+                     "mask_path": mask_path,
+                 }
+                 count += 1
+
            except Exception as e:
+                 logger.error(f"Error processing {img_path} and {mask_path}: {e}")
+                 skipped += 1

+         logger.info(
+             f"Successfully yielded {count} examples for {split} split, skipped {skipped}"
+         )
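
Below is a minimal usage sketch in Python (not part of the commit) showing how a loading script like this is typically invoked through the datasets library. It assumes the NIfTI volumes have already been placed under ./data/imagesTr, ./data/labelsTr, ./data/imagesTs, and ./data/labelsTs, since the new _generate_examples resolves paths against os.getcwd()/data; the trust_remote_code flag is the only call detail assumed beyond what the script itself defines.

# Usage sketch (assumes the imaging data were already fetched into ./data/...).
from datasets import load_dataset

# trust_remote_code=True lets the repository's loading script (OAIZIB-CM.py) run;
# exact requirements depend on the installed datasets version.
ds = load_dataset("YongchengYAO/OAIZIB-CM", trust_remote_code=True)

# Each example exposes the fields declared in _info(): 3D float32 arrays plus file paths.
sample = ds["train"][0]
print(sample["image_path"], sample["mask_path"])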