YongchengYAO committed
Commit 72a5419 · 1 Parent(s): a9104a9
Files changed (1)
  1. OAIZIB-CM.py +28 -42
OAIZIB-CM.py CHANGED
@@ -14,8 +14,6 @@ from datasets import (
 
 logger = datasets.logging.get_logger(__name__)
 
-EXPECTED_SHAPE = (160, 384, 384)
-
 _CITATION = """\
 @article{YAO2024103035,
 title = {CartiMorph: A framework for automated knee articular cartilage morphometrics},
@@ -61,16 +59,27 @@ _HOMEPAGE_URL = "https://github.com/YongchengYAO/CartiMorph"
 class OAIZIBCMDataset(GeneratorBasedBuilder):
     VERSION = datasets.Version("1.0.0")
 
+    @staticmethod
+    def load_nifti(example):
+        """Map function to load NIFTI images on demand."""
+        img_nib = nib.load(example["image_path"])
+        image = img_nib.get_fdata().astype("float32")
+        mask_nib = nib.load(example["mask_path"])
+        mask = mask_nib.get_fdata().astype("float32")
+        example["image"] = image
+        example["mask"] = mask
+        return example
+
     def _info(self):
         # Define dataset information including feature schema
         return DatasetInfo(
             description=_DESCRIPTION,
-            features=Features({
-                "image": Array3D(shape=EXPECTED_SHAPE, dtype="float32"),
-                "mask": Array3D(shape=EXPECTED_SHAPE, dtype="float32"),
-                "image_path": Value("string"),
-                "mask_path": Value("string"),
-            }),
+            features=Features(
+                {
+                    "image_path": Value("string"),
+                    "mask_path": Value("string"),
+                }
+            ),
             homepage=_HOMEPAGE_URL,
             citation=_CITATION,
         )
@@ -82,14 +91,14 @@ class OAIZIBCMDataset(GeneratorBasedBuilder):
         test_csv_url = f"{base_url}/test.csv"
         csv_paths = dl_manager.download({"train": train_csv_url, "test": test_csv_url})
         logger.info(f"Downloaded CSV paths: {csv_paths}")
-
+
         # Extract main dataset archive
         data_root_dir = dl_manager.download_and_extract(
             "https://huggingface.co/datasets/YongchengYAO/OAIZIB-CM/resolve/load_dataset-support/data/OAIZIB-CM.zip"
         )
         data_dir = os.path.join(data_root_dir, "OAIZIB-CM")
         logger.info(f"Data directory set to {data_dir}")
-
+
         # Load split metadata
         train_df = pd.read_csv(csv_paths["train"])
         test_df = pd.read_csv(csv_paths["test"])
@@ -116,48 +125,25 @@ class OAIZIBCMDataset(GeneratorBasedBuilder):
             mask_dir = os.path.join(data_dir, "labelsTs")
         else:
             raise ValueError(f"Unknown split: {split}")
-
+
         # Log directories and ensure they exist
         logger.info(f"Looking for {split} images in: {img_dir}")
         logger.info(f"Looking for {split} masks in: {mask_dir}")
         os.makedirs(img_dir, exist_ok=True)
         os.makedirs(mask_dir, exist_ok=True)
-
+
         # Process and yield examples
         count = 0
-        skipped = 0
         for idx, row in df.iterrows():
             img_file = row["image"]
             mask_file = row["mask"]
             img_path = os.path.join(img_dir, img_file)
             mask_path = os.path.join(mask_dir, mask_file)
+            # Only yield paths, don't load data into memory
+            yield idx, {
+                "image_path": img_path,
+                "mask_path": mask_path,
+            }
+            count += 1
 
-            # Skip files that don't exist
-            if not os.path.exists(img_path):
-                logger.warning(f"Image not found: {img_path}")
-                skipped += 1
-                continue
-            if not os.path.exists(mask_path):
-                logger.warning(f"Mask not found: {mask_path}")
-                skipped += 1
-                continue
-
-            try:
-                # Load and process image data
-                img_nib = nib.load(img_path)
-                image = img_nib.get_fdata().astype("float32")
-                mask_nib = nib.load(mask_path)
-                mask = mask_nib.get_fdata().astype("float32")
-
-                yield idx, {
-                    "image": image,
-                    "mask": mask,
-                    "image_path": img_path,
-                    "mask_path": mask_path,
-                }
-                count += 1
-            except Exception as e:
-                logger.error(f"Error processing {img_path} and {mask_path}: {e}")
-                skipped += 1
-
-        logger.info(f"Successfully yielded {count} examples for {split} split, skipped {skipped}")
+        logger.info(f"Successfully yielded {count} examples for {split} split")
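Since the builder now yields only `image_path` and `mask_path` strings, the NIfTI volumes have to be materialized downstream, for example by applying the new `load_nifti` helper to the loaded split. Below is a minimal usage sketch, assuming the script is loadable from the Hub with `trust_remote_code=True` and that `nibabel` is installed; it redefines a local `load_nifti` that mirrors the static method added in this commit rather than importing it.

```python
import nibabel as nib
from datasets import load_dataset


def load_nifti(example):
    """Mirror of the commit's `load_nifti` map function: attach arrays on demand."""
    example["image"] = nib.load(example["image_path"]).get_fdata().astype("float32")
    example["mask"] = nib.load(example["mask_path"]).get_fdata().astype("float32")
    return example


# The builder now only yields file paths, so loading a split is cheap.
ds = load_dataset("YongchengYAO/OAIZIB-CM", split="test", trust_remote_code=True)

# Option 1: resolve a single example on demand (no arrays are cached).
example = load_nifti(ds[0])
print(example["image"].shape, example["mask"].shape)

# Option 2: materialize the whole split into the datasets cache.
ds_arrays = ds.map(load_nifti)
```

Calling the helper on an indexed example keeps memory flat, while `Dataset.map` writes every decoded volume into the Arrow cache, trading disk space for faster repeated access.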