YongchengYAO committed
Commit 302878f · 1 Parent(s): 9aa169b
Files changed (4):
  1. Images.zip +1 -1
  2. Landmarks-fig.zip +1 -1
  3. Landmarks.zip +2 -2
  4. get_dataset.py +372 -0
Images.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:26cba2037c0602599c2cea38642ea7d15eabd168ff2695bce4683cab854241c2
+ oid sha256:301b0e8619d9e5b243e174d9846d42e8253f65a27cea48cc3452b8068f098623
  size 1153890344
Landmarks-fig.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:46c5a4630ee37c521094e70e1a9561d4859c34896c795f5b469c5925eb69c9bb
+ oid sha256:bfaba739a464f2f968a2b687ef86d6c531ffd6a5c26ec4163b903b3df23e6a8d
  size 1309339265
Landmarks.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d04130e9c217a19fbb7c0041309424965f705978c57b4bff3288776a2782fb88
- size 145467
+ oid sha256:687a3e44abdd89e2d271bd0a699da77f83ab6b8cee59c1b6edf19bbb574f8753
+ size 146629
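The three pointer diffs above only swap the LFS oid (and, for Landmarks.zip, the size), i.e. the archives were regenerated in place. A minimal sketch, assuming the archives have been pulled locally, for checking a file against its new pointer (the helper name is illustrative):

import hashlib

def lfs_oid(path, chunk_size=1 << 20):
    # Git LFS oids are plain SHA-256 digests of the file content;
    # stream in chunks so the ~1.1 GB archives do not load into memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Should print 687a3e44abdd89e2d271bd0a699da77f83ab6b8cee59c1b6edf19bbb574f8753
print(lfs_oid("Landmarks.zip"))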
get_dataset.py ADDED
@@ -0,0 +1,372 @@
import os
import subprocess
import shutil
import nibabel as nib
import matplotlib.pyplot as plt
import glob
import json
import rarfile
import numpy as np
import cv2
from pathlib import Path
import argparse


# ====================================
# Dataset Info [!]
# ====================================
# Dataset: Cephalogram400
# Data (original): https://figshare.com/s/37ec464af8e81ae6ebbf
# Data (HF): https://huggingface.co/datasets/YongchengYAO/Cephalogram400
# Format (original): bmp
# Format (HF): nii.gz
# ====================================


def convert_bmp_to_niigz(
    bmp_dir,
    niigz_dir,
    slice_dim_type,
    pseudo_voxel_size,
    slip_x=False,
    slip_y=False,
    swap_xy=False,
):
    """
    Convert BMP image files to NIfTI (.nii.gz) format.

    This function converts 2D BMP images to 3D NIfTI volumes with the specified
    slice orientation. The output NIfTI files have RAS+ orientation with the
    specified voxel size.

    Args:
        bmp_dir (str): Input directory containing BMP files to convert
        niigz_dir (str): Output directory where NIfTI files will be saved
        slice_dim_type (int): Slice dimension/orientation type:
            0: Sagittal (YZ plane)
            1: Coronal (XZ plane)
            2: Axial (XY plane)
        pseudo_voxel_size (list): List of 3 floats specifying voxel dimensions in mm [x, y, z]
        slip_x (bool, optional): If True, flip image along X axis. Defaults to False.
        slip_y (bool, optional): If True, flip image along Y axis. Defaults to False.
        swap_xy (bool, optional): If True, swap X and Y dimensions. Defaults to False.

    Returns:
        tuple: Image dimensions (height, width) of the last converted BMP
            (the images are assumed to share one size)
    """

    # Validate slice_dim_type
    if slice_dim_type not in [0, 1, 2]:
        raise ValueError("slice_dim_type must be 0, 1, or 2")

    # Convert pseudo_voxel_size to list if it's not already
    pseudo_voxel_size = list(pseudo_voxel_size)

    # Create output directory
    Path(niigz_dir).mkdir(parents=True, exist_ok=True)

    # Get all BMP files
    bmp_files = list(Path(bmp_dir).glob("*.bmp"))
    print(f"Found {len(bmp_files)} .bmp files")
    if not bmp_files:
        raise FileNotFoundError(f"No .bmp files found in {bmp_dir}")

    for bmp_file in bmp_files:
        try:
            print(f"Converting {bmp_file.name}")

            # Read BMP image
            img_2d = cv2.imread(str(bmp_file), cv2.IMREAD_GRAYSCALE)
            height_orig, width_orig = img_2d.shape

            # Note: this is definitely correct, DO NOT SWAP the order of transformations
            if slip_x:
                img_2d = cv2.flip(img_2d, 0)  # 0 means flip vertically
            if slip_y:
                img_2d = cv2.flip(img_2d, 1)  # 1 means flip horizontally
            if swap_xy:  # this line should be AFTER slip_x and slip_y
                img_2d = np.swapaxes(img_2d, 0, 1)

            # Create 3D array based on slice_dim_type
            if slice_dim_type == 0:  # Sagittal (YZ plane)
                img_3d = np.zeros(
                    (1, img_2d.shape[0], img_2d.shape[1]), dtype=img_2d.dtype
                )
                img_3d[0, :, :] = img_2d
            elif slice_dim_type == 1:  # Coronal (XZ plane)
                img_3d = np.zeros(
                    (img_2d.shape[0], 1, img_2d.shape[1]), dtype=img_2d.dtype
                )
                img_3d[:, 0, :] = img_2d
            else:  # Axial (XY plane)
                img_3d = np.zeros(
                    (img_2d.shape[0], img_2d.shape[1], 1), dtype=img_2d.dtype
                )
                img_3d[:, :, 0] = img_2d

            # Create affine matrix for RAS+ orientation with the pseudo voxel size
            affine = np.diag(pseudo_voxel_size + [1])

            # Create NIfTI image
            nii_img = nib.Nifti1Image(img_3d, affine)

            # Set header information
            nii_img.header.set_zooms(pseudo_voxel_size)

            # Save as NIfTI file
            output_file = Path(niigz_dir) / f"{bmp_file.stem}.nii.gz"
            nib.save(nii_img, str(output_file))
            print(f"Saved to {output_file}")

        except Exception as e:
            print(f"Error converting {bmp_file.name}: {e}")

    return height_orig, width_orig

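# --------------------------------------------------------------------
# [Editorial note] A minimal sanity-check sketch, not part of the
# committed script: with pseudo_voxel_size=[0.1, 0.1, 0.1] the affine
# built above is np.diag([0.1, 0.1, 0.1, 1]), which nibabel reports as
# RAS+, and a sagittal volume (slice_dim_type=0) is the (possibly
# flipped/swapped) 2D array with a leading singleton dimension.
#
#   import nibabel as nib
#   nii = nib.load("Images/cephalo.nii.gz")  # filename is illustrative
#   print(nib.aff2axcodes(nii.affine))       # -> ('R', 'A', 'S')
#   print(nii.header.get_zooms())            # -> (0.1, 0.1, 0.1)
#   print(nii.shape[0])                      # -> 1 (singleton sagittal axis)
# --------------------------------------------------------------------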
def process_landmarks_data(
    landmarks_txt_dir: str,
    landmarks_json_dir: str,
    n: int,
    height_width_orig,
    slip_x=False,
    slip_y=False,
    swap_xy=False,
) -> None:
    """
    Read landmark points from all txt files in a directory and save as JSON files.

    Args:
        landmarks_txt_dir (str): Directory containing the txt files
        landmarks_json_dir (str): Directory where JSON files will be saved
        n (int): Number of lines to read from each file
        height_width_orig: Original (height, width) of the image
        slip_x (bool): Whether to flip coordinates along the x-axis
        slip_y (bool): Whether to flip coordinates along the y-axis
        swap_xy (bool): Whether to swap x and y coordinates
    """
    os.makedirs(landmarks_json_dir, exist_ok=True)

    for txt_file in glob.glob(os.path.join(landmarks_txt_dir, "*.txt")):
        result = {}
        filename = os.path.basename(txt_file)
        json_path = os.path.join(landmarks_json_dir, filename.replace(".txt", ".json"))

        try:
            with open(txt_file, "r") as f:
                for i in range(n):
                    line = f.readline().strip()
                    if not line:
                        break
                    # Note: this is definitely correct, DO NOT SWAP idx_dim1 and idx_dim2
                    # Assuming an image with height and width:
                    # - The data array read from the bmp file is of size (height, width) -- dim1 is height, dim2 is width
                    # - The landmark coordinates are stored as the indices in the width (coordinate 1) and height (coordinate 2) directions
                    idx_dim2, idx_dim1 = map(int, line.split(","))

                    # Apply transformations
                    # Note: this is definitely correct, DO NOT SWAP the order of transformations
                    if slip_x:
                        idx_dim1 = height_width_orig[0] - idx_dim1
                    if slip_y:
                        idx_dim2 = height_width_orig[1] - idx_dim2
                    if swap_xy:  # this line should be AFTER slip_x and slip_y
                        idx_dim1, idx_dim2 = idx_dim2, idx_dim1

                    result[f"P{i+1}"] = [1, idx_dim1, idx_dim2]

            # Save to JSON
            with open(json_path, "w") as f:
                json.dump(result, f, indent=4)

        except FileNotFoundError:
            print(f"Error: File {txt_file} not found")
        except ValueError:
            print(f"Error: Invalid format in file {txt_file}")
        except Exception as e:
            print(f"Error reading file {txt_file}: {str(e)}")

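# --------------------------------------------------------------------
# [Editorial note] A worked sketch of the mapping above, not part of the
# committed script. For an image of height H, a txt line "234,512"
# (width index 234, height index 512) processed with slip_x=True,
# slip_y=False, swap_xy=True becomes:
#
#   idx_dim2, idx_dim1 = 234, 512      # parsed as (width, height) indices
#   idx_dim1 = H - 512                 # slip_x: flip along the height axis
#   idx_dim1, idx_dim2 = 234, H - 512  # swap_xy, applied after the flip
#   result["P1"] = [1, 234, H - 512]
#
# i.e. each JSON entry is [1, dim1_index, dim2_index], with the leading 1
# apparently indexing the singleton slice dimension of the NIfTI volumes
# written by convert_bmp_to_niigz.
# --------------------------------------------------------------------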
def plot_slice_with_landmarks(nii_path: str, json_path: str, fig_path: str = None):
    """Plot first slice from NIfTI file and overlay landmarks from JSON file.

    Args:
        nii_path (str): Path to .nii.gz file
        json_path (str): Path to landmarks JSON file
        fig_path (str, optional): Path to save the plot. If None, displays plot
    """
    # Load NIfTI image and extract first slice
    nii_img = nib.load(nii_path)
    slice_data = nii_img.get_fdata()[0, :, :]

    # Load landmark coordinates from JSON
    with open(json_path, "r") as f:
        landmarks = json.load(f)

    # Setup visualization
    plt.figure(figsize=(12, 12))
    plt.imshow(
        slice_data.T, cmap="gray", origin="lower"
    )  # the transpose is necessary only for visualization

    # Extract and plot landmark coordinates
    x_coords = []
    y_coords = []
    for point_id, coords in landmarks.items():
        if len(coords) == 3:  # Check for valid [1, x, y] format
            # Note: this is definitely correct, DO NOT SWAP coords[1] and coords[2]
            x_coords.append(coords[1])
            y_coords.append(coords[2])

    # Add landmarks and labels
    plt.scatter(
        x_coords,
        y_coords,
        facecolors="#18A727",
        edgecolors="black",
        marker="o",
        s=30,
        linewidth=1,
    )
    for i, (x, y) in enumerate(zip(x_coords, y_coords), 1):
        plt.annotate(
            f"{i}", (x, y), xytext=(2, 2), textcoords="offset points", color="#FE9100"
        )

    # Configure plot appearance
    plt.axis("on")
    plt.xlabel("Posterior to Anterior")
    plt.ylabel("Inferior to Superior")

    # Save or display the plot
    if fig_path:
        plt.savefig(fig_path, bbox_inches="tight", dpi=300)
        print(f"Plot saved to: {fig_path}")
    else:
        plt.show()

    plt.close()


def plot_slice_with_landmarks_batch(image_dir: str, landmark_dir: str, fig_dir: str):
    """Plot all cases from given directories.

    Args:
        image_dir (str): Directory containing .nii.gz files
        landmark_dir (str): Directory containing landmark JSON files
        fig_dir (str): Directory to save output figures
    """
    # Create output directory if it doesn't exist
    os.makedirs(fig_dir, exist_ok=True)

    # Process each .nii.gz file
    for nii_path in glob.glob(os.path.join(image_dir, "*.nii.gz")):
        base_name = os.path.splitext(os.path.splitext(os.path.basename(nii_path))[0])[0]
        json_path = os.path.join(landmark_dir, f"{base_name}.json")
        fig_path = os.path.join(fig_dir, f"{base_name}.png")

        # Plot and save
        if os.path.exists(json_path):
            plot_slice_with_landmarks(nii_path, json_path, fig_path)
        else:
            print(f"Warning: No landmark file found for {base_name}")

def download_and_extract(dataset_dir, dataset_name):
    # Download files
    print(f"Downloading {dataset_name} dataset to {dataset_dir}...")

    # ====================================
    # Download logic [!]
    # ====================================
    # Download the file using curl (-L follows redirects)
    url = "https://figshare.com/ndownloader/articles/3471833?private_link=37ec464af8e81ae6ebbf"
    output_file = "Cephalogram400.zip"
    subprocess.run(["curl", "-L", url, "-o", output_file], check=True)

    # Extract the ZIP file
    print("Extracting ZIP file...")
    subprocess.run(["unzip", output_file], check=True)

    # Find and extract all RAR files
    print("Extracting RAR files...")
    for file in os.listdir("."):
        if file.endswith(".rar"):
            with rarfile.RarFile(file) as rf:
                rf.extractall()

    # Create the Images-raw directory
    os.makedirs("Images-raw", exist_ok=True)

    # Move all BMP files from RawImage to Images-raw using glob
    for src_path in glob.glob("RawImage/**/*.bmp", recursive=True):
        shutil.move(src_path, os.path.join("Images-raw", os.path.basename(src_path)))

    # Convert BMP files to 3D nii.gz
    height_orig, width_orig = convert_bmp_to_niigz(
        "Images-raw",
        "Images",
        slice_dim_type=0,
        pseudo_voxel_size=[0.1, 0.1, 0.1],
        slip_x=True,
        slip_y=False,
        swap_xy=True,
    )

    # Read landmark points from txt files and save as JSON
    process_landmarks_data(
        "400_senior",
        "Landmarks",
        19,
        height_width_orig=[height_orig, width_orig],
        slip_x=True,
        slip_y=False,
        swap_xy=True,
    )

    # Plot slices with landmarks
    plot_slice_with_landmarks_batch("Images", "Landmarks", "Landmarks-fig")

    # Clean up
    for dir_name in [
        "RawImage",
        "400_junior",
        "400_senior",
        "Images-raw",
        "EvaluationCode",
    ]:
        shutil.rmtree(dir_name, ignore_errors=True)
    for file in os.listdir("."):
        if file.endswith((".rar", ".zip")):
            os.remove(file)
    # ====================================

    print(f"Download and extraction completed for {dataset_name}")

if __name__ == "__main__":
    # Set up argument parser
    parser = argparse.ArgumentParser(description="Download and extract dataset")
    parser.add_argument(
        "-d",
        "--dir_datasets_data",
        help="Directory path where datasets will be stored",
        required=True,
    )
    parser.add_argument(
        "-n",
        "--dataset_name",
        help="Name of the dataset",
        required=True,
    )
    args = parser.parse_args()

    # Create dataset directory
    dataset_dir = os.path.join(args.dir_datasets_data, args.dataset_name)
    os.makedirs(dataset_dir, exist_ok=True)

    # Change to dataset directory
    os.chdir(dataset_dir)

    # Download and extract dataset
    download_and_extract(dataset_dir, args.dataset_name)
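For reference, a typical invocation of the new script, assuming curl, unzip, and an unrar backend for the rarfile package are available on the system:

    python get_dataset.py -d /path/to/datasets -n Cephalogram400

This downloads the figshare archive into /path/to/datasets/Cephalogram400, converts the raw BMPs to Images/*.nii.gz, writes Landmarks/*.json from the 400_senior annotations, and renders overlay figures into Landmarks-fig/, i.e. the directories behind the three archives updated in this commit.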