JeffreyJsam committed 6dc0ffe (verified) · parent: c087755

Upload download_swim.py

Files changed (1): utils/download_swim.py (+143, -0)
utils/download_swim.py (ADDED):
"""
download_swim.py

Streams and downloads the full paired dataset (images + label txt files) from a
Hugging Face Hub repository. It recursively processes all available chunk
subfolders (e.g., '000', '001', ...) under the given parent paths.

Features:
- Recursively discovers subdirs (chunks) using HfFileSystem
- Optionally flattens the directory structure by removing the deepest chunk level
- Saves each .png image with its corresponding .txt label

Use this script if you want to download the complete dataset for model training or offline access.

Usage:
    # Download all chunks (flattened)
    python utils/download_swim.py --output-dir ./SWiM --flatten

    # Download specific chunks, keeping the chunk subdirectories
    python utils/download_swim.py --chunks 000 001 002 --no-flatten

Arguments:
    --repo-id                 Hugging Face dataset repository ID
    --images-parent           Parent directory for image chunks (e.g., Baseline/images/train)
    --labels-parent           Parent directory for label chunks (e.g., Baseline/labels/train)
    --output-dir              Where to save the downloaded dataset
    --flatten / --no-flatten  Remove the final 'chunk' subdir in output paths (default: --flatten)
    --chunks                  Specific chunk names (e.g., 000 001); omit to download all
"""

import argparse
from io import BytesIO
from pathlib import Path
from typing import Optional

import fsspec
from huggingface_hub import HfFileSystem, hf_hub_url, list_repo_tree
from huggingface_hub.hf_api import RepoFile
from PIL import Image
from tqdm import tqdm


def enumerate_chunks(repo_id: str, images_parent: str) -> list:
    """
    Lists all immediate chunk subdirs under the images parent using HfFileSystem.
    Returns a sorted list of subdir names (e.g., ['000', '001', ...]).
    """
    fs = HfFileSystem()
    repo_path = f"datasets/{repo_id}/{images_parent}"
    entries = fs.ls(repo_path, detail=True)
    subdirs = [entry["name"].split("/")[-1] for entry in entries if entry["type"] == "directory"]
    subdirs.sort()
    return subdirs


def download_dataset(
    repo_id: str,
    images_parent: str,
    labels_parent: str,
    output_dir: str,
    flatten: bool = True,
    chunks: Optional[list] = None,
):
    total_downloaded = 0
    all_chunks = chunks
    if all_chunks is None:
        all_chunks = enumerate_chunks(repo_id, images_parent)
    print(f"Found chunks: {all_chunks}")

    for chunk in all_chunks:
        image_subdir = f"{images_parent}/{chunk}"
        label_subdir = f"{labels_parent}/{chunk}"

        # List files only in the specified chunk.
        image_files = list_repo_tree(
            repo_id=repo_id,
            path_in_repo=image_subdir,
            repo_type="dataset",
            recursive=True,
        )

        for img_file in tqdm(image_files, desc=f"Downloading {chunk}", leave=False):
            if not isinstance(img_file, RepoFile) or not img_file.path.lower().endswith(".png"):
                continue

            rel_path = Path(img_file.path).relative_to(image_subdir)
            label_path = f"{label_subdir}/{rel_path.with_suffix('.txt')}"

            if flatten:
                # Drop the chunk dir (second-to-last path component).
                parts = img_file.path.split("/")
                flat_path = "/".join(parts[:-2] + [parts[-1]])
                # For labels, also swap the extension and the 'images' path segment.
                flat_label_path = flat_path.replace(".png", ".txt").replace("images", "labels")
                local_image_path = Path(output_dir) / flat_path
                local_label_path = Path(output_dir) / flat_label_path
            else:
                local_image_path = Path(output_dir) / img_file.path
                local_label_path = Path(output_dir) / label_path

            local_image_path.parent.mkdir(parents=True, exist_ok=True)
            local_label_path.parent.mkdir(parents=True, exist_ok=True)

            image_url = hf_hub_url(repo_id=repo_id, filename=img_file.path, repo_type="dataset")
            label_url = hf_hub_url(repo_id=repo_id, filename=label_path, repo_type="dataset")
            try:
                with fsspec.open(image_url) as f:
                    image = Image.open(BytesIO(f.read()))
                    image.save(local_image_path)
                with fsspec.open(label_url) as f:
                    txt_content = f.read()
                with open(local_label_path, "wb") as out_f:
                    out_f.write(txt_content)
                total_downloaded += 1
            except Exception as e:
                print(f"Failed {rel_path}: {e}")

    print(f"Downloaded {total_downloaded} image/txt pairs.")
    print(f"Saved under: {Path(output_dir).resolve()}")


def parse_args():
    parser = argparse.ArgumentParser(
        description="Stream and download paired images + txt labels from a Hugging Face "
        "folder-structured dataset, optionally across multiple chunks."
    )
    parser.add_argument("--repo-id", default="JeffreyJsam/SWiM-SpacecraftWithMasks", help="Hugging Face dataset repo ID.")
    parser.add_argument("--images-parent", default="Baseline/images/val", help="Parent directory for image chunks.")
    parser.add_argument("--labels-parent", default="Baseline/labels/val", help="Parent directory for label chunks.")
    parser.add_argument("--output-dir", default="./SWiM", help="Where to save the downloaded data.")
    # Note: type=bool would treat any non-empty string (including "False") as True,
    # so use BooleanOptionalAction (Python 3.9+) for a proper --flatten/--no-flatten switch.
    parser.add_argument(
        "--flatten",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Remove the chunk subdirectory from output paths (use --no-flatten to keep it).",
    )
    parser.add_argument("--chunks", nargs="*", default=None, help="Specific chunk names to download (e.g., 000 001). Leave empty to process all.")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    download_dataset(
        repo_id=args.repo_id,
        images_parent=args.images_parent,
        labels_parent=args.labels_parent,
        output_dir=args.output_dir,
        flatten=args.flatten,
        chunks=args.chunks,
    )
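
For completeness, here is a minimal sketch of calling the downloader programmatically rather than through the CLI. It uses only names defined in the script above; the chunk selection and output directory are illustrative, and it assumes the module is importable (e.g., run from the utils/ directory or with utils/ on sys.path):

# Hypothetical programmatic usage of download_swim.py (illustrative values).
from download_swim import download_dataset

download_dataset(
    repo_id="JeffreyJsam/SWiM-SpacecraftWithMasks",
    images_parent="Baseline/images/val",
    labels_parent="Baseline/labels/val",
    output_dir="./SWiM",   # illustrative output directory
    flatten=True,          # drop the '000', '001', ... chunk level
    chunks=["000"],        # a single chunk for a quick test; None downloads all
)

With flatten=True, pairs land directly under ./SWiM/Baseline/images/val/*.png and ./SWiM/Baseline/labels/val/*.txt; with flatten=False the chunk level is preserved (e.g., .../val/000/...).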