Datasets:

Modalities:
Image
ArXiv:
License:
JeffreyJsam committed on
Commit
8d68112
·
verified ·
1 Parent(s): fc9aa1b

Script to sample/download data with requirements.txt

Browse files
Files changed (3) hide show
  1. download_swim.py +112 -0
  2. requirements.txt +4 -0
  3. sample_swim.py +83 -0
download_swim.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ from io import BytesIO
3
+ from pathlib import Path
4
+ from huggingface_hub import list_repo_tree, hf_hub_url, HfFileSystem
5
+ from huggingface_hub.hf_api import RepoFile
6
+ import fsspec
7
+ from PIL import Image
8
+ from tqdm import tqdm
9
+
10
def enumerate_chunks(repo_id, images_parent):
    """
    List the immediate chunk subdirectories under the images parent.

    Browses the dataset repo through HfFileSystem and returns the sorted
    subdirectory names (e.g. ['000', '001', ...]).
    """
    filesystem = HfFileSystem()
    root = f"datasets/{repo_id}/{images_parent}"
    chunk_names = [
        item['name'].rsplit('/', 1)[-1]
        for item in filesystem.ls(root, detail=True)
        if item['type'] == 'directory'
    ]
    return sorted(chunk_names)
21
+
22
def sample_dataset(
    repo_id: str,
    images_parent: str,
    labels_parent: str,
    output_dir: str,
    flatten: bool = True,
    chunks: list = None,
):
    """
    Download paired .png images and .txt labels from a chunked HF dataset repo.

    The repo is expected to store images under ``images_parent/<chunk>/`` and
    the matching labels under ``labels_parent/<chunk>/`` with the same stem.

    Parameters
    ----------
    repo_id : Hugging Face dataset repo ID, e.g. "user/dataset".
    images_parent : repo directory containing the image chunk subdirs.
    labels_parent : repo directory containing the label chunk subdirs.
    output_dir : local directory that downloads are saved under.
    flatten : when True, drop the chunk directory level locally so every
        chunk's files land in a single images/ and labels/ tree.
    chunks : explicit chunk names to fetch (e.g. ['000', '001']);
        None means enumerate and fetch all chunks.
    """

    def _drop_chunk(repo_path: str) -> str:
        # Remove the chunk directory (second-to-last path component), keeping
        # the parent directories and the file name.
        parts = repo_path.split('/')
        return '/'.join(parts[:-2] + [parts[-1]])

    total_downloaded = 0
    all_chunks = chunks
    if all_chunks is None:
        all_chunks = enumerate_chunks(repo_id, images_parent)
        print(f"Found chunks: {all_chunks}")
    for chunk in all_chunks:
        image_subdir = f"{images_parent}/{chunk}"
        label_subdir = f"{labels_parent}/{chunk}"

        # List files only within this chunk.
        image_files = list_repo_tree(
            repo_id=repo_id,
            path_in_repo=image_subdir,
            repo_type="dataset",
            recursive=True,
        )

        for img_file in tqdm(image_files, desc=f"Downloading {chunk}", leave=False):
            if not isinstance(img_file, RepoFile) or not img_file.path.lower().endswith(".png"):
                continue

            rel_path = Path(img_file.path).relative_to(image_subdir)
            label_path = f"{label_subdir}/{rel_path.with_suffix('.txt')}"

            if flatten:
                # Structural path surgery instead of the previous
                # .replace('.png', '.txt').replace('images', 'labels') chain,
                # which replaced ALL occurrences and corrupted paths whose
                # file names contained "images" or an embedded ".png".
                local_image_path = Path(output_dir) / _drop_chunk(img_file.path)
                local_label_path = Path(output_dir) / _drop_chunk(label_path)
            else:
                local_image_path = Path(output_dir) / img_file.path
                local_label_path = Path(output_dir) / label_path

            local_image_path.parent.mkdir(parents=True, exist_ok=True)
            local_label_path.parent.mkdir(parents=True, exist_ok=True)

            image_url = hf_hub_url(repo_id=repo_id, filename=img_file.path, repo_type="dataset")
            label_url = hf_hub_url(repo_id=repo_id, filename=label_path, repo_type="dataset")
            try:
                # Stream both files; a failure of either skips the pair.
                with fsspec.open(image_url) as f:
                    image = Image.open(BytesIO(f.read()))
                    image.save(local_image_path)
                with fsspec.open(label_url) as f:
                    txt_content = f.read()
                with open(local_label_path, "wb") as out_f:
                    out_f.write(txt_content)
                total_downloaded += 1
            except Exception as e:
                print(f"Failed {rel_path}: {e}")

    print(f"Downloaded {total_downloaded} image/txt pairs.")
    print(f"Saved under: {Path(output_dir).resolve()}")
90
+
91
def _str2bool(value):
    """Parse a CLI boolean: accepts true/false, yes/no, 1/0 (case-insensitive)."""
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ("true", "yes", "1"):
        return True
    if lowered in ("false", "no", "0"):
        return False
    raise argparse.ArgumentTypeError(f"Expected a boolean, got {value!r}")


def parse_args():
    """
    Parse command-line arguments for the chunked downloader.

    Returns the argparse.Namespace with repo/path/output settings.
    """
    parser = argparse.ArgumentParser(description="Stream and sample paired images + txt labels from a Hugging Face folder-structured dataset, optionally across multiple chunks.")
    parser.add_argument("--repo-id", default="JeffreyJsam/SWiM-SpacecraftWithMasks", help="Hugging Face dataset repo ID.")
    parser.add_argument("--images-parent", default="Baseline/images/val", help="Parent directory for image chunks.")
    parser.add_argument("--labels-parent", default="Baseline/labels/val", help="Parent directory for label chunks.")
    parser.add_argument("--output-dir", default="./SWiM", help="Where to save sampled data.")
    # type=bool was a bug: bool("False") is True, so the flag could never be
    # disabled from the CLI. _str2bool parses the string value correctly.
    parser.add_argument("--flatten", default=True, type=_str2bool, help="Save all samples in a single folder without subdirectories.")
    parser.add_argument("--chunks", nargs="*", default=None, help="Specific chunk names to sample (e.g. 000 001). Leave empty to process all.")
    return parser.parse_args()
101
+
102
if __name__ == "__main__":
    # CLI entry point: parse arguments and forward them to the downloader.
    args = parse_args()
    sample_dataset(
        repo_id=args.repo_id,
        images_parent=args.images_parent,
        labels_parent=args.labels_parent,
        output_dir=args.output_dir,
        # max_files=args.count,
        flatten=args.flatten,
        chunks=args.chunks
    )
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ huggingface-hub>=0.23.0
2
+ fsspec>=2024.6.0
3
+ Pillow>=10.3.0
4
+ tqdm>=4.66.4
sample_swim.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ from io import BytesIO
3
+ from pathlib import Path
4
+ from huggingface_hub import list_repo_tree, hf_hub_url
5
+ from huggingface_hub.hf_api import RepoFile
6
+ import fsspec
7
+ from PIL import Image
8
+ from tqdm import tqdm
9
+
10
def sample_dataset(
    repo_id: str,
    image_subdir: str,
    label_subdir: str,
    output_dir: str,
    max_files: int = 500,
):
    """
    Download up to max_files paired .png images and .txt labels from a
    Hugging Face dataset repo, mirroring the repo layout under output_dir.
    """

    repo_entries = list_repo_tree(
        repo_id=repo_id,
        path_in_repo=image_subdir,
        repo_type="dataset",
        recursive=True
    )

    downloaded = 0
    for entry in tqdm(repo_entries, desc="Downloading samples"):
        is_png = isinstance(entry, RepoFile) and entry.path.lower().endswith(".png")
        if not is_png:
            continue

        # Path relative to image_subdir (e.g., img_0001.png)
        rel_path = Path(entry.path).relative_to(image_subdir)
        # Matching label lives under label_subdir with a .txt extension.
        label_path = f"{label_subdir}/{rel_path.with_suffix('.txt')}"

        image_url = hf_hub_url(repo_id=repo_id, filename=entry.path, repo_type="dataset")
        label_url = hf_hub_url(repo_id=repo_id, filename=label_path, repo_type="dataset")

        image_dest = Path(output_dir) / entry.path
        label_dest = Path(output_dir) / label_path

        image_dest.parent.mkdir(parents=True, exist_ok=True)
        label_dest.parent.mkdir(parents=True, exist_ok=True)

        try:
            # Fetch and save the image.
            with fsspec.open(image_url) as remote:
                Image.open(BytesIO(remote.read())).save(image_dest)

            # Fetch and save the corresponding .txt label.
            with fsspec.open(label_url) as remote:
                label_bytes = remote.read()
                with open(label_dest, "wb") as out_f:
                    out_f.write(label_bytes)

            downloaded += 1
        except Exception as e:
            print(f" Failed {rel_path}: {e}")

        if downloaded >= max_files:
            break

    print(f" Downloaded {downloaded} image/txt pairs.")
    print(f" Saved under: {Path(output_dir).resolve()}")
65
+
66
def parse_args():
    """Build and parse the command-line arguments for the sampler."""
    parser = argparse.ArgumentParser(
        description="Stream and sample paired images + txt labels from a Hugging Face folder-structured dataset."
    )
    # Optional arguments default to required=False; stating it is redundant.
    parser.add_argument("--repo-id", default="JeffreyJsam/SWiM-SpacecraftWithMasks", help="Hugging Face dataset repo ID.")
    parser.add_argument("--image-subdir", default="Baseline/images/val/000", help="Subdirectory path for images.")
    parser.add_argument("--label-subdir", default="Baseline/labels/val/000", help="Subdirectory path for txt masks.")
    parser.add_argument("--output-dir", default="./Sampled-SWiM", help="Where to save sampled data.")
    parser.add_argument("--count", type=int, default=500, help="How many samples to download.")
    return parser.parse_args()
74
+
75
if __name__ == "__main__":
    # CLI entry point: parse arguments and forward them to the sampler.
    args = parse_args()
    sample_dataset(
        repo_id=args.repo_id,
        image_subdir=args.image_subdir,
        label_subdir=args.label_subdir,
        output_dir=args.output_dir,
        max_files=args.count,
    )