"""
download_swim.py
Streams and downloads the full paired dataset (images + label txt files) from a Hugging Face Hub repository.
It recursively processes all available chunk subfolders (e.g., '000', '001', ...) under the given parent paths.
Features:
- Recursively discovers subdirs (chunks) using HfFileSystem
- Optionally flattens the directory structure by removing the deepest chunk level
- Saves each .png image with its corresponding .txt label
Use this script if you want to download the complete dataset for model training or offline access.
Usage:
# Download all chunks (flattened)
python utils/download_swim.py --output-dir ./SWiM --flatten
# Download specific chunks (keeping the chunk subdirectories)
python utils/download_swim.py --chunks 000 001 002
Arguments:
--repo-id Hugging Face dataset repository ID
--images-parent Parent directory for image chunks (e.g., Baseline/images/train)
--labels-parent Parent directory for label chunks (e.g., Baseline/labels/train)
--output-dir Where to save the downloaded dataset
--flatten Remove the final 'chunk' subdir in output paths (flag; default: False)
--chunks Specific chunk names (e.g., 000 001); omit to download all
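Output layout (illustrative; '0001' is a placeholder file name, assuming the default 'Baseline' parents):
    with --flatten:    <output-dir>/Baseline/images/0001.png and <output-dir>/Baseline/labels/0001.txt
    without --flatten: <output-dir>/Baseline/images/000/0001.png and <output-dir>/Baseline/labels/000/0001.txt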
"""
import argparse
from io import BytesIO
from pathlib import Path
from huggingface_hub import list_repo_tree, hf_hub_url, HfFileSystem
from huggingface_hub.hf_api import RepoFile
import fsspec
from PIL import Image
from tqdm import tqdm
def enumerate_chunks(repo_id, images_parent):
"""
    Lists the immediate chunk subdirectories under the images parent using HfFileSystem.
    Returns a sorted list of subdirectory names (e.g. ['000', '001', ...]).
"""
fs = HfFileSystem()
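    # HfFileSystem addresses dataset repos as 'datasets/<repo_id>/<path>'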
repo_path = f"datasets/{repo_id}/{images_parent}"
entries = fs.ls(repo_path, detail=True)
subdirs = [entry['name'].split('/')[-1] for entry in entries if entry['type'] == 'directory']
subdirs.sort()
return subdirs
def sample_dataset(
repo_id: str,
images_parent: str,
labels_parent: str,
output_dir: str,
flatten: bool,
chunks: list = None
):
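    """
    Streams every .png image and its paired .txt label from the selected chunks
    and writes them under output_dir, optionally flattening away the chunk level.
    """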
total_downloaded = 0
    # Fall back to every discovered chunk when --chunks is omitted or empty
    all_chunks = chunks
    if not all_chunks:
        all_chunks = enumerate_chunks(repo_id, images_parent)
print(f"Found chunks: {all_chunks}")
for chunk in all_chunks:
image_subdir = f"{images_parent}/{chunk}"
label_subdir = f"{labels_parent}/{chunk}"
# List only in the specified chunk
image_files = list_repo_tree(
repo_id=repo_id,
path_in_repo=image_subdir,
repo_type="dataset",
recursive=True,
)
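        # Keep only .png RepoFile entries; folders and other file types are skipped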
for img_file in tqdm(image_files, desc=f"Downloading {chunk}", leave=False):
if not isinstance(img_file, RepoFile) or not img_file.path.lower().endswith(".png"):
continue
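            # The label is expected at the same relative path under the labels parent, with a .txt extension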
rel_path = Path(img_file.path).relative_to(image_subdir)
label_path = f"{label_subdir}/{rel_path.with_suffix('.txt')}"
if flatten:
                parts = img_file.path.split('/')
                # Drop the chunk directory (second-to-last component),
                # e.g. Baseline/images/000/0001.png -> Baseline/images/0001.png
                flat_path = '/'.join(parts[:-2] + [parts[-1]])
                # The label mirrors the flattened image path under 'labels' with a .txt extension
                flat_label_path = flat_path.replace('.png', '.txt').replace('images', 'labels')
local_image_path = Path(output_dir) / flat_path
local_label_path = Path(output_dir) / flat_label_path
else:
local_image_path = Path(output_dir) / img_file.path
local_label_path = Path(output_dir) / label_path
local_image_path.parent.mkdir(parents=True, exist_ok=True)
local_label_path.parent.mkdir(parents=True, exist_ok=True)
image_url = hf_hub_url(repo_id=repo_id, filename=img_file.path, repo_type="dataset")
label_url = hf_hub_url(repo_id=repo_id, filename=label_path, repo_type="dataset")
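            # Stream both files over HTTP via fsspec; a failed pair is reported and skipped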
try:
with fsspec.open(image_url) as f:
image = Image.open(BytesIO(f.read()))
image.save(local_image_path)
with fsspec.open(label_url) as f:
txt_content = f.read()
with open(local_label_path, "wb") as out_f:
out_f.write(txt_content)
total_downloaded += 1
except Exception as e:
print(f"Failed {rel_path}: {e}")
print(f"Downloaded {total_downloaded} image/txt pairs.")
print(f"Saved under: {Path(output_dir).resolve()}")
def parse_args():
    parser = argparse.ArgumentParser(description="Stream and download paired images + txt labels from a folder-structured Hugging Face dataset, optionally restricted to specific chunks.")
parser.add_argument("--repo-id", default="RiceD2KLab/SWiM-SpacecraftWithMasks", help="Hugging Face dataset repo ID.")
parser.add_argument("--images-parent", default="Baseline/images", help="Parent directory for image chunks.")
parser.add_argument("--labels-parent", default="Baseline/labels", help="Parent directory for label chunks.")
parser.add_argument("--output-dir", default="./SWiM", help="Where to save sampled data.")
#parser.add_argument("--count", type=int, default=500, help="How many samples to download in total.")
parser.add_argument("--flatten", action='store_true', help="Save all samples in a single folder without subdirectories.")
parser.add_argument("--chunks", nargs="*", default=None, help="Specific chunk names to sample (e.g. 000 001). Leave empty to process all.")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
sample_dataset(
repo_id=args.repo_id,
images_parent=args.images_parent,
labels_parent=args.labels_parent,
output_dir=args.output_dir,
flatten=args.flatten,
chunks=args.chunks
)