Datasets:
Tasks: Object Detection
Formats: webdataset
Languages: English
Size: < 1K
ArXiv:
Tags: webdataset
License:
import os | |
import json | |
from PIL import Image | |
import multiprocessing | |
from tqdm import tqdm | |
# --- Configuration -----------------------------------------------------------
# Input root holding the YOLO-format splits (train/val/test).
YOLO_DIR = "8_calves_yolo"
# Output directory for the generated COCO-format JSON files.
COCO_DIR = "8_calves_coco"
# Single-class dataset: every kept box is labelled "cow" (COCO category id 1).
CATEGORIES = [{"id": 1, "name": "cow"}]
# One pool worker per available CPU core.
NUM_WORKERS = multiprocessing.cpu_count()
def process_image(args):
    """Convert one image and its YOLO label file into COCO dicts.

    Args:
        args: ``(image_path, label_path, image_id)`` tuple — packed into one
            argument so the function is usable with ``multiprocessing.Pool.imap``.

    Returns:
        ``(image_info, annotations)`` where ``image_info`` is a COCO "images"
        entry (or ``None`` if the image could not be opened, signalling the
        caller to drop it) and ``annotations`` is a list of COCO "annotations"
        dicts *without* the ``"id"`` field — the caller assigns globally
        unique annotation ids afterwards.
    """
    image_path, label_path, image_id = args
    try:
        with Image.open(image_path) as img:
            width, height = img.size
    except Exception as e:
        # Unreadable image: report it and let the caller skip it entirely.
        print(f"Error opening {image_path}: {e}")
        return None, []

    image_info = {
        "id": image_id,
        # NOTE(review): file_name is the image path *relative to COCO_DIR*,
        # which points back into the YOLO tree (e.g. "../8_calves_yolo/...");
        # confirm downstream loaders resolve file_name against COCO_DIR.
        "file_name": os.path.relpath(image_path, COCO_DIR),
        "width": width,
        "height": height,
    }

    annotations = []
    if not os.path.exists(label_path):
        # An image with no label file is valid — it simply has no boxes.
        return image_info, annotations
    try:
        with open(label_path, "r") as f:
            lines = f.readlines()
    except Exception as e:
        print(f"Error reading {label_path}: {e}")
        return image_info, []

    for line in lines:
        parts = line.strip().split()
        if len(parts) != 5:
            # Malformed row; a YOLO row is "class cx cy w h".
            continue
        try:
            class_id = int(parts[0])
            x_center, y_center = float(parts[1]), float(parts[2])
            w, h = float(parts[3]), float(parts[4])
        except ValueError:
            # Was a bare `except:` — now only catches numeric-parse failures
            # instead of swallowing KeyboardInterrupt/SystemExit too.
            print(f"Error parsing line in {label_path}: {line}")
            continue
        if class_id != 0:
            # Only YOLO class 0 is kept; it maps to COCO category id 1.
            continue

        # Convert normalized YOLO (cx, cy, w, h) to absolute COCO
        # (x_min, y_min, w, h), clamped to the image bounds.
        w_abs = w * width
        h_abs = h * height
        x_min = min(max(0, (x_center * width) - w_abs / 2), width)
        y_min = min(max(0, (y_center * height) - h_abs / 2), height)
        # max(0, ...) guards against a box lying entirely outside the image
        # producing a negative width/height (and hence a negative area).
        w_abs = max(0, min(width - x_min, w_abs))
        h_abs = max(0, min(height - y_min, h_abs))
        annotations.append({
            "image_id": image_id,
            "category_id": 1,
            "bbox": [x_min, y_min, w_abs, h_abs],
            "area": w_abs * h_abs,
            "iscrowd": 0,
        })
    return image_info, annotations
def process_split(split):
    """Convert one YOLO split into a COCO JSON file.

    Reads PNG images from ``<YOLO_DIR>/<split>/images`` and matching YOLO
    ``.txt`` labels from ``<YOLO_DIR>/<split>/labels``, converts them in
    parallel with a worker pool, and writes ``<COCO_DIR>/<split>.json``.
    Skips the split (with a message) when the image directory is absent.
    """
    base_dir = os.path.join(YOLO_DIR, split)
    image_dir = os.path.join(base_dir, "images")
    label_dir = os.path.join(base_dir, "labels")
    if not os.path.exists(image_dir):
        print(f"Skipping {split} - no image directory")
        return

    # Sorted listing gives deterministic, reproducible image ids.
    png_names = sorted(n for n in os.listdir(image_dir) if n.lower().endswith(".png"))

    # Image ids are the 1-based positions in the sorted listing.
    tasks = [
        (
            os.path.join(image_dir, name),
            os.path.join(label_dir, os.path.splitext(name)[0] + ".txt"),
            idx,
        )
        for idx, name in enumerate(png_names, 1)
    ]

    # Fan the per-image work out across the pool; imap preserves task order.
    with multiprocessing.Pool(processes=NUM_WORKERS) as pool:
        results = list(
            tqdm(
                pool.imap(process_image, tasks),
                total=len(tasks),
                desc=f"Processing {split}",
            )
        )

    # Merge per-image results, assigning globally unique annotation ids.
    images = []
    annotations = []
    next_ann_id = 1
    for image_info, image_anns in results:
        if image_info is None:
            # Worker could not open this image; drop it.
            continue
        images.append(image_info)
        for ann in image_anns:
            ann["id"] = next_ann_id
            next_ann_id += 1
            annotations.append(ann)

    coco_data = {
        "info": {
            "description": "COCO Dataset converted from YOLO format",
            "version": "1.0",
            "year": 2023,
            "contributor": "",
        },
        "licenses": [],
        "categories": CATEGORIES,
        "images": images,
        "annotations": annotations,
    }

    output_path = os.path.join(COCO_DIR, f"{split}.json")
    with open(output_path, "w") as f:
        json.dump(coco_data, f, indent=2)
    print(f"Saved {split} with {len(images)} images and {len(annotations)} annotations")
def main():
    """Entry point: ensure the output directory exists, then convert every split."""
    os.makedirs(COCO_DIR, exist_ok=True)
    for split_name in ("train", "val", "test"):
        process_split(split_name)


if __name__ == "__main__":
    main()