|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""SynWBM dataset""" |
|
|
|
|
|
import sys |
|
|
if sys.version_info < (3, 9): |
|
|
from typing import Sequence, Generator, Tuple |
|
|
else: |
|
|
from collections.abc import Sequence, Generator |
|
|
Tuple = tuple |
|
|
|
|
|
from typing import Optional, IO |
|
|
|
|
|
import datasets |
|
|
import itertools |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
_CITATION = """\ |
|
|
COMING SOON |
|
|
""" |
|
|
|
|
|
_DESCRIPTION = """\ |
|
|
A synthetic instance segmentation dataset for white button mushrooms (Agaricus bisporus). |
|
|
The dataset incorporates rendered and generated synthetic images for training mushroom segmentation models. |
|
|
""" |
|
|
|
|
|
_HOMEPAGE = "https://huggingface.co/datasets/ABC-iRobotics/SynWBM" |
|
|
|
|
|
_LICENSE = "GNU General Public License v3.0" |
|
|
|
|
|
_LATEST_VERSIONS = { |
|
|
"all": "1.0.0", |
|
|
"blender": "1.0.0", |
|
|
"sdxl": "1.0.0", |
|
|
} |
|
|
|
|
|
BASE_URL = "https://huggingface.co/datasets/ABC-iRobotics/SynWBM/resolve/main/" |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class SynWBMDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the SynWBM dataset.

    Resolves the dataset version and pre-computes the download URLs for the
    image, depth, and mask archives listed in ``images_txt``.
    """

    def __init__(self, name: str, base_urls: Sequence[str], images_txt: str, version: Optional[str] = None, **kwargs):
        """
        Args:
            name: Config name; must be a key of ``_LATEST_VERSIONS``.
            base_urls: Base URLs under which the image archives live.
            images_txt: Path to a text file listing one archive name per line.
            version: Explicit dataset version string; defaults to the latest
                version registered for ``name``.
            **kwargs: Forwarded to ``datasets.BuilderConfig``.
        """
        _version = _LATEST_VERSIONS[name] if version is None else version
        super().__init__(version=datasets.Version(_version), name=name, **kwargs)

        with open(images_txt, "r", encoding="utf-8") as f:
            # Strip newlines and drop blank lines so a trailing newline in the
            # file cannot produce a bogus URL equal to the bare base URL.
            image_list = [line.strip() for line in f if line.strip()]

        img_urls = []
        depth_urls = []
        mask_urls = []
        for base_url in base_urls:
            img_urls.extend(base_url + image for image in image_list)
            # NOTE(review): depths/masks always resolve against BASE_URL, so for
            # configs with several base_urls the same depth/mask archives are
            # repeated once per base_url to stay zip-aligned with the images --
            # confirm this matches the repository layout.
            depth_urls.extend(BASE_URL + "depths/" + image for image in image_list)
            mask_urls.extend(BASE_URL + "masks/" + image for image in image_list)

        self._imgs_urls = img_urls
        self._depth_urls = depth_urls
        self._masks_urls = mask_urls

    @property
    def features(self):
        """Feature schema: an RGB image plus per-pixel depth and instance mask."""
        return datasets.Features(
            {
                "image": datasets.Image(),
                "depth": datasets.Image(),
                "mask": datasets.Image(),
            }
        )

    @property
    def supervised_keys(self):
        """No (input, label) pairing is defined for this dataset."""
        return None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class SynWBMDataset(datasets.GeneratorBasedBuilder):
    """SynWBM dataset builder.

    Downloads the image/depth/mask archives for the selected config and
    yields index-aligned (image, depth, mask) examples from them.
    """

    BUILDER_CONFIG_CLASS = SynWBMDatasetConfig
    # One config per image source; every config reads the same images.txt list.
    BUILDER_CONFIGS = [
        SynWBMDatasetConfig(
            name=cfg_name,
            description=cfg_description,
            base_urls=[BASE_URL + subdir for subdir in subdirs],
            images_txt="images.txt",
        )
        for cfg_name, cfg_description, subdirs in (
            ("all", "All images", ("rendered/", "generated/")),
            ("blender", "Synthetic images rendered using Blender", ("rendered/",)),
            ("sdxl", "Synthetic images generated by Stable Diffusion XL", ("generated/",)),
        )
    ]
    # Keep writer batches small: each example carries three full images.
    DEFAULT_WRITER_BATCH_SIZE = 10

    def _info(self):
        """Assemble the DatasetInfo from the active config and module constants."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=self.config.features,
            supervised_keys=self.config.supervised_keys,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=self.config.version,
        )

    def _split_generators(self, dl_manager):
        """Download every archive and expose a single TRAIN split over them."""

        def archive_stream(urls):
            # Download each archive, then lazily walk their members in order.
            paths = dl_manager.download(urls)
            return itertools.chain.from_iterable(dl_manager.iter_archive(p) for p in paths)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": archive_stream(self.config._imgs_urls),
                    "depths": archive_stream(self.config._depth_urls),
                    "masks": archive_stream(self.config._masks_urls),
                },
            ),
        ]

    def _generate_examples(
        self,
        images: Generator[Tuple[str, IO], None, None],
        depths: Generator[Tuple[str, IO], None, None],
        masks: Generator[Tuple[str, IO], None, None],
    ):
        """Yield (key, example) pairs; the three streams are assumed aligned."""
        triplets = zip(images, depths, masks)
        for key, ((img_path, img_f), (depth_path, depth_f), (mask_path, mask_f)) in enumerate(triplets):
            # Materialize the bytes before closing the archive member handles.
            example = {
                "image": {"path": img_path, "bytes": img_f.read()},
                "depth": {"path": depth_path, "bytes": depth_f.read()},
                "mask": {"path": mask_path, "bytes": mask_f.read()},
            }
            img_f.close()
            depth_f.close()
            mask_f.close()
            yield key, example