OpenHumnoidActuatedFaceData / OpenHumnoidActuatedFaceData.py
import json, random, math
from pathlib import Path

from datasets import (
    BuilderConfig, DatasetInfo, DownloadManager, GeneratorBasedBuilder,
    SplitGenerator, Split, Features, Image, Value
)
from huggingface_hub import hf_hub_url

_REPO_ID = "infosys/OpenHumnoidActuatedFaceData"
_IMAGES_PER_SHARD = 10_000                      # number of image files packed into each tar shard
_TAR_TPL = "images-{start:05d}-{end:05d}.tar"   # shard file-name pattern
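# Illustrative only: with the defaults above, _TAR_TPL produces shard names like
#   images-00000-09999.tar, images-10000-19999.tar, ...
# (zero-padded, inclusive start/end image indices, 10 000 files per archive).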

class ImageSubsetConfig(BuilderConfig):
    """Builder config that can limit the dataset to a random sample of images."""

    def __init__(self, name, sample_size=None, **kw):
        super().__init__(name=name, version="1.0.2",
                         description=kw.get("description", ""))
        self.sample_size = sample_size

class MyImageDataset(GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        ImageSubsetConfig("full", sample_size=None,
                          description="Entire dataset (≈100 GB)"),
        ImageSubsetConfig("small", sample_size=20_000,
                          description="20 K random images"),
    ]
    DEFAULT_CONFIG_NAME = "small"

    # ------------------------------------------------------------------ #
    # 1. Schema                                                          #
    # ------------------------------------------------------------------ #
    def _info(self):
        return DatasetInfo(
            description="Humanoid face images + 16 servo angles.",
            features=Features(
                {
                    "image": Image(),     # decoded to a PIL.Image on access
                    "actuated_angle":
                        {str(i): Value("int32") for i in range(16)},
                }
            ),
        )
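
    # A minimal sketch of one decoded example under this schema (angle values
    # are made up for illustration):
    #   {"image": <PIL.Image.Image>,
    #    "actuated_angle": {"0": 90, "1": 45, ..., "15": 120}}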

    # ------------------------------------------------------------------ #
    # 2. Download                                                        #
    # ------------------------------------------------------------------ #
    def _split_generators(self, dl_manager: DownloadManager):
        # ---- 2-a: load metadata -------------------------------------- #
        meta_url = hf_hub_url(_REPO_ID, "metadata.json", repo_type="dataset")
        meta_path = dl_manager.download(meta_url)
        with open(meta_path, encoding="utf-8") as f:
            metadata = json.load(f)
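        # Assumed metadata.json layout (file names here are illustrative): a
        # mapping from image file name to its 16 servo angles, e.g.
        #   {"frame_000001.jpg": {"0": 90, "1": 45, ..., "15": 120}, ...}
        # which is what metadata[fname] / angles.get(str(i), 0) below expect.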
        all_names = sorted(metadata)
        # Sampling logic: random.sample is unseeded, so the "small" subset can
        # differ between dataset preparations.
        selected = (
            sorted(random.sample(all_names, self.config.sample_size))
            if self.config.sample_size else all_names
        )
        selected_set = set(selected)

        # ---- 2-b: figure out which shards we need -------------------- #
        max_idx = len(all_names) - 1
        n_shards = math.floor(max_idx / _IMAGES_PER_SHARD) + 1
        shard_files = [
            _TAR_TPL.format(start=s * _IMAGES_PER_SHARD,
                            end=min((s + 1) * _IMAGES_PER_SHARD - 1, max_idx))
            for s in range(n_shards)
        ]
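        # Worked example (hypothetical size): with 25 000 images, max_idx is
        # 24 999, n_shards is 3, and shard_files becomes
        #   ["images-00000-09999.tar", "images-10000-19999.tar",
        #    "images-20000-24999.tar"].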

        # ---- 2-c: download (but do not extract) each tar ------------- #
        tar_urls = [hf_hub_url(_REPO_ID, f, repo_type="dataset")
                    for f in shard_files]
        local_tars = dl_manager.download(tar_urls)   # local .tar paths
        # we stream from the tars, so no extract() is needed
        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "tar_paths": local_tars,
                    "metadata": metadata,
                    "want": selected_set,
                },
            )
        ]

    # ------------------------------------------------------------------ #
    # 3. Generate                                                        #
    # ------------------------------------------------------------------ #
    def _generate_examples(self, tar_paths, metadata, want):
        """Stream over each tar and yield only the requested files."""
        idx = 0
        for tar_path in tar_paths:
            # iterate over archive members without extracting to disk
            for inner_path, fobj in self._iter_archive_fast(tar_path):   # helper below
                fname = Path(inner_path).name        # strip any directory prefix inside the tar
                if fname not in want:
                    continue
                angles = metadata[fname]
                yield idx, {
                    "image": {"bytes": fobj.read(), "path": fname},
                    "actuated_angle":
                        {str(i): int(angles.get(str(i), 0)) for i in range(16)},
                }
                idx += 1

    # Small wrapper so we don’t import datasets.utils.file_utils directly
    @staticmethod
    def _iter_archive_fast(tar_path):
        import tarfile
        # The tar stays open while this generator is consumed; callers should
        # read each yielded file object before the generator is exhausted or closed.
        with tarfile.open(tar_path) as tar:
            for member in tar:
                if member.isfile():
                    f = tar.extractfile(member)
                    yield member.name, f
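

# A minimal usage sketch (not part of the loader itself): load the default
# "small" configuration with the `datasets` library. Depending on your
# `datasets` version, script-based datasets may require trust_remote_code=True.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("infosys/OpenHumnoidActuatedFaceData", "small",
                      split="train", trust_remote_code=True)
    example = ds[0]
    print(example["actuated_angle"])    # dict of 16 servo angles keyed "0".."15"
    example["image"].show()             # decoded PIL image of the actuated face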