Add files using upload-large-folder tool
- .gitattributes +1 -0
- OpenHumnoidActuatedFaceData.py +119 -0
- images-00000-09999.tar +3 -0
- images-10000-19999.tar +3 -0
- images-100000-109999.tar +3 -0
- images-110000-119999.tar +3 -0
- images-120000-129999.tar +3 -0
- images-130000-135235.tar +3 -0
- images-20000-29999.tar +3 -0
- images-30000-39999.tar +3 -0
- images-40000-49999.tar +3 -0
- images-50000-59999.tar +3 -0
- images-60000-69999.tar +3 -0
- images-70000-79999.tar +3 -0
- images-80000-89999.tar +3 -0
- images-90000-99999.tar +3 -0
- metadata.json +3 -0
- tar_maker.py +23 -0
.gitattributes
CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+metadata.json filter=lfs diff=lfs merge=lfs -text
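For reference, the added line is the pattern `git lfs track` writes into .gitattributes; the equivalent command, run from the repo root, would be:

git lfs track "metadata.json"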
OpenHumnoidActuatedFaceData.py
ADDED
@@ -0,0 +1,119 @@
import json, random, math, os
from pathlib import Path
from datasets import (
    BuilderConfig, DatasetInfo, DownloadManager, GeneratorBasedBuilder,
    SplitGenerator, Split, Features, Image, Value
)
from huggingface_hub import hf_hub_url

_REPO_ID = "infosys/OpenHumnoidActuatedFaceData"
_IMAGES_PER_SHARD = 10_000                     # how many files went into each tar
_TAR_TPL = "images-{start:05d}-{end:05d}.tar"  # shard file-name pattern


class ImageSubsetConfig(BuilderConfig):
    def __init__(self, name, sample_size=None, **kw):
        super().__init__(name=name, version="1.0.2",
                         description=kw.get("description", ""))
        self.sample_size = sample_size


class MyImageDataset(GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        ImageSubsetConfig("full", sample_size=None,
                          description="Entire dataset (≈100 GB)"),
        ImageSubsetConfig("small", sample_size=20_000,
                          description="20 K random images"),
    ]
    DEFAULT_CONFIG_NAME = "small"

    # ------------------------------------------------------------------ #
    # 1. Schema                                                           #
    # ------------------------------------------------------------------ #
    def _info(self):
        return DatasetInfo(
            description="Humanoid face images + 16 servo angles.",
            features=Features(
                {
                    "image": Image(),          # decoded lazily to a PIL.Image
                    "actuated_angle":
                        {str(i): Value("int32") for i in range(16)},
                }
            ),
        )

    # ------------------------------------------------------------------ #
    # 2. Download                                                         #
    # ------------------------------------------------------------------ #
    def _split_generators(self, dl_manager: DownloadManager):
        # ---- 2-a: load metadata -------------------------------------- #
        meta_url = hf_hub_url(_REPO_ID, "metadata.json", repo_type="dataset")
        meta_path = dl_manager.download(meta_url)
        with open(meta_path, encoding="utf-8") as f:
            metadata = json.load(f)

        all_names = sorted(metadata)
        selected = (                           # sampling logic
            sorted(random.sample(all_names, self.config.sample_size))
            if self.config.sample_size else all_names
        )
        selected_set = set(selected)

        # ---- 2-b: figure out which shards we need -------------------- #
        max_idx = len(all_names) - 1
        n_shards = math.floor(max_idx / _IMAGES_PER_SHARD) + 1
        shard_files = [
            _TAR_TPL.format(start=s * _IMAGES_PER_SHARD,
                            end=min((s + 1) * _IMAGES_PER_SHARD - 1, max_idx))
            for s in range(n_shards)
        ]

        # ---- 2-c: download each tar ----------------------------------- #
        tar_urls = [hf_hub_url(_REPO_ID, f, repo_type="dataset")
                    for f in shard_files]
        local_tars = dl_manager.download(tar_urls)   # local .tar paths

        # we stream straight from the tars, so no extract() is needed

        return [
            SplitGenerator(
                name=Split.TRAIN,
                gen_kwargs={
                    "tar_paths": local_tars,
                    "metadata": metadata,
                    "want": selected_set,
                },
            )
        ]

    # ------------------------------------------------------------------ #
    # 3. Generate                                                         #
    # ------------------------------------------------------------------ #
    def _generate_examples(self, tar_paths, metadata, want):
        """Stream over each tar and yield only the requested files."""
        idx = 0
        for tar_path in tar_paths:
            # iterate without extraction
            for inner_path, fobj in self._iter_archive_fast(tar_path):
                fname = Path(inner_path).name        # strip any tar prefix
                if fname not in want:
                    continue

                angles = metadata[fname]
                yield idx, {
                    "image": {"bytes": fobj.read(), "path": fname},
                    "actuated_angle":
                        {str(i): int(angles.get(str(i), 0)) for i in range(16)},
                }
                idx += 1

    # Small wrapper so we don't import datasets.utils.file_utils directly
    @staticmethod
    def _iter_archive_fast(tar_path):
        import tarfile
        with tarfile.open(tar_path) as tar:
            for member in tar:
                if member.isfile():
                    f = tar.extractfile(member)
                    yield member.name, f
images-00000-09999.tar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c3d7f5642e3769a7d485ee29f8deb1beb151a210d9c24841b6c2bf5def6775b1
size 11352606720
images-10000-19999.tar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a895b631f00a4be479af0383b6c8c0fe598f71c23f180a0967516f14fd24ac62
size 11351879680
images-100000-109999.tar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bb62cd347217bca0e145264703e4197d0a00f058e849f8a34a27d1861fb28ec0
size 11352842240
images-110000-119999.tar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6aa79ab6b0293547e844b6da9a2193f5156ede13d613e9b2db62f48b6b992a1e
size 11352760320
images-120000-129999.tar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:07efe60887fdce8d7c342cda8de258f876990e516e360f31be128cbd8c449dcd
size 11352176640
images-130000-135235.tar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8898d1b36ffbf73187f1b878ba4b5d0f3a713b5f66c8e09900f8ea70b1e9feff
size 5943767040
images-20000-29999.tar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:49817f52cef3dc6fa9917219821a2f62153d58787c8cecd68a85c069042c505a
size 11352186880
images-30000-39999.tar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5909376fdb83eaf8ffc71e4f0a497898418851de08de2664267d48574a084f1a
size 11353016320
images-40000-49999.tar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5ec275238c987874e930d7604d4046112f4c20977931fd8455d4a5a6967f77d4
size 11353968640
images-50000-59999.tar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c498ea28a5b4a3ae4a6ae3ffb6c7f4c8713021bcc0a4fda3b3d295b5176cd43f
size 11351685120
images-60000-69999.tar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ed71bf466faa58836a03531894ae829f57f1c22bf7cf7baa8d495be8f30aa1ad
size 11352279040
images-70000-79999.tar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:11fa2294642abdf0a090260444a523739a67398614bf0ff8c13a249d0a59c838
size 11352637440
images-80000-89999.tar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7c54663b1b0bedb2d1eda60ec79d87944d4d1597a4a02cda2f813c6c54de0bbd
size 11351603200
images-90000-99999.tar
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f5ee435c3d1050577ae2f3c975cb13ecc61c67fc23e0d29f4ce0725851927e11
size 11351418880
metadata.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6a70c8509508bab891f7e476dfa8219496b313669046cb995871443b07d12000
size 49865473
tar_maker.py
ADDED
@@ -0,0 +1,23 @@
#!/usr/bin/env python3
import os, tarfile, math, json
from pathlib import Path

IMAGE_DIR = Path("images")        # all PNGs live here now
META_JSON = Path("metadata.json")
PER_SHARD = 10_000                # ≤ 10 000 files per tar
OUT_DIR   = Path(".")             # tars are written next to the script

# 1. load metadata so we keep ordering consistent
with META_JSON.open() as f:
    meta = json.load(f)
all_names = sorted(meta)

n_shards = math.floor((len(all_names) - 1) / PER_SHARD) + 1
for s in range(n_shards):
    start = s * PER_SHARD
    end = min((s + 1) * PER_SHARD - 1, len(all_names) - 1)
    shard_name = OUT_DIR / f"images-{start:05d}-{end:05d}.tar"
    print(f" → building {shard_name}")
    with tarfile.open(shard_name, "w") as tar:
        for fname in all_names[start:end + 1]:
            tar.add(IMAGE_DIR / fname, arcname=fname)
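A small sanity-check sketch (not part of the commit): it verifies that every file packed by tar_maker.py is also a key in metadata.json, which is the invariant the loader's `want` filter relies on.

import json, tarfile
from pathlib import Path

names_in_meta = set(json.loads(Path("metadata.json").read_text()))
for shard in sorted(Path(".").glob("images-*.tar")):
    with tarfile.open(shard) as tar:
        files = [m.name for m in tar if m.isfile()]
    missing = [n for n in files if n not in names_in_meta]
    print(f"{shard.name}: {len(files)} files, {len(missing)} missing from metadata")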