File size: 9,890 Bytes
b31a43e ad92160 b31a43e a7e0827 b31a43e 7d13bcb b31a43e 6d37281 ad92160 6d37281 b31a43e 7d13bcb b31a43e ad92160 b31a43e ad92160 b31a43e ad92160 b31a43e ad92160 b31a43e ad92160 b31a43e ad92160 7d13bcb ad92160 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 |
import os
import glob
from pathlib import Path
from typing import List
import pandas as pd
import numpy as np
from tqdm import tqdm
import datasets
# NOTE(review): import-time debug print — presumably left here to confirm that
# this custom loading script (rather than a cached copy) is being executed.
print("✅ Custom Causal3D loaded - outside code")
# BibTeX citation for the Causal3D paper (arXiv:2503.04852).
_CITATION = """\
@article{liu2025causal3d,
title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},
author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},
journal={arXiv preprint arXiv:2503.04852},
year={2025}
}
"""
# Short human-readable summary surfaced on the dataset card.
_DESCRIPTION = """\
Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes.
It includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.
"""
# Canonical dataset homepage and SPDX-style license identifier.
_HOMEPAGE = "https://huggingface.co/datasets/LLDDSS/Causal3D"
_LICENSE = "CC-BY-4.0"
class Causal3D(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Causal3D benchmark.

    Each BuilderConfig corresponds to one scene and is named
    ``"<category>_<scene>"`` where category is ``real_scenes`` or
    ``hypothetical_scenes``. An example pairs an image file with its matching
    CSV row (the full row is serialized to JSON in the ``metadata`` field).
    """

    DEFAULT_CONFIG_NAME = "real_scenes_Real_magnet_v3"

    BUILDER_CONFIGS = [
        # hypothetical_scenes
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v2_linear", version=datasets.Version("1.0.0"), description="Hypothetic_v2_linear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v2_nonlinear", version=datasets.Version("1.0.0"), description="Hypothetic_v2_nonlinear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v3_fully_connected_linear", version=datasets.Version("1.0.0"), description="Hypothetic_v3_fully_connected_linear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_linear_full_connected", version=datasets.Version("1.0.0"), description="Hypothetic_v4_linear_full_connected scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_linear_v", version=datasets.Version("1.0.0"), description="Hypothetic_v4_linear_v scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_nonlinear_v", version=datasets.Version("1.0.0"), description="Hypothetic_v4_nonlinear_v scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v5_linear", version=datasets.Version("1.0.0"), description="Hypothetic_v5_linear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v5_linear_full_connected", version=datasets.Version("1.0.0"), description="Hypothetic_v5_linear_full_connected scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_rendered_h3_linear_128P", version=datasets.Version("1.0.0"), description="rendered_h3_linear_128P scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_rendered_h3_nonlinear_128P", version=datasets.Version("1.0.0"), description="rendered_h3_nonlinear_128P scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_rendered_h5_nonlinear", version=datasets.Version("1.0.0"), description="rendered_h5_nonlinear scene"),
        # real_scenes
        datasets.BuilderConfig(name="real_scenes_Real_Parabola", version=datasets.Version("1.0.0"), description="Real_Parabola scene"),
        datasets.BuilderConfig(name="real_scenes_Real_magnet_v3", version=datasets.Version("1.0.0"), description="Real_magnet_v3 scene"),
        datasets.BuilderConfig(name="real_scenes_Real_magnet_v3_5", version=datasets.Version("1.0.0"), description="Real_magnet_v3_5 scene"),
        datasets.BuilderConfig(name="real_scenes_Real_parabola_multi_view", version=datasets.Version("1.0.0"), description="Real_parabola_multi_view scene"),
        datasets.BuilderConfig(name="real_scenes_Real_spring_v3_256P", version=datasets.Version("1.0.0"), description="Real_spring_v3_256P scene"),
        datasets.BuilderConfig(name="real_scenes_Water_flow_scene_render", version=datasets.Version("1.0.0"), description="Water_flow_scene_render scene"),
        datasets.BuilderConfig(name="real_scenes_convex_len_render_images", version=datasets.Version("1.0.0"), description="convex_len_render_images scene"),
        datasets.BuilderConfig(name="real_scenes_real_pendulum", version=datasets.Version("1.0.0"), description="real_pendulum scene"),
        datasets.BuilderConfig(name="real_scenes_rendered_magnetic_128", version=datasets.Version("1.0.0"), description="rendered_magnetic_128 scene"),
        datasets.BuilderConfig(name="real_scenes_rendered_reflection_128P", version=datasets.Version("1.0.0"), description="rendered_reflection_128P scene"),
        datasets.BuilderConfig(name="real_scenes_seesaw_scene_128P", version=datasets.Version("1.0.0"), description="seesaw_scene_128P scene"),
        datasets.BuilderConfig(name="real_scenes_spring_scene_128P", version=datasets.Version("1.0.0"), description="spring_scene_128P scene"),
    ]

    def _info(self):
        """Return the static dataset schema: image + source file name + raw row JSON."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image": datasets.Image(),
                "file_name": datasets.Value("string"),
                # Full CSV row serialized as JSON; could be replaced with
                # structured per-column fields in a future schema revision.
                "metadata": datasets.Value("string"),
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Derive the scene directory from the config name; single TRAIN split.

        Config names look like ``real_scenes_Real_magnet_v3``: the first two
        underscore-separated tokens form the category, the remainder the scene.
        """
        parts = self.config.name.split("_", 2)
        category = parts[0] + "_" + parts[1]  # real_scenes or hypothetical_scenes
        if category not in ["real_scenes", "hypothetical_scenes"]:
            raise ValueError(f"Invalid category '{category}'. Must be one of ['real_scenes', 'hypothetical_scenes']")
        scene = parts[2]
        data_dir = os.path.join(category, scene)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir},
            )
        ]

    def _generate_examples(self, data_dir):
        """Yield ``(idx, example)`` pairs joining CSV rows with images under *data_dir*.

        Best-effort: if the CSV or the required ``image`` column is missing,
        the config is skipped with a warning instead of raising.
        """
        def color(text, code):
            # Wrap *text* in an ANSI escape sequence for terminal highlighting.
            return f"\033[{code}m{text}\033[0m"

        # Locate the metadata CSV, ignoring macOS AppleDouble files ("._*").
        csv_files = [f for f in Path(data_dir).rglob("*.csv") if not f.name.startswith("._")]
        if not csv_files:
            print(f"\033[33m[SKIP] No CSV found in {data_dir}, skipping this config.\033[0m")
            return  # skip this config rather than raising
        csv_path = csv_files[0]
        df = pd.read_csv(csv_path)
        if "image" not in df.columns:
            print(f"\033[31m[SKIP] 'image' column not found in {csv_path}, skipping this config.\033[0m")
            return

        try:
            # Index every image on disk by its path relative to data_dir.
            image_files = {}
            for ext in ("*.png", "*.jpg", "*.jpeg"):
                for img_path in Path(data_dir).rglob(ext):
                    image_files[str(img_path.relative_to(data_dir))] = str(img_path)
            # Some scenes shard images into "part_000/", "part_001/", ... subdirs;
            # collect the first path component of every indexed file. When no
            # "part_000" shard exists, fall back to a single empty prefix.
            parts = {key.split('/')[0] for key in image_files}
            if "part_000" not in parts:
                parts = ['']
        except Exception as e:
            print(color(f"Error loading images: {e}", "31"))  # Red
            return

        try:
            # Match each CSV row with an on-disk image, trying every shard prefix.
            for idx, row in tqdm(df.iterrows(), total=len(df), desc="Processing rows", unit="row"):
                fname = row["ID"]
                record_img_name = row["image"].split('/')[-1]
                for part in parts:
                    if part == '':
                        record_img_path = record_img_name
                    else:
                        record_img_path = "/".join([part, record_img_name.strip()])
                    if "Water_flow_scene_render" in data_dir:
                        # This scene stores names without zero padding
                        # ("7.png" instead of "007.png"), so renormalize.
                        record_img_path = "/".join([part, str(int(record_img_name.strip().split('.')[0])) + ".png"])
                    if record_img_path in image_files:
                        yield idx, {
                            "image": image_files[record_img_path],
                            "file_name": fname,
                            "metadata": row.to_json(),
                        }
                        break
        except Exception as e:
            print(color(f"Error processing CSV rows: {e}", "31"))
|