Convert dataset to Parquet #7
opened by LLDDSS
This view is limited to 50 files because it contains too many changes.
See the raw diff here.
- Causal3D.py +0 -165
- README.md +156 -66
- __init__.py +0 -1
- dataset.py +0 -175
- dataset_infos.json +0 -899
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00005.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00006.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00007.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00008.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00009.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00010.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00011.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00012.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00013.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00014.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00015.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00016.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00017.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00018.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00019.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00020.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00021.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00022.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00023.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00024.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00025.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00026.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00027.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00028.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00029.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00030.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00031.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00032.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00033.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00034.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00035.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00036.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00037.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00038.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00039.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00040.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00041.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00042.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00043.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00044.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00045.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00046.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00047.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00048.png +0 -3
- hypothetical_scenes/Hypothetic_v2_linear/part_000/00049.png +0 -3
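Once this conversion is merged, a config can presumably be loaded straight from the Parquet shards, with no custom loading script. A minimal sketch (the config name is taken from the new `configs:` section in the updated README, where `real_scenes_Real_magnet_v3` is marked as the default; adjust as needed):

```python
# Hedged sketch: load one config from the converted Parquet data.
from datasets import load_dataset

ds = load_dataset("LLDDSS/Causal3D", "real_scenes_Real_magnet_v3", split="train")
example = ds[0]
print(example["file_name"])   # original CSV "ID" value
print(example["image"].size)  # decoded PIL image
```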
Causal3D.py
DELETED
@@ -1,165 +0,0 @@
import datasets
import pandas as pd
import os
from pathlib import Path
from tqdm import tqdm

print("✅ Custom Causal3D loaded: outside Causal3D.py")
_CITATION = """\
@article{liu2025causal3d,
  title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},
  author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},
  journal={arXiv preprint arXiv:2503.04852},
  year={2025}
}
"""

_DESCRIPTION = """\
Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes.
It includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.
"""

_HOMEPAGE = "https://huggingface.co/datasets/LLDDSS/Causal3D"
_LICENSE = "CC-BY-4.0"

class Causal3D(datasets.GeneratorBasedBuilder):
    DEFAULT_CONFIG_NAME = "real_scenes_Water_flow_scene_render"
    BUILDER_CONFIGS = [
        # hypothetical_scenes
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v2_linear", version=datasets.Version("1.0.0"), description="Hypothetic_v2_linear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v2_nonlinear", version=datasets.Version("1.0.0"), description="Hypothetic_v2_nonlinear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v3_fully_connected_linear", version=datasets.Version("1.0.0"), description="Hypothetic_v3_fully_connected_linear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_linear_full_connected", version=datasets.Version("1.0.0"), description="Hypothetic_v4_linear_full_connected scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_linear_v", version=datasets.Version("1.0.0"), description="Hypothetic_v4_linear_v scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_nonlinear_v", version=datasets.Version("1.0.0"), description="Hypothetic_v4_nonlinear_v scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v5_linear", version=datasets.Version("1.0.0"), description="Hypothetic_v5_linear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v5_linear_full_connected", version=datasets.Version("1.0.0"), description="Hypothetic_v5_linear_full_connected scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_rendered_h3_linear_128P", version=datasets.Version("1.0.0"), description="rendered_h3_linear_128P scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_rendered_h3_nonlinear_128P", version=datasets.Version("1.0.0"), description="rendered_h3_nonlinear_128P scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_rendered_h5_nonlinear", version=datasets.Version("1.0.0"), description="rendered_h5_nonlinear scene"),

        # real_scenes
        datasets.BuilderConfig(name="real_scenes_Real_Parabola", version=datasets.Version("1.0.0"), description="Real_Parabola scene"),
        datasets.BuilderConfig(name="real_scenes_Real_magnet_v3", version=datasets.Version("1.0.0"), description="Real_magnet_v3 scene"),
        datasets.BuilderConfig(name="real_scenes_Real_magnet_v3_5", version=datasets.Version("1.0.0"), description="Real_magnet_v3_5 scene"),
        # datasets.BuilderConfig(name="real_scenes_Real_Parabola_multi_view", version=datasets.Version("1.0.0"), description="Real_parabola_multi_view scene"),
        datasets.BuilderConfig(name="real_scenes_Real_spring_v3_256P", version=datasets.Version("1.0.0"), description="Real_spring_v3_256P scene"),
        datasets.BuilderConfig(name="real_scenes_Water_flow_scene_render", version=datasets.Version("1.0.0"), description="Water_flow_scene_render scene"),
        datasets.BuilderConfig(name="real_scenes_convex_len_render_images", version=datasets.Version("1.0.0"), description="convex_len_render_images scene"),
        datasets.BuilderConfig(name="real_scenes_real_pendulum", version=datasets.Version("1.0.0"), description="real_pendulum scene"),
        datasets.BuilderConfig(name="real_scenes_rendered_magnetic_128", version=datasets.Version("1.0.0"), description="rendered_magnetic_128 scene"),
        datasets.BuilderConfig(name="real_scenes_rendered_reflection_128P", version=datasets.Version("1.0.0"), description="rendered_reflection_128P scene"),
        datasets.BuilderConfig(name="real_scenes_seesaw_scene_128P", version=datasets.Version("1.0.0"), description="seesaw_scene_128P scene"),
        datasets.BuilderConfig(name="real_scenes_spring_scene_128P", version=datasets.Version("1.0.0"), description="spring_scene_128P scene"),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image": datasets.Image(),
                "file_name": datasets.Value("string"),
                "metadata": datasets.Value("string"),  # optionally replace with structured fields
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        parts = self.config.name.split("_", 2)
        category = parts[0] + "_" + parts[1]  # real_scenes or hypothetical_scenes

        if category not in ["real_scenes", "hypothetical_scenes"]:
            raise ValueError(f"Invalid category '{category}'. Must be one of ['real_scenes', 'hypothetical_scenes']")

        scene = parts[2]
        data_dir = os.path.join(category, scene)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir},
            )
        ]

    def _generate_examples(self, data_dir):
        def color(text, code):
            return f"\033[{code}m{text}\033[0m"

        # Load image paths
        try:
            image_files = {}
            for ext in ("*.png", "*.jpg", "*.jpeg"):
                for img_path in Path(data_dir).rglob(ext):
                    relative_path = str(img_path.relative_to(data_dir))
                    image_files[relative_path] = str(img_path)
            parts = [i.split('/')[0] for i in list(image_files.keys())]
            parts = set(parts)
            if "part_000" not in parts:
                parts = ['']

        except Exception as e:
            print(color(f"Error loading images: {e}", "31"))  # Red
            return

        # Find the .csv file
        csv_files = list(Path(data_dir).rglob("*.csv"))
        csv_files = [f for f in Path(data_dir).rglob("*.csv") if not f.name.startswith("._")]
        if not csv_files:
            # print(f"\033[33m[SKIP] No CSV found in {data_dir}, skipping this config.\033[0m")
            pass
        # print(f"\033[33m[INFO] Found CSV: {csv_files}\033[0m")
        csv_path = csv_files[0] if csv_files else None
        df = pd.read_csv(csv_path) if csv_path else None
        image_col_exists = True
        if df is not None and "image" not in df.columns:
            image_col_exists = False

        images = df["image"].tolist() if image_col_exists and df is not None else []
        images = [i.split('/')[-1].split('.')[0] for i in images if i.endswith(('.png', '.jpg', '.jpeg'))]

        try:
            # Match CSV rows with image paths
            if df is None:
                for i, j in tqdm(image_files.items(), desc="Processing images", unit="image"):
                    yield i, {
                        "image": j,
                        "file_name": i,
                        "metadata": None,
                    }

            else:
                for idx, row in tqdm(df.iterrows(), total=len(df), desc="Processing rows", unit="row"):
                    fname = row["ID"]
                    raw_record_img_path = images[idx] if images else ""  # row["image"]
                    record_img_name = raw_record_img_path.split('/')[-1]
                    for part in parts:
                        if part == '':
                            record_img_path = record_img_name
                        else:
                            record_img_path = "/".join([part, record_img_name.strip()])
                        if "Water_flow_scene_render" in data_dir:
                            record_img_path = "/".join([part, str(int(record_img_name.strip().split('.')[0])) + ".png"])
                        if record_img_path in image_files:
                            # print(color(f"record_img_path: { image_files[record_img_path]}", "34"))  # Blue
                            yield idx, {
                                "image": image_files[record_img_path],
                                "file_name": fname,
                                "metadata": row.to_json(),
                            }
                            break

                        else:
                            yield idx, {
                                # "image": "",
                                "file_name": fname,
                                "metadata": row.to_json(),
                            }
                            break

        except Exception as e:
            print(color(f"Error processing CSV rows: {e}", "31"))
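For reference, the `split("_", 2)` call in `_split_generators` above is what maps a config name onto a data directory inside the repo. A small illustration using one of the config names defined in the script:

```python
# Illustration of the config-name parsing used by the deleted builder.
name = "real_scenes_Real_magnet_v3"
parts = name.split("_", 2)            # ['real', 'scenes', 'Real_magnet_v3']
category = parts[0] + "_" + parts[1]  # 'real_scenes'
scene = parts[2]                      # 'Real_magnet_v3'
data_dir = f"{category}/{scene}"      # 'real_scenes/Real_magnet_v3'
```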
README.md
CHANGED
The `dataset_info` YAML front matter now records split statistics for every config: the `num_bytes`, `download_size`, and `dataset_size` fields, previously left blank, are filled in (the `num_examples` values are unchanged):

| config_name | num_examples | num_bytes | download_size | dataset_size |
|---|---|---|---|---|
| hypothetical_scenes_Hypothetic_v2_linear | 14368 | 2137802.16 | 1216402 | 2137802.16 |
| hypothetical_scenes_Hypothetic_v2_nonlinear | 10000 | 1768656.0 | 939321 | 1768656.0 |
| hypothetical_scenes_Hypothetic_v3_fully_connected_linear | 10000 | 1355793.0 | 617191 | 1355793.0 |
| hypothetical_scenes_Hypothetic_v4_linear_full_connected | 10050 | 1658091.5 | 915357 | 1658091.5 |
| hypothetical_scenes_Hypothetic_v4_linear_v | 10000 | 2012079.0 | 907646 | 2012079.0 |
| hypothetical_scenes_Hypothetic_v4_nonlinear_v | 10000 | 2786917.0 | 1262319 | 2786917.0 |
| hypothetical_scenes_Hypothetic_v5_linear | 10000 | 1915161.0 | 1048013 | 1915161.0 |
| hypothetical_scenes_Hypothetic_v5_linear_full_connected | 10000 | 1914621.0 | 1051232 | 1914621.0 |
| hypothetical_scenes_rendered_h3_linear_128P | 15000 | 5363548.0 | 2476630 | 5363548.0 |
| hypothetical_scenes_rendered_h3_nonlinear_128P | 10223 | 3810279.01 | 1726102 | 3810279.01 |
| hypothetical_scenes_rendered_h5_nonlinear | 10360 | 5416339.2 | 2056220 | 5416339.2 |
| real_scenes_Real_Parabola | 10000 | 1282248.0 | 768322 | 1282248.0 |
| real_scenes_Real_magnet_v3 | 481 | 72702.0 | 48333 | 72702.0 |
| real_scenes_Real_magnet_v3_5 | 1503 | 228301.613 | 152240 | 228301.613 |
| real_scenes_Real_spring_v3_256P | 450 | 134466.0 | 24433 | 134466.0 |
| real_scenes_Water_flow_scene_render | 10000 | 3533718.0 | 1813070 | 3533718.0 |
| real_scenes_convex_len_render_images | 1078 | 161948.95 | 106436 | 161948.95 |
| real_scenes_real_pendulum | 9999 | 2884667.13 | 1558722 | 2884667.13 |
| real_scenes_rendered_magnetic_128 | 8350 | 2290040.5 | 933644 | 2290040.5 |
| real_scenes_rendered_reflection_128P | 9995 | 2723942.65 | 1665779 | 2723942.65 |
| real_scenes_seesaw_scene_128P | 10000 | 2234514.0 | 1257167 | 2234514.0 |
| real_scenes_spring_scene_128P | 10000 | 2506086.0 | 951360 | 2506086.0 |

A new top-level `configs:` section is appended before the closing `---`. Every config in the table above gets a `data_files` entry of the form `<config_name>/train-*`, and `real_scenes_Real_magnet_v3` additionally carries `default: true`:

configs:
- config_name: hypothetical_scenes_Hypothetic_v2_linear
  data_files:
  - split: train
    path: hypothetical_scenes_Hypothetic_v2_linear/train-*
- config_name: hypothetical_scenes_Hypothetic_v2_nonlinear
  data_files:
  - split: train
    path: hypothetical_scenes_Hypothetic_v2_nonlinear/train-*
(…one entry per config, in the same order as the table…)
- config_name: real_scenes_Real_magnet_v3
  data_files:
  - split: train
    path: real_scenes_Real_magnet_v3/train-*
  default: true

Note that `real_scenes_Real_parabola_multi_view` still appears under `dataset_info` but receives no `data_files` entry in the new `configs:` section. The closing `---` and the `# 🧠 Causal3D: A Benchmark for Visual Causal Reasoning` document body below the front matter are unchanged.
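The features (`image`, `file_name`, `metadata`) carry over from the deleted script, where `metadata` holds each CSV row serialized with `row.to_json()`, so the per-example metadata can presumably be recovered like this (config name is one of those listed above):

```python
# Hedged sketch: parse the JSON-encoded CSV row stored in the "metadata" feature.
import json
from datasets import load_dataset

ds = load_dataset("LLDDSS/Causal3D", "real_scenes_real_pendulum", split="train")
record = json.loads(ds[0]["metadata"])  # original CSV row as a dict
print(ds[0]["file_name"], record)
```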
__init__.py
DELETED
@@ -1 +0,0 @@
from .Causal3D import Causal3D
dataset.py
DELETED
@@ -1,175 +0,0 @@
import os
import glob
from pathlib import Path
from typing import List
import pandas as pd
import numpy as np
from tqdm import tqdm
import datasets

print("✅ Custom Causal3D loaded - outside code")
_CITATION = """\
@article{liu2025causal3d,
  title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},
  author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},
  journal={arXiv preprint arXiv:2503.04852},
  year={2025}
}
"""

_DESCRIPTION = """\
Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes.
It includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.
"""

_HOMEPAGE = "https://huggingface.co/datasets/LLDDSS/Causal3D"
_LICENSE = "CC-BY-4.0"

class Causal3D(datasets.GeneratorBasedBuilder):
    DEFAULT_CONFIG_NAME = "real_scenes_Real_magnet_v3"
    BUILDER_CONFIGS = [
        # hypothetical_scenes
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v2_linear",
                               version=datasets.Version("1.0.0"),
                               description="Hypothetic_v2_linear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v2_nonlinear", version=datasets.Version("1.0.0"), description="Hypothetic_v2_nonlinear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v3_fully_connected_linear", version=datasets.Version("1.0.0"), description="Hypothetic_v3_fully_connected_linear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_linear_full_connected", version=datasets.Version("1.0.0"), description="Hypothetic_v4_linear_full_connected scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_linear_v", version=datasets.Version("1.0.0"), description="Hypothetic_v4_linear_v scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v4_nonlinear_v", version=datasets.Version("1.0.0"), description="Hypothetic_v4_nonlinear_v scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v5_linear", version=datasets.Version("1.0.0"), description="Hypothetic_v5_linear scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_Hypothetic_v5_linear_full_connected", version=datasets.Version("1.0.0"), description="Hypothetic_v5_linear_full_connected scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_rendered_h3_linear_128P", version=datasets.Version("1.0.0"), description="rendered_h3_linear_128P scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_rendered_h3_nonlinear_128P", version=datasets.Version("1.0.0"), description="rendered_h3_nonlinear_128P scene"),
        datasets.BuilderConfig(name="hypothetical_scenes_rendered_h5_nonlinear", version=datasets.Version("1.0.0"), description="rendered_h5_nonlinear scene"),

        # real_scenes
        datasets.BuilderConfig(name="real_scenes_Real_Parabola", version=datasets.Version("1.0.0"), description="Real_Parabola scene"),
        datasets.BuilderConfig(name="real_scenes_Real_magnet_v3", version=datasets.Version("1.0.0"), description="Real_magnet_v3 scene"),
        datasets.BuilderConfig(name="real_scenes_Real_magnet_v3_5", version=datasets.Version("1.0.0"), description="Real_magnet_v3_5 scene"),
        datasets.BuilderConfig(name="real_scenes_Real_parabola_multi_view", version=datasets.Version("1.0.0"), description="Real_parabola_multi_view scene"),
        datasets.BuilderConfig(name="real_scenes_Real_spring_v3_256P", version=datasets.Version("1.0.0"), description="Real_spring_v3_256P scene"),
        datasets.BuilderConfig(name="real_scenes_Water_flow_scene_render", version=datasets.Version("1.0.0"), description="Water_flow_scene_render scene"),
        datasets.BuilderConfig(name="real_scenes_convex_len_render_images", version=datasets.Version("1.0.0"), description="convex_len_render_images scene"),
        datasets.BuilderConfig(name="real_scenes_real_pendulum", version=datasets.Version("1.0.0"), description="real_pendulum scene"),
        datasets.BuilderConfig(name="real_scenes_rendered_magnetic_128", version=datasets.Version("1.0.0"), description="rendered_magnetic_128 scene"),
        datasets.BuilderConfig(name="real_scenes_rendered_reflection_128P", version=datasets.Version("1.0.0"), description="rendered_reflection_128P scene"),
        datasets.BuilderConfig(name="real_scenes_seesaw_scene_128P", version=datasets.Version("1.0.0"), description="seesaw_scene_128P scene"),
        datasets.BuilderConfig(name="real_scenes_spring_scene_128P", version=datasets.Version("1.0.0"), description="spring_scene_128P scene"),
    ]

    def _info(self):
        print(">>> Loaded config:", self.config.name)  # 🟡 add this debug output
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "image": datasets.Image(),
                "file_name": datasets.Value("string"),
                "metadata": datasets.Value("string"),  # optionally replace with structured fields
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        parts = self.config.name.split("_", 2)
        category = parts[0] + "_" + parts[1]  # real_scenes or hypothetical_scenes

        if category not in ["real_scenes", "hypothetical_scenes"]:
            raise ValueError(f"Invalid category '{category}'. Must be one of ['real_scenes', 'hypothetical_scenes']")

        scene = parts[2]
        data_dir = os.path.join(category, scene)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir},
            )
        ]

    def _generate_examples(self, data_dir):
        # Find the .csv file
        csv_files = list(Path(data_dir).rglob("*.csv"))
        csv_files = [f for f in Path(data_dir).rglob("*.csv") if not f.name.startswith("._")]
        if not csv_files:
            print(f"\033[33m[SKIP] No CSV found in {data_dir}, skipping this config.\033[0m")
            return  # ✅ skip this config without raising an error
        csv_path = csv_files[0]
        df = pd.read_csv(csv_path)
        if "image" not in df.columns:
            print(f"\033[31m[SKIP] 'image' column not found in {csv_path}, skipping this config.\033[0m")
            return

        # sub_folders = [os.path.join(data_dir, i) for i in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, i))]

        def color(text, code):
            return f"\033[{code}m{text}\033[0m"
        # print()
        # print(color(f"data_dir: {data_dir}", "36"))  # Cyan
        # print(color(f"csv_path: {csv_path}", "33"))  # Yellow
        # print(color(f"csv_path.name: {csv_path.name}", "35"))  # Magenta
        # print(color(f"CSV columns: {list(df.columns)}", "32"))  # Green

        images = df["image"].tolist()
        # images only contain image names

        images = [i.split('/')[-1].split('.')[0] for i in images if i.endswith(('.png', '.jpg', '.jpeg'))]

        # Load image paths
        try:
            image_files = {}
            for ext in ("*.png", "*.jpg", "*.jpeg"):
                for img_path in Path(data_dir).rglob(ext):
                    relative_path = str(img_path.relative_to(data_dir))
                    image_files[relative_path] = str(img_path)
            parts = [i.split('/')[0] for i in list(image_files.keys())]
            parts = set(parts)
            if "part_000" not in parts:
                parts = ['']

        except Exception as e:
            print(color(f"Error loading images: {e}", "31"))  # Red
            return
        try:
            # Match CSV rows with image paths
            for idx, row in tqdm(df.iterrows(), total=len(df), desc="Processing rows", unit="row"):
                fname = row["ID"]
                raw_record_img_path = row["image"]
                record_img_name = raw_record_img_path.split('/')[-1]
                for part in parts:
                    if part == '':
                        record_img_path = record_img_name
                    else:
                        record_img_path = "/".join([part, record_img_name.strip()])
                    if "Water_flow_scene_render" in data_dir:
                        record_img_path = "/".join([part, str(int(record_img_name.strip().split('.')[0])) + ".png"])

                    # print(f"raw_record_img_path: {raw_record_img_path}")
                    # print(f"record_img_name: {record_img_name}")
                    # print("part: ", part)
                    # print(f"part: {part}, record_img_name: {record_img_name}, record_img_path: {record_img_path}")
                    # print(f"record_img_path in image_files: {record_img_path in image_files}")
                    # print(image_files.keys())
                    # print(f"part: {part}, record_img_name: {record_img_name}, record_img_path: {record_img_path}, "
                    #       f"record_image_path in image_files: {record_img_path in image_files}, image_files,key[0]: {list(image_files.keys())[0]}")
                    # print(image_files)
                    # exit(0)
                    if record_img_path in image_files:
                        # print(color(f"record_img_path: { image_files[record_img_path]}", "34"))  # Blue
                        yield idx, {
                            "image": image_files[record_img_path],
                            "file_name": fname,
                            "metadata": row.to_json(),
                        }
                        break

        except Exception as e:
            print(color(f"Error processing CSV rows: {e}", "31"))
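Both deleted scripts index scene images the same way, by walking the scene directory and keying on paths relative to it. A self-contained sketch of that step (the directory name below is a placeholder):

```python
# Sketch of the image indexing used by both deleted loading scripts.
from pathlib import Path

def index_images(data_dir: str) -> dict:
    """Map each image path relative to data_dir to its absolute path."""
    image_files = {}
    for ext in ("*.png", "*.jpg", "*.jpeg"):
        for img_path in Path(data_dir).rglob(ext):
            image_files[str(img_path.relative_to(data_dir))] = str(img_path)
    return image_files

# Example (placeholder directory):
# index_images("real_scenes/real_pendulum") -> {"part_000/00001.png": "/abs/path/...", ...}
```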
dataset_infos.json
DELETED
@@ -1,899 +0,0 @@
The deleted file held auto-generated metadata with one entry per config, all with placeholder (zero) split sizes. Every entry follows the same structure and differs only in its key and "config_name":

{
  "hypothetical_scenes_Hypothetic_v2_linear": {
    "description": "Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes. \nIt includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.\n",
    "citation": "@article{liu2025causal3d,\n  title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},\n  author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},\n  journal={arXiv preprint arXiv:2503.04852},\n  year={2025}\n}",
    "homepage": "https://huggingface.co/datasets/LLDDSS/Causal3D",
    "license": "CC-BY-4.0",
    "features": {
      "image": {"_type": "Image"},
      "file_name": {"dtype": "string", "_type": "Value"},
      "metadata": {"dtype": "string", "_type": "Value"}
    },
    "builder_name": "dataset",
    "version": {"version_str": "1.0.0", "major": 1, "minor": 0, "patch": 0},
    "splits": {
      "train": {"name": "train", "num_bytes": 0, "num_examples": 0, "dataset_name": "dataset"}
    },
    "download_checksums": {},
    "download_size": 0,
    "dataset_size": 0,
    "size_in_bytes": 0,
    "config_name": "hypothetical_scenes_Hypothetic_v2_linear"
  },
  ...
}

The visible portion of the diff repeats this block for hypothetical_scenes_Hypothetic_v2_nonlinear, hypothetical_scenes_Hypothetic_v3_fully_connected_linear, hypothetical_scenes_Hypothetic_v4_linear_full_connected, hypothetical_scenes_Hypothetic_v4_linear_v, hypothetical_scenes_Hypothetic_v4_nonlinear_v, hypothetical_scenes_Hypothetic_v5_linear, hypothetical_scenes_Hypothetic_v5_linear_full_connected, hypothetical_scenes_rendered_h3_linear_128P, hypothetical_scenes_rendered_h3_nonlinear_128P, hypothetical_scenes_rendered_h5_nonlinear, real_scenes_Real_Parabola, and real_scenes_Real_magnet_v3; the rendered view is cut off partway through the real_scenes_Real_magnet_v3 entry.
-
"version_str": "1.0.0",
|
491 |
-
"major": 1,
|
492 |
-
"minor": 0,
|
493 |
-
"patch": 0
|
494 |
-
},
|
495 |
-
"splits": {
|
496 |
-
"train": {
|
497 |
-
"name": "train",
|
498 |
-
"num_bytes": 0,
|
499 |
-
"num_examples": 0,
|
500 |
-
"dataset_name": "dataset"
|
501 |
-
}
|
502 |
-
},
|
503 |
-
"download_checksums": {},
|
504 |
-
"download_size": 0,
|
505 |
-
"dataset_size": 0,
|
506 |
-
"size_in_bytes": 0,
|
507 |
-
"config_name": "real_scenes_Real_magnet_v3"
|
508 |
-
},
|
509 |
-
"real_scenes_Real_magnet_v3_5": {
|
510 |
-
"description": "Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes. \nIt includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.\n",
|
511 |
-
"citation": "@article{liu2025causal3d,\n title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},\n author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},\n journal={arXiv preprint arXiv:2503.04852},\n year={2025}\n}",
|
512 |
-
"homepage": "https://huggingface.co/datasets/LLDDSS/Causal3D",
|
513 |
-
"license": "CC-BY-4.0",
|
514 |
-
"features": {
|
515 |
-
"image": {
|
516 |
-
"_type": "Image"
|
517 |
-
},
|
518 |
-
"file_name": {
|
519 |
-
"dtype": "string",
|
520 |
-
"_type": "Value"
|
521 |
-
},
|
522 |
-
"metadata": {
|
523 |
-
"dtype": "string",
|
524 |
-
"_type": "Value"
|
525 |
-
}
|
526 |
-
},
|
527 |
-
"builder_name": "dataset",
|
528 |
-
"version": {
|
529 |
-
"version_str": "1.0.0",
|
530 |
-
"major": 1,
|
531 |
-
"minor": 0,
|
532 |
-
"patch": 0
|
533 |
-
},
|
534 |
-
"splits": {
|
535 |
-
"train": {
|
536 |
-
"name": "train",
|
537 |
-
"num_bytes": 0,
|
538 |
-
"num_examples": 0,
|
539 |
-
"dataset_name": "dataset"
|
540 |
-
}
|
541 |
-
},
|
542 |
-
"download_checksums": {},
|
543 |
-
"download_size": 0,
|
544 |
-
"dataset_size": 0,
|
545 |
-
"size_in_bytes": 0,
|
546 |
-
"config_name": "real_scenes_Real_magnet_v3_5"
|
547 |
-
},
|
548 |
-
"real_scenes_Real_parabola_multi_view": {
|
549 |
-
"description": "Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes. \nIt includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.\n",
|
550 |
-
"citation": "@article{liu2025causal3d,\n title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},\n author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},\n journal={arXiv preprint arXiv:2503.04852},\n year={2025}\n}",
|
551 |
-
"homepage": "https://huggingface.co/datasets/LLDDSS/Causal3D",
|
552 |
-
"license": "CC-BY-4.0",
|
553 |
-
"features": {
|
554 |
-
"image": {
|
555 |
-
"_type": "Image"
|
556 |
-
},
|
557 |
-
"file_name": {
|
558 |
-
"dtype": "string",
|
559 |
-
"_type": "Value"
|
560 |
-
},
|
561 |
-
"metadata": {
|
562 |
-
"dtype": "string",
|
563 |
-
"_type": "Value"
|
564 |
-
}
|
565 |
-
},
|
566 |
-
"builder_name": "dataset",
|
567 |
-
"version": {
|
568 |
-
"version_str": "1.0.0",
|
569 |
-
"major": 1,
|
570 |
-
"minor": 0,
|
571 |
-
"patch": 0
|
572 |
-
},
|
573 |
-
"splits": {
|
574 |
-
"train": {
|
575 |
-
"name": "train",
|
576 |
-
"num_bytes": 0,
|
577 |
-
"num_examples": 0,
|
578 |
-
"dataset_name": "dataset"
|
579 |
-
}
|
580 |
-
},
|
581 |
-
"download_checksums": {},
|
582 |
-
"download_size": 0,
|
583 |
-
"dataset_size": 0,
|
584 |
-
"size_in_bytes": 0,
|
585 |
-
"config_name": "real_scenes_Real_parabola_multi_view"
|
586 |
-
},
|
587 |
-
"real_scenes_Real_spring_v3_256P": {
|
588 |
-
"description": "Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes. \nIt includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.\n",
|
589 |
-
"citation": "@article{liu2025causal3d,\n title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},\n author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},\n journal={arXiv preprint arXiv:2503.04852},\n year={2025}\n}",
|
590 |
-
"homepage": "https://huggingface.co/datasets/LLDDSS/Causal3D",
|
591 |
-
"license": "CC-BY-4.0",
|
592 |
-
"features": {
|
593 |
-
"image": {
|
594 |
-
"_type": "Image"
|
595 |
-
},
|
596 |
-
"file_name": {
|
597 |
-
"dtype": "string",
|
598 |
-
"_type": "Value"
|
599 |
-
},
|
600 |
-
"metadata": {
|
601 |
-
"dtype": "string",
|
602 |
-
"_type": "Value"
|
603 |
-
}
|
604 |
-
},
|
605 |
-
"builder_name": "dataset",
|
606 |
-
"version": {
|
607 |
-
"version_str": "1.0.0",
|
608 |
-
"major": 1,
|
609 |
-
"minor": 0,
|
610 |
-
"patch": 0
|
611 |
-
},
|
612 |
-
"splits": {
|
613 |
-
"train": {
|
614 |
-
"name": "train",
|
615 |
-
"num_bytes": 0,
|
616 |
-
"num_examples": 0,
|
617 |
-
"dataset_name": "dataset"
|
618 |
-
}
|
619 |
-
},
|
620 |
-
"download_checksums": {},
|
621 |
-
"download_size": 0,
|
622 |
-
"dataset_size": 0,
|
623 |
-
"size_in_bytes": 0,
|
624 |
-
"config_name": "real_scenes_Real_spring_v3_256P"
|
625 |
-
},
|
626 |
-
"real_scenes_Water_flow_scene_render": {
|
627 |
-
"description": "Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes. \nIt includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.\n",
|
628 |
-
"citation": "@article{liu2025causal3d,\n title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},\n author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},\n journal={arXiv preprint arXiv:2503.04852},\n year={2025}\n}",
|
629 |
-
"homepage": "https://huggingface.co/datasets/LLDDSS/Causal3D",
|
630 |
-
"license": "CC-BY-4.0",
|
631 |
-
"features": {
|
632 |
-
"image": {
|
633 |
-
"_type": "Image"
|
634 |
-
},
|
635 |
-
"file_name": {
|
636 |
-
"dtype": "string",
|
637 |
-
"_type": "Value"
|
638 |
-
},
|
639 |
-
"metadata": {
|
640 |
-
"dtype": "string",
|
641 |
-
"_type": "Value"
|
642 |
-
}
|
643 |
-
},
|
644 |
-
"builder_name": "dataset",
|
645 |
-
"version": {
|
646 |
-
"version_str": "1.0.0",
|
647 |
-
"major": 1,
|
648 |
-
"minor": 0,
|
649 |
-
"patch": 0
|
650 |
-
},
|
651 |
-
"splits": {
|
652 |
-
"train": {
|
653 |
-
"name": "train",
|
654 |
-
"num_bytes": 0,
|
655 |
-
"num_examples": 0,
|
656 |
-
"dataset_name": "dataset"
|
657 |
-
}
|
658 |
-
},
|
659 |
-
"download_checksums": {},
|
660 |
-
"download_size": 0,
|
661 |
-
"dataset_size": 0,
|
662 |
-
"size_in_bytes": 0,
|
663 |
-
"config_name": "real_scenes_Water_flow_scene_render"
|
664 |
-
},
|
665 |
-
"real_scenes_convex_len_render_images": {
|
666 |
-
"description": "Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes. \nIt includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.\n",
|
667 |
-
"citation": "@article{liu2025causal3d,\n title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},\n author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},\n journal={arXiv preprint arXiv:2503.04852},\n year={2025}\n}",
|
668 |
-
"homepage": "https://huggingface.co/datasets/LLDDSS/Causal3D",
|
669 |
-
"license": "CC-BY-4.0",
|
670 |
-
"features": {
|
671 |
-
"image": {
|
672 |
-
"_type": "Image"
|
673 |
-
},
|
674 |
-
"file_name": {
|
675 |
-
"dtype": "string",
|
676 |
-
"_type": "Value"
|
677 |
-
},
|
678 |
-
"metadata": {
|
679 |
-
"dtype": "string",
|
680 |
-
"_type": "Value"
|
681 |
-
}
|
682 |
-
},
|
683 |
-
"builder_name": "dataset",
|
684 |
-
"version": {
|
685 |
-
"version_str": "1.0.0",
|
686 |
-
"major": 1,
|
687 |
-
"minor": 0,
|
688 |
-
"patch": 0
|
689 |
-
},
|
690 |
-
"splits": {
|
691 |
-
"train": {
|
692 |
-
"name": "train",
|
693 |
-
"num_bytes": 0,
|
694 |
-
"num_examples": 0,
|
695 |
-
"dataset_name": "dataset"
|
696 |
-
}
|
697 |
-
},
|
698 |
-
"download_checksums": {},
|
699 |
-
"download_size": 0,
|
700 |
-
"dataset_size": 0,
|
701 |
-
"size_in_bytes": 0,
|
702 |
-
"config_name": "real_scenes_convex_len_render_images"
|
703 |
-
},
|
704 |
-
"real_scenes_real_pendulum": {
|
705 |
-
"description": "Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes. \nIt includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.\n",
|
706 |
-
"citation": "@article{liu2025causal3d,\n title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},\n author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},\n journal={arXiv preprint arXiv:2503.04852},\n year={2025}\n}",
|
707 |
-
"homepage": "https://huggingface.co/datasets/LLDDSS/Causal3D",
|
708 |
-
"license": "CC-BY-4.0",
|
709 |
-
"features": {
|
710 |
-
"image": {
|
711 |
-
"_type": "Image"
|
712 |
-
},
|
713 |
-
"file_name": {
|
714 |
-
"dtype": "string",
|
715 |
-
"_type": "Value"
|
716 |
-
},
|
717 |
-
"metadata": {
|
718 |
-
"dtype": "string",
|
719 |
-
"_type": "Value"
|
720 |
-
}
|
721 |
-
},
|
722 |
-
"builder_name": "dataset",
|
723 |
-
"version": {
|
724 |
-
"version_str": "1.0.0",
|
725 |
-
"major": 1,
|
726 |
-
"minor": 0,
|
727 |
-
"patch": 0
|
728 |
-
},
|
729 |
-
"splits": {
|
730 |
-
"train": {
|
731 |
-
"name": "train",
|
732 |
-
"num_bytes": 0,
|
733 |
-
"num_examples": 0,
|
734 |
-
"dataset_name": "dataset"
|
735 |
-
}
|
736 |
-
},
|
737 |
-
"download_checksums": {},
|
738 |
-
"download_size": 0,
|
739 |
-
"dataset_size": 0,
|
740 |
-
"size_in_bytes": 0,
|
741 |
-
"config_name": "real_scenes_real_pendulum"
|
742 |
-
},
|
743 |
-
"real_scenes_rendered_magnetic_128": {
|
744 |
-
"description": "Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes. \nIt includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.\n",
|
745 |
-
"citation": "@article{liu2025causal3d,\n title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},\n author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},\n journal={arXiv preprint arXiv:2503.04852},\n year={2025}\n}",
|
746 |
-
"homepage": "https://huggingface.co/datasets/LLDDSS/Causal3D",
|
747 |
-
"license": "CC-BY-4.0",
|
748 |
-
"features": {
|
749 |
-
"image": {
|
750 |
-
"_type": "Image"
|
751 |
-
},
|
752 |
-
"file_name": {
|
753 |
-
"dtype": "string",
|
754 |
-
"_type": "Value"
|
755 |
-
},
|
756 |
-
"metadata": {
|
757 |
-
"dtype": "string",
|
758 |
-
"_type": "Value"
|
759 |
-
}
|
760 |
-
},
|
761 |
-
"builder_name": "dataset",
|
762 |
-
"version": {
|
763 |
-
"version_str": "1.0.0",
|
764 |
-
"major": 1,
|
765 |
-
"minor": 0,
|
766 |
-
"patch": 0
|
767 |
-
},
|
768 |
-
"splits": {
|
769 |
-
"train": {
|
770 |
-
"name": "train",
|
771 |
-
"num_bytes": 0,
|
772 |
-
"num_examples": 0,
|
773 |
-
"dataset_name": "dataset"
|
774 |
-
}
|
775 |
-
},
|
776 |
-
"download_checksums": {},
|
777 |
-
"download_size": 0,
|
778 |
-
"dataset_size": 0,
|
779 |
-
"size_in_bytes": 0,
|
780 |
-
"config_name": "real_scenes_rendered_magnetic_128"
|
781 |
-
},
|
782 |
-
"real_scenes_rendered_reflection_128P": {
|
783 |
-
"description": "Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes. \nIt includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.\n",
|
784 |
-
"citation": "@article{liu2025causal3d,\n title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},\n author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},\n journal={arXiv preprint arXiv:2503.04852},\n year={2025}\n}",
|
785 |
-
"homepage": "https://huggingface.co/datasets/LLDDSS/Causal3D",
|
786 |
-
"license": "CC-BY-4.0",
|
787 |
-
"features": {
|
788 |
-
"image": {
|
789 |
-
"_type": "Image"
|
790 |
-
},
|
791 |
-
"file_name": {
|
792 |
-
"dtype": "string",
|
793 |
-
"_type": "Value"
|
794 |
-
},
|
795 |
-
"metadata": {
|
796 |
-
"dtype": "string",
|
797 |
-
"_type": "Value"
|
798 |
-
}
|
799 |
-
},
|
800 |
-
"builder_name": "dataset",
|
801 |
-
"version": {
|
802 |
-
"version_str": "1.0.0",
|
803 |
-
"major": 1,
|
804 |
-
"minor": 0,
|
805 |
-
"patch": 0
|
806 |
-
},
|
807 |
-
"splits": {
|
808 |
-
"train": {
|
809 |
-
"name": "train",
|
810 |
-
"num_bytes": 0,
|
811 |
-
"num_examples": 0,
|
812 |
-
"dataset_name": "dataset"
|
813 |
-
}
|
814 |
-
},
|
815 |
-
"download_checksums": {},
|
816 |
-
"download_size": 0,
|
817 |
-
"dataset_size": 0,
|
818 |
-
"size_in_bytes": 0,
|
819 |
-
"config_name": "real_scenes_rendered_reflection_128P"
|
820 |
-
},
|
821 |
-
"real_scenes_seesaw_scene_128P": {
|
822 |
-
"description": "Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes. \nIt includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.\n",
|
823 |
-
"citation": "@article{liu2025causal3d,\n title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},\n author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},\n journal={arXiv preprint arXiv:2503.04852},\n year={2025}\n}",
|
824 |
-
"homepage": "https://huggingface.co/datasets/LLDDSS/Causal3D",
|
825 |
-
"license": "CC-BY-4.0",
|
826 |
-
"features": {
|
827 |
-
"image": {
|
828 |
-
"_type": "Image"
|
829 |
-
},
|
830 |
-
"file_name": {
|
831 |
-
"dtype": "string",
|
832 |
-
"_type": "Value"
|
833 |
-
},
|
834 |
-
"metadata": {
|
835 |
-
"dtype": "string",
|
836 |
-
"_type": "Value"
|
837 |
-
}
|
838 |
-
},
|
839 |
-
"builder_name": "dataset",
|
840 |
-
"version": {
|
841 |
-
"version_str": "1.0.0",
|
842 |
-
"major": 1,
|
843 |
-
"minor": 0,
|
844 |
-
"patch": 0
|
845 |
-
},
|
846 |
-
"splits": {
|
847 |
-
"train": {
|
848 |
-
"name": "train",
|
849 |
-
"num_bytes": 0,
|
850 |
-
"num_examples": 0,
|
851 |
-
"dataset_name": "dataset"
|
852 |
-
}
|
853 |
-
},
|
854 |
-
"download_checksums": {},
|
855 |
-
"download_size": 0,
|
856 |
-
"dataset_size": 0,
|
857 |
-
"size_in_bytes": 0,
|
858 |
-
"config_name": "real_scenes_seesaw_scene_128P"
|
859 |
-
},
|
860 |
-
"real_scenes_spring_scene_128P": {
|
861 |
-
"description": "Causal3D is a benchmark for evaluating causal reasoning in physical and hypothetical visual scenes. \nIt includes both real-world recordings and rendered synthetic scenes demonstrating causal interactions.\n",
|
862 |
-
"citation": "@article{liu2025causal3d,\n title={CAUSAL3D: A Comprehensive Benchmark for Causal Learning from Visual Data},\n author={Liu, Disheng and Qiao, Yiran and Liu, Wuche and Lu, Yiren and Zhou, Yunlai and Liang, Tuo and Yin, Yu and Ma, Jing},\n journal={arXiv preprint arXiv:2503.04852},\n year={2025}\n}",
|
863 |
-
"homepage": "https://huggingface.co/datasets/LLDDSS/Causal3D",
|
864 |
-
"license": "CC-BY-4.0",
|
865 |
-
"features": {
|
866 |
-
"image": {
|
867 |
-
"_type": "Image"
|
868 |
-
},
|
869 |
-
"file_name": {
|
870 |
-
"dtype": "string",
|
871 |
-
"_type": "Value"
|
872 |
-
},
|
873 |
-
"metadata": {
|
874 |
-
"dtype": "string",
|
875 |
-
"_type": "Value"
|
876 |
-
}
|
877 |
-
},
|
878 |
-
"builder_name": "dataset",
|
879 |
-
"version": {
|
880 |
-
"version_str": "1.0.0",
|
881 |
-
"major": 1,
|
882 |
-
"minor": 0,
|
883 |
-
"patch": 0
|
884 |
-
},
|
885 |
-
"splits": {
|
886 |
-
"train": {
|
887 |
-
"name": "train",
|
888 |
-
"num_bytes": 0,
|
889 |
-
"num_examples": 0,
|
890 |
-
"dataset_name": "dataset"
|
891 |
-
}
|
892 |
-
},
|
893 |
-
"download_checksums": {},
|
894 |
-
"download_size": 0,
|
895 |
-
"dataset_size": 0,
|
896 |
-
"size_in_bytes": 0,
|
897 |
-
"config_name": "real_scenes_spring_scene_128P"
|
898 |
-
}
|
899 |
-
}
|
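For reference, the per-config schema recorded in the deleted dataset_infos.json (an image plus two string columns) maps directly onto the `datasets` Features API. The sketch below is an illustration only, not part of this PR; it assumes the config names listed above are kept unchanged after the Parquet conversion.

# Minimal sketch: declare the features shared by every config and load one config by name.
# Assumption: config names such as "real_scenes_real_pendulum" survive the Parquet conversion.
from datasets import Features, Image, Value, load_dataset

# Feature schema repeated in every config of the deleted dataset_infos.json.
features = Features(
    {
        "image": Image(),
        "file_name": Value("string"),
        "metadata": Value("string"),
    }
)

# Load a single config (here the real pendulum scene) and check its schema.
ds = load_dataset("LLDDSS/Causal3D", "real_scenes_real_pendulum", split="train")
print(ds.features)        # expected to match `features` above
print(ds[0]["file_name"])  # path-like identifier of the first image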
hypothetical_scenes/Hypothetic_v2_linear/part_000/00005.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00006.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00007.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00008.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00009.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00010.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00011.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00012.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00013.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00014.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00015.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00016.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00017.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00018.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00019.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00020.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00021.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00022.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00023.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00024.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00025.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00026.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00027.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00028.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00029.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00030.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00031.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00032.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00033.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00034.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00035.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00036.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00037.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00038.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00039.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00040.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00041.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00042.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00043.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00044.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00045.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00046.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00047.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00048.png  DELETED (Git LFS)
hypothetical_scenes/Hypothetic_v2_linear/part_000/00049.png  DELETED (Git LFS)