Datasets: add README and dataloader
- README.md +167 -3
- deepfurniture.py +115 -0
- uncompress_dataset.sh +129 -0
README.md
CHANGED
@@ -1,3 +1,167 @@
# DeepFurniture Dataset

A large-scale dataset for furniture understanding, featuring **photo-realistic rendered indoor scenes** with **high-quality 3D furniture models**. The dataset contains about 24k indoor images, 170k furniture instances, and 20k unique furniture identities, all rendered by the industry-leading rendering engines at [Kujiale](https://coohom.com).

## Key Features

- **Photo-Realistic Rendering**: All indoor scenes are rendered using professional rendering engines, providing realistic lighting, shadows, and textures
- **High-Quality 3D Models**: Each furniture identity is derived from a professional 3D model, ensuring accurate geometry and material representation
- **Rich Annotations**: Hierarchical annotations at the image, instance, and identity levels

## Dataset Overview

DeepFurniture provides hierarchical annotations at three levels:
- **Image Level**: Professionally rendered indoor scenes with scene category and depth map
- **Instance Level**: Bounding boxes and per-pixel masks for furniture instances in scenes
- **Identity Level**: High-quality rendered previews of 3D furniture models

### Statistics
- Total scenes: ~24,000 photo-realistic rendered images
- Total furniture instances: ~170,000 annotated instances in scenes
- Unique furniture identities: ~20,000 3D models with preview renderings
- Categories: 11 furniture types
- Style tags: 11 different styles

### Categories
1. Cabinet/Shelf
2. Table
3. Chair/Stool
4. Lamp
5. Door
6. Bed
7. Sofa
8. Plant
9. Decoration
10. Curtain
11. Home Appliance

### Style Tags
1. Modern
2. Country
3. European/American
4. Chinese
5. Japanese
6. Mediterranean
7. Southeast-Asian
8. Nordic
9. Industrial
10. Eclectic
11. Other

## Dataset Structure

The dataset is organized in chunks for efficient distribution:

```
data/
├── scenes/                  # Photo-realistic rendered indoor scenes
├── furnitures/              # High-quality 3D model preview renders
├── queries/                 # Query instance images cropped from scenes
└── metadata/                # Dataset information and indices
    ├── categories.json      # Furniture category definitions
    ├── styles.json          # Style tag definitions
    ├── dataset_info.json    # Dataset statistics and information
    ├── furnitures.jsonl     # Furniture metadata
    └── *_index.json         # Chunk index files
```
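
`metadata/furnitures.jsonl` is standard JSONL, one JSON object per line (presumably one per furniture identity). A minimal sketch of streaming it; the exact field names are defined by the file itself, so inspect a record before relying on them:

```python
import json
from pathlib import Path

# Stream furniture metadata without loading the whole file into memory.
with open(Path("regular_dataset") / "metadata" / "furnitures.jsonl") as f:
    first = json.loads(next(f))

print(sorted(first))  # list the available fields of the first record
```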

## Using the Dataset

### 1. Download and Extraction

```bash
# Clone the repository (make sure Git LFS is installed first)
git lfs install
git clone https://huggingface.co/datasets/byliu/DeepFurniture

# [optional] Extract the dataset (or load the dataset using the compressed files, TBD)
./uncompress_dataset.sh -s ./DeepFurniture/data -t ./regular_dataset
```
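
After extraction, a quick sanity check confirms that the four top-level directories were populated. A sketch, assuming the `./regular_dataset` target used above:

```python
from pathlib import Path

root = Path("regular_dataset")
for sub in ("scenes", "furnitures", "queries", "metadata"):
    path = root / sub
    count = sum(1 for _ in path.iterdir()) if path.is_dir() else 0
    print(f"{sub}: {count} entries")
```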

### 2. Data Format

#### Scene Data
- **Image**: RGB images in JPG format
- **Depth**: Depth maps in PNG format
- **Annotation**: JSON files containing:

```json
{
  "instances": [
    {
      "numberID": 1,
      "boundingBox": {
        "xMin": int,
        "xMax": int,
        "yMin": int,
        "yMax": int
      },
      "styleIDs": [int],
      "styleNames": [str],
      "segmentation": [int],  // COCO-format RLE encoding
      "identityID": int,
      "categoryID": int,
      "categoryName": str
    }
  ]
}
```
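
The `segmentation` run lengths can be decoded without extra dependencies. A minimal sketch, assuming COCO's uncompressed RLE convention (runs alternate background and foreground, starting with background, in column-major order); verify this against the extracted data before relying on it:

```python
import numpy as np

def rle_to_mask(counts, height, width):
    """Decode uncompressed RLE counts into a binary (height, width) mask."""
    flat = np.zeros(height * width, dtype=np.uint8)
    pos, value = 0, 0
    for run in counts:
        flat[pos:pos + run] = value  # write the current run
        pos += run
        value = 1 - value            # runs alternate 0/1
    # COCO RLE is column-major, hence the Fortran-order reshape
    return flat.reshape((height, width), order="F")
```

`height` and `width` come from the scene image; note that PIL's `Image.size` is `(width, height)`. If `pycocotools` is installed, its `mask` module can decode COCO RLE as well.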

#### Furniture Data
- Preview images of 3D models in JPG format
- Metadata in JSONL format containing category and style information

#### Query Data
- Cropped furniture instances from scenes
- Filename format: `[furnitureID]_[instanceIndex]_[sceneID].jpg` (split apart in the sketch below)
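
A small helper to recover the three components from a query filename, assuming none of the IDs contains an underscore:

```python
from pathlib import Path

def parse_query_filename(path: str) -> dict:
    """Split a query filename like '12345_0_67890.jpg' into its parts."""
    furniture_id, instance_index, scene_id = Path(path).stem.split("_")
    return {
        "furniture_id": furniture_id,
        "instance_index": int(instance_index),
        "scene_id": scene_id,
    }
```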

### 3. Loading the Dataset

```python
from deepfurniture import DeepFurnitureDataset

# Initialize the dataset
dataset = DeepFurnitureDataset("path/to/regular_dataset")

# Access a scene
scene = dataset[0]
print(f"Scene ID: {scene['scene_id']}")
print(f"Number of instances: {len(scene['instances'])}")

# Access furniture instances
for instance in scene['instances']:
    print(f"Category: {instance['category_name']}")
    print(f"Style(s): {instance['style_names']}")
```
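
Building on the same `scene` dict, an instance can be cropped out of the scene image with its bounding box, using the fields returned by the loader above:

```python
# Crop the first annotated instance out of the scene image
box = scene["instances"][0]["bounding_box"]
crop = scene["image"].crop((box["xmin"], box["ymin"], box["xmax"], box["ymax"]))
crop.save("instance_0.jpg")
```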

## Benchmarks

This dataset supports three main benchmarks:
1. Furniture Detection/Segmentation
2. Furniture Instance Retrieval
3. Furniture Retrieval

For benchmark details and baselines, please refer to our paper.

## Paper

This dataset is introduced in our paper:
[Furnishing Your Room by What You See: An End-to-End Furniture Set Retrieval Framework with Rich Annotated Benchmark Dataset](https://arxiv.org/abs/1911.09299)

If you use this dataset, please cite:

```bibtex
@article{liu2024deepfurniture,
  title={Furnishing Your Room by What You See: An End-to-End Furniture Set Retrieval Framework with Rich Annotated Benchmark Dataset},
  author={Liu, Bingyuan and Zhang, Jiantao and Zhang, Xiaoting and Zhang, Wei and Yu, Chuanhui and Zhou, Yuan},
  journal={arXiv preprint arXiv:1911.09299},
  year={2020}
}
```

## License
Apache-2.0

## Acknowledgments

- Dataset created by [Kujiale](https://coohom.com)
- Rendered using [Kujiale](https://coohom.com)'s leading interior design platform
- Thanks to the millions of designers and artists who contributed to the 3D models and designs
deepfurniture.py
ADDED
@@ -0,0 +1,115 @@
import json
from pathlib import Path
from typing import Dict

from PIL import Image


class DeepFurnitureDataset:
    """A simple dataset loader for the DeepFurniture dataset."""

    def __init__(self, data_dir: str):
        """Initialize the dataset loader.

        Args:
            data_dir: Path to the uncompressed dataset directory.
        """
        self.data_dir = Path(data_dir)

        # Load metadata
        with open(self.data_dir / "metadata" / "categories.json") as f:
            self.categories = json.load(f)
        with open(self.data_dir / "metadata" / "styles.json") as f:
            self.styles = json.load(f)

        # Collect scene directories in a deterministic order
        self.scene_dirs = sorted(p for p in (self.data_dir / "scenes").iterdir() if p.is_dir())

    def __len__(self) -> int:
        """Get the number of scenes in the dataset."""
        return len(self.scene_dirs)

    def __getitem__(self, idx: int) -> Dict:
        """Get a single scene with its annotations and related data.

        Returns:
            Dict containing:
                - scene_id (str): Scene identifier
                - image (PIL.Image): Scene image
                - depth (PIL.Image or None): Depth map if available
                - has_depth (bool): Whether a depth map is available
                - instances (List[Dict]): Furniture instances, each with:
                    - category_id: Category identifier
                    - category_name: Category name
                    - identity_id: Furniture identity identifier
                    - style_ids: List of style identifiers
                    - style_names: List of style names
                    - bounding_box: Dict with xmin, ymin, xmax, ymax
                    - segmentation: Segmentation mask (or None)
                - furniture_previews (Dict[str, PIL.Image]): Furniture IDs mapped to preview images
        """
        # Get the scene directory
        scene_dir = self.scene_dirs[idx]
        scene_id = scene_dir.name

        # Load the scene image
        image = Image.open(scene_dir / "image.jpg")

        # Load the depth map if available
        depth = None
        has_depth = False
        depth_path = scene_dir / "depth.png"
        if depth_path.exists():
            try:
                depth = Image.open(depth_path)
                has_depth = True
            except Exception as e:
                print(f"Warning: Failed to load depth map for scene {scene_id}: {e}")

        # Load the annotation
        with open(scene_dir / "annotation.json") as f:
            annotation = json.load(f)

        # Process instances
        instances = []
        furniture_ids = set()
        for inst in annotation["instances"]:
            instance_data = {
                "category_id": inst["categoryID"],
                "category_name": inst["categoryName"],
                "identity_id": inst["identityID"],
                "style_ids": inst["styleIDs"],
                "style_names": inst["styleNames"],
                "bounding_box": {
                    "xmin": inst["boundingBox"]["xMin"],
                    "ymin": inst["boundingBox"]["yMin"],
                    "xmax": inst["boundingBox"]["xMax"],
                    "ymax": inst["boundingBox"]["yMax"],
                },
                "segmentation": inst.get("segmentation"),
            }
            instances.append(instance_data)
            furniture_ids.add(str(inst["identityID"]))

        # Load furniture previews
        furniture_previews = {}
        for furniture_id in furniture_ids:
            furniture_path = self.data_dir / "furnitures" / f"{furniture_id}.jpg"
            if furniture_path.exists():
                try:
                    furniture_previews[furniture_id] = Image.open(furniture_path)
                except Exception as e:
                    print(f"Warning: Failed to load furniture preview {furniture_id}: {e}")

        return {
            "scene_id": scene_id,
            "image": image,
            "depth": depth,
            "has_depth": has_depth,
            "instances": instances,
            "furniture_previews": furniture_previews,
        }

    def __iter__(self):
        """Iterate over all scenes."""
        for i in range(len(self)):
            yield self[i]
uncompress_dataset.sh
ADDED
@@ -0,0 +1,129 @@
#!/bin/bash

# Usage function
usage() {
    echo "Usage: $0 -s SOURCE_DIR -t TARGET_DIR [-c CHUNK_TYPE] [-h]"
    echo "Uncompress the chunked DeepFurniture dataset"
    echo ""
    echo "Required arguments:"
    echo "  -s SOURCE_DIR    Source directory containing the chunked dataset"
    echo "  -t TARGET_DIR    Target directory for the uncompressed dataset"
    echo ""
    echo "Optional arguments:"
    echo "  -c CHUNK_TYPE    Specific chunk type to process (scenes, furnitures, queries)"
    echo "                   If not specified, all chunk types will be processed"
    echo "  -h               Show this help message"
    exit 1
}

# Process command line arguments
while getopts "s:t:c:h" opt; do
    case $opt in
        s) SOURCE_DIR="$OPTARG";;
        t) TARGET_DIR="$OPTARG";;
        c) CHUNK_TYPE="$OPTARG";;
        h) usage;;
        ?) usage;;
    esac
done

# Check required arguments
if [ -z "$SOURCE_DIR" ] || [ -z "$TARGET_DIR" ]; then
    echo "Error: Source and target directories are required"
    usage
fi

# Validate source directory
if [ ! -d "$SOURCE_DIR" ]; then
    echo "Error: Source directory does not exist: $SOURCE_DIR"
    exit 1
fi

# Create target directory structure
mkdir -p "$TARGET_DIR"/{metadata,scenes,furnitures,queries}

# Copy metadata files (excluding chunk index files)
echo "Copying metadata files..."
for file in "$SOURCE_DIR"/metadata/*.json*; do
    if [[ ! $file =~ _index\.json$ ]]; then
        cp "$file" "$TARGET_DIR/metadata/"
    fi
done

# Function to process chunks of a specific type
process_chunks() {
    local type=$1
    local src_dir="$SOURCE_DIR/$type"
    local target_dir="$TARGET_DIR/$type"

    echo "Processing $type chunks..."

    # Check that the source directory exists
    if [ ! -d "$src_dir" ]; then
        echo "Warning: Directory not found: $src_dir"
        return
    fi

    # Count total chunks for progress reporting
    total_chunks=$(ls "$src_dir"/*.tar.gz 2>/dev/null | wc -l)
    if [ "$total_chunks" -eq 0 ]; then
        echo "No chunks found in $src_dir"
        return
    fi

    # Extract each chunk
    current=0
    for chunk in "$src_dir"/*.tar.gz; do
        current=$((current + 1))
        chunk_name=$(basename "$chunk")
        printf "Extracting %s (%d/%d)..." "$chunk_name" "$current" "$total_chunks"

        if tar -xzf "$chunk" -C "$target_dir" 2>/dev/null; then
            echo " done"
        else
            echo " failed"
            echo "Warning: Failed to extract $chunk_name"
        fi
    done
}

# Process chunks based on input
if [ -n "$CHUNK_TYPE" ]; then
    case "$CHUNK_TYPE" in
        scenes|furnitures|queries)
            process_chunks "$CHUNK_TYPE"
            ;;
        *)
            echo "Error: Invalid chunk type: $CHUNK_TYPE"
            echo "Valid types are: scenes, furnitures, queries"
            exit 1
            ;;
    esac
else
    # Process all chunk types
    for type in scenes furnitures queries; do
        process_chunks "$type"
    done
fi

# Basic validation
echo -e "\nValidating extracted files..."

# Check that every scene directory has its required files
if [ -z "$CHUNK_TYPE" ] || [ "$CHUNK_TYPE" = "scenes" ]; then
    missing_files=0
    for scene_dir in "$TARGET_DIR"/scenes/*; do
        if [ -d "$scene_dir" ]; then
            for required in "image.jpg" "annotation.json"; do
                if [ ! -f "$scene_dir/$required" ]; then
                    echo "Warning: Missing $required in $(basename "$scene_dir")"
                    missing_files=$((missing_files + 1))
                fi
            done
        fi
    done
    echo "Scene validation complete. Missing files: $missing_files"
fi

echo "Dataset uncompression completed!"
echo "Output directory: $TARGET_DIR"