chongjie ameerazam08 committed on
Commit
2a3d474
0 Parent(s):

Duplicate from ameerazam08/zoe-depth


Co-authored-by: Ameer Azam <[email protected]>

Files changed (7)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +36 -0
  4. geometry.py +72 -0
  5. gradio_depth_pred.py +28 -0
  6. requirements.txt +5 -0
  7. utils.py +85 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Zoe Depth
+ emoji: 🌍
+ colorFrom: green
+ colorTo: gray
+ sdk: gradio
+ sdk_version: 3.19.1
+ app_file: app.py
+ pinned: false
+ duplicated_from: ameerazam08/zoe-depth
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,36 @@
+ import gradio as gr
+ import torch
+
+ from gradio_depth_pred import create_demo as create_depth_pred_demo
+
+
+ css = """
+ #img-display-container {
+     max-height: 50vh;
+ }
+ #img-display-input {
+     max-height: 40vh;
+ }
+ #img-display-output {
+     max-height: 40vh;
+ }
+
+ """
+ DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
+ model = torch.hub.load('isl-org/ZoeDepth', "ZoeD_N", pretrained=True).to(DEVICE).eval()
+
+ title = "# ZoeDepth"
+ description = """Official demo for **ZoeDepth: Zero-shot Transfer by Combining Relative and Metric Depth**.
+ ZoeDepth is a deep learning model for metric depth estimation from a single image.
+ Please refer to our [paper](https://arxiv.org/abs/2302.12288) or [github](https://github.com/isl-org/ZoeDepth) for more details."""
+
+ with gr.Blocks(css=css) as demo:
+     gr.Markdown(title)
+     gr.Markdown(description)
+     with gr.Tab("Depth Prediction"):
+         create_depth_pred_demo(model)
+     gr.HTML('''<br><br><br><center>You can duplicate this Space to skip the queue:<a href="https://huggingface.co/spaces/shariqfarooq/ZoeDepth?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a><br>
+     <p><img src="https://visitor-badge.glitch.me/badge?page_id=shariqfarooq.zoedepth_demo_hf" alt="visitors"></p></center>''')
+
+ if __name__ == '__main__':
+     demo.queue().launch()
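
For context, the `ZoeD_N` model loaded above can also be run outside the Gradio UI. Below is a minimal sketch that mirrors how the demo calls the model via `infer_pil`; the local image path is hypothetical.

```python
# Minimal sketch: run the same ZoeD_N checkpoint directly, outside Gradio.
# "input.jpg" is a hypothetical local image path.
import torch
from PIL import Image

DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
model = torch.hub.load('isl-org/ZoeDepth', "ZoeD_N", pretrained=True).to(DEVICE).eval()

image = Image.open("input.jpg").convert("RGB")
depth = model.infer_pil(image)  # numpy array of shape (H, W) with metric depth values
print(depth.shape, float(depth.min()), float(depth.max()))
```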
geometry.py ADDED
@@ -0,0 +1,72 @@
+ import numpy as np
+
+ def get_intrinsics(H,W):
+     """
+     Intrinsics for a pinhole camera model.
+     Assume fov of 55 degrees and central principal point.
+     """
+     f = 0.5 * W / np.tan(0.5 * 55 * np.pi / 180.0)
+     cx = 0.5 * W
+     cy = 0.5 * H
+     return np.array([[f, 0, cx],
+                      [0, f, cy],
+                      [0, 0, 1]])
+
+ def depth_to_points(depth, R=None, t=None):
+
+     K = get_intrinsics(depth.shape[1], depth.shape[2])
+     Kinv = np.linalg.inv(K)
+     if R is None:
+         R = np.eye(3)
+     if t is None:
+         t = np.zeros(3)
+
+     # M converts from your coordinate to PyTorch3D's coordinate system
+     M = np.eye(3)
+     M[0, 0] = -1.0
+     M[1, 1] = -1.0
+
+     height, width = depth.shape[1:3]
+
+     x = np.arange(width)
+     y = np.arange(height)
+     coord = np.stack(np.meshgrid(x, y), -1)
+     coord = np.concatenate((coord, np.ones_like(coord)[:, :, [0]]), -1)  # z=1
+     coord = coord.astype(np.float32)
+     # coord = torch.as_tensor(coord, dtype=torch.float32, device=device)
+     coord = coord[None]  # bs, h, w, 3
+
+     D = depth[:, :, :, None, None]
+     # print(D.shape, Kinv[None, None, None, ...].shape, coord[:, :, :, :, None].shape )
+     pts3D_1 = D * Kinv[None, None, None, ...] @ coord[:, :, :, :, None]
+     # pts3D_1 live in your coordinate system. Convert them to Py3D's
+     pts3D_1 = M[None, None, None, ...] @ pts3D_1
+     # from reference to target viewpoint
+     pts3D_2 = R[None, None, None, ...] @ pts3D_1 + t[None, None, None, :, None]
+     # pts3D_2 = pts3D_1
+     # depth_2 = pts3D_2[:, :, :, 2, :]  # b,1,h,w
+     return pts3D_2[:, :, :, :3, 0][0]
+
+
+ def create_triangles(h, w, mask=None):
+     """Creates mesh triangle indices from a given pixel grid size.
+     This function is not and need not be differentiable as triangle indices are
+     fixed.
+     Args:
+         h: (int) denoting the height of the image.
+         w: (int) denoting the width of the image.
+     Returns:
+         triangles: 2D numpy array of indices (int) with shape (2(W-1)(H-1) x 3)
+     """
+     x, y = np.meshgrid(range(w - 1), range(h - 1))
+     tl = y * w + x
+     tr = y * w + x + 1
+     bl = (y + 1) * w + x
+     br = (y + 1) * w + x + 1
+     triangles = np.array([tl, bl, tr, br, tr, bl])
+     triangles = np.transpose(triangles, (1, 2, 0)).reshape(
+         ((w - 1) * (h - 1) * 2, 3))
+     if mask is not None:
+         mask = mask.reshape(-1)
+         triangles = triangles[mask[triangles].all(1)]
+     return triangles
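
Together with a ZoeDepth prediction, these helpers can unproject a depth map into a per-pixel vertex grid and triangulate it. The sketch below assumes `depth` is an `(H, W)` numpy array (e.g. from `model.infer_pil`); it uses `trimesh` from requirements.txt, and the export filename is illustrative.

```python
# Sketch: build a mesh from a single depth map using the helpers above.
import numpy as np
import trimesh
from geometry import depth_to_points, create_triangles

def depth_to_mesh(depth):
    # depth_to_points expects a (1, H, W) array and returns (H, W, 3) camera-space points
    pts3d = depth_to_points(depth[None])
    h, w = depth.shape
    vertices = pts3d.reshape(-1, 3)   # one vertex per pixel, row-major
    faces = create_triangles(h, w)    # two triangles per pixel quad
    return trimesh.Trimesh(vertices=vertices, faces=faces)

# mesh = depth_to_mesh(depth)
# mesh.export("scene.ply")            # illustrative output path
```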
gradio_depth_pred.py ADDED
@@ -0,0 +1,28 @@
+ import gradio as gr
+ from utils import colorize
+ from PIL import Image
+ import tempfile
+
+ def predict_depth(model, image):
+     depth = model.infer_pil(image)
+     return depth
+
+ def create_demo(model):
+     gr.Markdown("### Depth Prediction demo")
+     with gr.Row():
+         input_image = gr.Image(label="Input Image", type='pil', elem_id='img-display-input').style(height="auto")
+         depth_image = gr.Image(label="Depth Map", elem_id='img-display-output')
+         raw_file = gr.File(label="16-bit raw depth, multiplier:256")
+     submit = gr.Button("Submit")
+
+     def on_submit(image):
+         depth = predict_depth(model, image)
+         colored_depth = colorize(depth, cmap='gray_r')
+         tmp = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
+         raw_depth = Image.fromarray((depth*256).astype('uint16'))
+         raw_depth.save(tmp.name)
+         return [colored_depth, tmp.name]
+
+     submit.click(on_submit, inputs=[input_image], outputs=[depth_image, raw_file])
+     # examples = gr.Examples(examples=["examples/person_1.jpeg", "examples/person_2.jpeg", "examples/person-leaves.png", "examples/living-room.jpeg"],
+     #                        inputs=[input_image])
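
Because the raw output is saved as a 16-bit PNG scaled by 256, the depth values can be recovered on the user's side by dividing by 256. A small sketch (the filename is illustrative):

```python
# Sketch: load the 16-bit raw depth PNG produced by the demo and undo the x256 scaling.
import numpy as np
from PIL import Image

raw = np.asarray(Image.open("raw_depth.png"))  # 16-bit depth values, shape (H, W); illustrative filename
depth = raw.astype(np.float32) / 256.0         # back to the model's depth units
print(depth.min(), depth.max())
```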
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ --extra-index-url https://download.pytorch.org/whl/cu113
+ torch
+ timm==0.6.11
+ torchvision==0.11.2
+ trimesh==3.9.42
utils.py ADDED
@@ -0,0 +1,85 @@
+ # MIT License
+
+ # Copyright (c) 2022 Intelligent Systems Lab Org
+
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
+ # of this software and associated documentation files (the "Software"), to deal
+ # in the Software without restriction, including without limitation the rights
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ # copies of the Software, and to permit persons to whom the Software is
+ # furnished to do so, subject to the following conditions:
+
+ # The above copyright notice and this permission notice shall be included in all
+ # copies or substantial portions of the Software.
+
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ # SOFTWARE.
+
+ # File author: Shariq Farooq Bhat
+
+ import matplotlib
+ import matplotlib.cm
+ import numpy as np
+ import torch
+
+ def colorize(value, vmin=None, vmax=None, cmap='magma_r', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None):
+     """Converts a depth map to a color image.
+
+     Args:
+         value (torch.Tensor, numpy.ndarray): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed.
+         vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, value.min() is used. Defaults to None.
+         vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, value.max() is used. Defaults to None.
+         cmap (str, optional): matplotlib colormap to use. Defaults to 'magma_r'.
+         invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99.
+         invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.
+         background_color (tuple[int], optional): 4-tuple RGBA color to give to invalid pixels. Defaults to (128, 128, 128, 255).
+         gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.
+         value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None.
+
+     Returns:
+         numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4)
+     """
+     if isinstance(value, torch.Tensor):
+         value = value.detach().cpu().numpy()
+
+     value = value.squeeze()
+     if invalid_mask is None:
+         invalid_mask = value == invalid_val
+     mask = np.logical_not(invalid_mask)
+
+     # normalize
+     vmin = np.percentile(value[mask], 2) if vmin is None else vmin
+     vmax = np.percentile(value[mask], 85) if vmax is None else vmax
+     if vmin != vmax:
+         value = (value - vmin) / (vmax - vmin)  # vmin..vmax
+     else:
+         # Avoid 0-division
+         value = value * 0.
+
+     # squeeze last dim if it exists
+     # grey out the invalid values
+
+     value[invalid_mask] = np.nan
+     cmapper = matplotlib.cm.get_cmap(cmap)
+     if value_transform:
+         value = value_transform(value)
+         # value = value / value.max()
+     value = cmapper(value, bytes=True)  # (nxmx4)
+
+     # img = value[:, :, :]
+     img = value[...]
+     img[invalid_mask] = background_color
+
+     # return img.transpose((2, 0, 1))
+     if gamma_corrected:
+         # gamma correction
+         img = img / 255
+         img = np.power(img, 2.2)
+         img = img * 255
+         img = img.astype(np.uint8)
+     return img
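
A short usage sketch of `colorize`: the random array stands in for a real depth prediction and the output filename is illustrative.

```python
# Sketch: colorize a depth map and save the RGBA visualization with PIL.
import numpy as np
from PIL import Image
from utils import colorize

depth = np.random.uniform(0.5, 10.0, size=(480, 640)).astype(np.float32)  # stand-in for a real prediction
colored = colorize(depth, cmap='gray_r')        # uint8 RGBA image, shape (H, W, 4)
Image.fromarray(colored).save("depth_vis.png")  # illustrative output path
```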