Update app.py #7
by luluscharf - opened

app.py CHANGED
@@ -17,14 +17,14 @@ import tempfile
 def load_model_by_name(arch_name, checkpoint_path, device):
     model = None
     if arch_name == 'depthanything':
-        #
-        model_weights = load_file(checkpoint_path) # safetensors
+        # Use safetensors to load model weights
+        model_weights = load_file(checkpoint_path) # Load using safetensors

-        #
+        # Initialize model
         model = DepthAnything(checkpoint_path=None).to(device)
-        model.load_state_dict(model_weights) #
+        model.load_state_dict(model_weights) # Apply loaded weights to the model

-        model = model.to(device) #
+        model = model.to(device) # Ensure the model is on the correct device
     else:
         raise NotImplementedError(f"Unknown architecture: {arch_name}")
     return model
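
Note: the loading pattern this hunk annotates can be exercised on its own roughly as below; load_file and load_state_dict are the actual safetensors/PyTorch calls used in app.py, while the wrapper function and its name are only illustrative.

import torch
from safetensors.torch import load_file

def load_safetensors_weights(model: torch.nn.Module, checkpoint_path: str, device: str) -> torch.nn.Module:
    # load_file reads the .safetensors checkpoint into a plain dict[str, torch.Tensor]
    state_dict = load_file(checkpoint_path)
    # strict=True (the default) raises if keys are missing or unexpected,
    # which surfaces architecture/checkpoint mismatches early
    model.load_state_dict(state_dict)
    # moving once after loading is sufficient
    return model.to(device)

Since DepthAnything(checkpoint_path=None).to(device) already places the model on the device, the trailing model = model.to(device) in the hunk is redundant, though harmless.
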
@@ -37,8 +37,14 @@ def process_image(image, model, device):
     # Preprocess the image
     image_np = np.array(image)[..., ::-1] / 255

+    # Resize input image to 1920p while maintaining aspect ratio
+    h, w = image_np.shape[:2]
+    scale = 1920 / max(h, w)
+    new_h, new_w = int(h * scale), int(w * scale)
+    image_np = cv2.resize(image_np, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
+
     transform = Compose([
-        Resize(
+        Resize(new_h, new_w, resize_target=False, keep_aspect_ratio=True, ensure_multiple_of=14, resize_method='lower_bound', image_interpolation_method=cv2.INTER_CUBIC),
         NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
         PrepareForNet()
     ])
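
Note: the new preprocessing caps the longer image side at 1920 px while keeping the aspect ratio. A minimal standalone sketch of that step, assuming only cv2 and numpy (the helper name is hypothetical):

import cv2
import numpy as np

def resize_long_side(image_np: np.ndarray, target: int = 1920) -> np.ndarray:
    # scale so the longer side becomes `target`, preserving aspect ratio
    h, w = image_np.shape[:2]
    scale = target / max(h, w)
    new_h, new_w = int(h * scale), int(w * scale)
    # cv2.resize takes the output size as (width, height)
    return cv2.resize(image_np, (new_w, new_h), interpolation=cv2.INTER_CUBIC)

As written, the scale is applied unconditionally, so uploads smaller than 1920 px on their longer side are upscaled as well; clamping with scale = min(scale, 1.0) would keep small images at native size if upscaling is not intended. The ensure_multiple_of=14 in the Resize transform presumably matches the backbone's ViT patch size.
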
@@ -53,8 +59,9 @@ def process_image(image, model, device):
     # Convert depth map to numpy
     pred_disp_np = pred_disp.cpu().detach().numpy()[0, 0, :, :]

-    # Normalize depth map
+    # Normalize depth map to 16-bit range [0, 65535]
     pred_disp_normalized = (pred_disp_np - pred_disp_np.min()) / (pred_disp_np.max() - pred_disp_np.min())
+    pred_disp_16bit = (pred_disp_normalized * 65535).astype(np.uint16)

     # Colorized depth map
     cmap = "Spectral_r"
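
Note: the 16-bit output added here is a per-image min-max normalization followed by quantization to the full uint16 range. A small sketch of the same idea, with a hypothetical helper name and an epsilon guard for the constant-map edge case that the hunk itself does not handle:

import numpy as np

def disparity_to_uint16(disp: np.ndarray, eps: float = 1e-8) -> np.ndarray:
    # per-image min-max normalization to [0, 1]
    d_min, d_max = float(disp.min()), float(disp.max())
    norm = (disp - d_min) / max(d_max - d_min, eps)  # eps avoids division by zero
    # quantize to the full 16-bit range [0, 65535]
    return (norm * 65535.0).astype(np.uint16)

Because the normalization is per image, the saved map preserves relative depth ordering only; absolute scale is not recoverable from the .npy file.
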
@@ -68,19 +75,16 @@ def process_image(image, model, device):

     # Save raw depth map as a temporary npy file
     with tempfile.NamedTemporaryFile(delete=False, suffix=".npy") as temp_file:
-        np.save(temp_file.name,
+        np.save(temp_file.name, pred_disp_16bit)
         depth_raw_path = temp_file.name

     # Resize outputs to match original image size
-
-
-    depth_gray_hwc = cv2.resize(depth_gray_hwc, (w, h), cv2.INTER_LINEAR)
+    depth_colored_hwc = cv2.resize(depth_colored_hwc, (new_w, new_h), cv2.INTER_LINEAR)
+    depth_gray_hwc = cv2.resize(depth_gray_hwc, (new_w, new_h), cv2.INTER_LINEAR)

     # Convert to PIL images
     return image, Image.fromarray(depth_colored_hwc), Image.fromarray(depth_gray_hwc), depth_raw_path

-
-
 # Gradio interface function with GPU support
 @spaces.GPU
 def gradio_interface(image):
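
Note: with this change the downloadable raw file contains the uint16 map rather than the float disparity. A round-trip sketch using only standard numpy and tempfile (the helper name is illustrative):

import tempfile
import numpy as np

def save_raw_depth(depth_u16: np.ndarray) -> str:
    # delete=False keeps the file on disk after the handler returns, so it can be served as a download
    with tempfile.NamedTemporaryFile(delete=False, suffix=".npy") as tmp:
        np.save(tmp.name, depth_u16)
        return tmp.name

# reading it back preserves dtype and shape:
# raw = np.load(save_raw_depth(np.zeros((480, 640), dtype=np.uint16)))

On the resize lines: the outputs are scaled to (new_w, new_h), the 1920p working size, rather than the original (w, h) that the comment mentions; and the interpolation flag is passed positionally, which puts it in the dst slot of cv2.resize, so writing interpolation=cv2.INTER_LINEAR (as the new preprocessing line does) would be more explicit.
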
@@ -108,10 +112,10 @@ def gradio_interface(image):
     model = DepthAnything(**model_kwargs['vitl']).to(device)
     checkpoint_path = hf_hub_download(repo_id=f"xingyang1/Distill-Any-Depth", filename=f"large/model.safetensors", repo_type="model")

-    #
-    model_weights = load_file(checkpoint_path) # safetensors
+    # Use safetensors to load model weights
+    model_weights = load_file(checkpoint_path) # Load using safetensors
     model.load_state_dict(model_weights)
-    model = model.to(device) #
+    model = model.to(device) # Ensure the model is on the correct device

     if model is None:
         return None, None, None, None