nayanBhiwapurkar commited on
Commit
2ff962a
·
verified ·
1 Parent(s): 2173d5d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +47 -31
app.py CHANGED
@@ -3,31 +3,51 @@ from PIL import Image, ImageFilter
3
  import numpy as np
4
  import cv2
5
  import torch
6
- from transformers import DPTFeatureExtractor, DPTForDepthEstimation
7
 
8
- # Load model and feature extractor
9
- feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
10
- model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
 
 
11
 
12
- # Gaussian Blur function
13
- def apply_gaussian_blur(image, blur_radius):
14
- return image.filter(ImageFilter.GaussianBlur(blur_radius))
 
 
 
 
15
 
16
- # Lens Blur function
17
- def apply_lens_blur(image):
18
- # Get depth map
19
- inputs = feature_extractor(images=image, return_tensors="pt")
 
 
 
 
 
 
 
 
 
 
 
20
  with torch.no_grad():
21
- outputs = model(**inputs)
22
  depth_map = outputs.predicted_depth.squeeze().cpu().numpy()
 
 
23
  depth_map = (depth_map - depth_map.min()) / (depth_map.max() - depth_map.min()) * 15
 
24
  depth_map_resized = cv2.resize(depth_map, (image.width, image.height))
25
- depth_map_resized = 15 - depth_map_resized
26
 
27
- # Convert to OpenCV format
28
  image_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
29
  blurred_image = np.zeros_like(image_cv, dtype=np.float32)
30
 
 
31
  for blur_radius in range(1, 16):
32
  blurred_layer = cv2.GaussianBlur(image_cv, (0, 0), sigmaX=blur_radius)
33
  mask = ((depth_map_resized >= (blur_radius - 1)) & (depth_map_resized < blur_radius)).astype(np.float32)
@@ -35,30 +55,26 @@ def apply_lens_blur(image):
35
  blurred_image += blurred_layer * mask
36
 
37
  blurred_image = np.clip(blurred_image, 0, 255).astype(np.uint8)
38
- return Image.fromarray(cv2.cvtColor(blurred_image, cv2.COLOR_BGR2RGB))
 
39
 
40
- # Gradio app interface
41
- def process_image(image, effect, blur_radius):
42
- if effect == "Gaussian Blur":
43
- return apply_gaussian_blur(image, blur_radius)
44
  elif effect == "Lens Blur":
45
- return apply_lens_blur(image)
46
- else:
47
- return image
48
 
49
- # Gradio Interface
50
  with gr.Blocks() as demo:
51
- gr.Markdown("# Gaussian and Lens Blur Effects")
52
  with gr.Row():
53
  with gr.Column():
54
- uploaded_image = gr.Image(type="pil")
55
- effect = gr.Radio(["Gaussian Blur", "Lens Blur"], value="Gaussian Blur", label="Effect")
56
- blur_radius = gr.Slider(1, 15, value=5, step=1, label="Blur Radius (for Gaussian Blur)")
57
- submit_button = gr.Button("Apply Effect")
58
  with gr.Column():
59
  output_image = gr.Image(type="pil", label="Processed Image")
 
 
60
 
61
- submit_button.click(process_image, inputs=[uploaded_image, effect, blur_radius], outputs=output_image)
62
-
63
- # Launch the app
64
  demo.launch()
 
3
  import numpy as np
4
  import cv2
5
  import torch
6
+ from transformers import SegformerFeatureExtractor, SegformerForSemanticSegmentation, DPTFeatureExtractor, DPTForDepthEstimation
7
 
8
# Load models once at import time.
# NOTE(review): from_pretrained downloads weights on first run (network access
# required); everything stays on CPU — move to GPU explicitly if needed.
segformer_extractor = SegformerFeatureExtractor.from_pretrained("nvidia/segformer-b1-finetuned-ade-512-512")
segformer_model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b1-finetuned-ade-512-512")
dpt_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
dpt_model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
13
 
14
# Gaussian Blur Background Function
def gaussian_blur_background(image):
    """Keep the detected person sharp and Gaussian-blur everything else.

    Args:
        image: PIL.Image (RGB) to process.

    Returns:
        PIL.Image: composite of the sharp person over a blurred background.
    """
    # Segmentation is inference-only: run it under no_grad so no autograd
    # graph is built (matches lens_blur, which already does this).
    inputs = segformer_extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = segformer_model(**inputs)
    logits = outputs.logits
    segmentation = torch.argmax(logits, dim=1)[0].numpy()

    # Binary mask for the 'person' class (assumes ADE20K index 12 — TODO confirm).
    human_mask = (segmentation == 12).astype(np.uint8) * 255
    # The model predicts at its own resolution; scale the mask back up to the
    # input image size so it can drive the composite.
    human_mask_image = Image.fromarray(human_mask).resize(image.size)

    # Blur the whole frame, then paste the sharp original back where the
    # mask is white.
    blurred_background = image.filter(ImageFilter.GaussianBlur(15))
    composite_image = Image.composite(image, blurred_background, human_mask_image)
    return composite_image
32
+
33
# Depth-Based Lens Blur Function
def lens_blur(image):
    """Apply a depth-dependent ("lens") blur to *image*.

    A DPT depth map is normalized to [0, 15] and inverted, then each pixel is
    assigned one of 15 Gaussian blur strengths according to its depth bucket
    and the per-bucket layers are composited into the result.

    Args:
        image: PIL.Image (RGB) to process.

    Returns:
        PIL.Image: the image with variable blur applied.
    """
    # Depth estimation is inference-only; skip autograd bookkeeping.
    inputs = dpt_extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = dpt_model(**inputs)
    depth_map = outputs.predicted_depth.squeeze().cpu().numpy()

    # Normalize to [0, 15] and invert so larger values mean stronger blur.
    # Guard against a constant depth map (max == min) to avoid a division by
    # zero producing NaNs.
    d_min, d_max = depth_map.min(), depth_map.max()
    d_range = (d_max - d_min) or 1.0
    depth_map = (depth_map - d_min) / d_range * 15
    depth_map = 15 - depth_map
    depth_map_resized = cv2.resize(depth_map, (image.width, image.height))

    # Convert image to OpenCV (BGR) format; accumulate layers in float32.
    image_cv = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    blurred_image = np.zeros_like(image_cv, dtype=np.float32)

    # One blur strength per depth bucket [r-1, r). The final bucket is closed
    # on the right so pixels at exactly 15 are not dropped (half-open buckets
    # alone leave them unassigned and therefore black in the output).
    for blur_radius in range(1, 16):
        blurred_layer = cv2.GaussianBlur(image_cv, (0, 0), sigmaX=blur_radius)
        in_bucket = depth_map_resized >= (blur_radius - 1)
        if blur_radius < 15:
            in_bucket &= depth_map_resized < blur_radius
        # Expand the 2-D mask to a trailing channel axis so it broadcasts
        # over the 3-channel BGR layer.
        mask = in_bucket.astype(np.float32)[:, :, np.newaxis]
        blurred_image += blurred_layer * mask

    blurred_image = np.clip(blurred_image, 0, 255).astype(np.uint8)
    blurred_image_pil = Image.fromarray(cv2.cvtColor(blurred_image, cv2.COLOR_BGR2RGB))
    return blurred_image_pil
60
 
61
# Gradio Interface
def process_image(image, effect):
    """Dispatch *image* to the blur effect selected in the UI.

    Args:
        image: PIL.Image uploaded by the user (may be None).
        effect: label string from the Radio component; None when the user
            has not picked an effect yet (the Radio has no default value).

    Returns:
        The processed image, or the input unchanged when no effect matches.
    """
    if effect == "Gaussian Blur Background":
        return gaussian_blur_background(image)
    if effect == "Lens Blur":
        return lens_blur(image)
    # Fall back to the original image instead of returning None, which Gradio
    # would render as an empty output on the very first click.
    return image
 
 
67
 
 
68
# Build the Gradio UI: upload + effect picker on the left, result on the right.
with gr.Blocks() as demo:
    gr.Markdown("# BlurEffectsApp: Gaussian and Lens Blur Effects")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil", label="Upload an Image")
            effect_choice = gr.Radio(
                ["Gaussian Blur Background", "Lens Blur"],
                label="Choose Effect",
            )
            apply_button = gr.Button("Apply Effect")
        with gr.Column():
            result_image = gr.Image(type="pil", label="Processed Image")

    # Route the button click through the dispatcher.
    apply_button.click(
        process_image,
        inputs=[input_image, effect_choice],
        outputs=result_image,
    )

demo.launch()