Upload PortraitBlurrer.py
PortraitBlurrer.py (ADDED, +122 -0)
@@ -0,0 +1,122 @@
# PortraitBlurrer.py
import cv2
import numpy as np
from PIL import Image


class PortraitBlurrer:
    def __init__(self, max_blur=31, depth_threshold=120,
                 feather_strength=3, sharpen_strength=1):
        self.max_blur = max_blur
        # Ensure max_blur is odd and positive
        if self.max_blur % 2 == 0:
            self.max_blur += 1
        if self.max_blur <= 0:
            self.max_blur = 3  # Default odd positive

        self.depth_threshold = depth_threshold
        self.feather_strength = feather_strength
        self.sharpen_strength = sharpen_strength

    def refine_depth_map(self, depth_map):
        # Apply a bilateral filter to smooth depth while preserving edges
        refined_depth = cv2.bilateralFilter(depth_map, 9, 75, 75)
        return refined_depth

    def create_subject_mask(self, depth_map):
        # Pixels whose depth value exceeds depth_threshold are treated as the subject
        _, mask = cv2.threshold(depth_map, self.depth_threshold, 255, cv2.THRESH_BINARY)
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

        # Feather the mask edges with a Gaussian blur (kernel must be odd and positive)
        ksize = self.feather_strength
        if ksize % 2 == 0:
            ksize += 1
        if ksize <= 0:
            ksize = 3

        mask = cv2.GaussianBlur(mask, (ksize, ksize), 0)
        return mask.astype(np.float32) / 255.0

    def sharpen_image(self, image):
        # Clamp strength to [0, 1] so the blend weights below stay normalized
        strength = float(np.clip(self.sharpen_strength, 0.0, 1.0))

        # Simple 3x3 sharpening kernel
        kernel = np.array([[-1, -1, -1],
                           [-1,  9, -1],
                           [-1, -1, -1]])
        sharpened = cv2.filter2D(image, -1, kernel)

        # Blend sharpened and original based on strength:
        # strength=1 returns the fully sharpened image, strength near 0 stays close to the original
        if strength != 1.0:
            blended = cv2.addWeighted(image, 1.0 - strength, sharpened, strength, 0)
            return np.clip(blended, 0, 255).astype(np.uint8)
        # Clip in case sharpening pushed values out of range
        return np.clip(sharpened, 0, 255).astype(np.uint8)

    def apply_blur(self, original_bgr, depth_map_array):
        # Resize depth map to match image dimensions
        depth_resized = cv2.resize(depth_map_array, (original_bgr.shape[1], original_bgr.shape[0]),
                                   interpolation=cv2.INTER_LINEAR)

        refined_depth = self.refine_depth_map(depth_resized)
        mask = self.create_subject_mask(refined_depth)  # Float mask in [0, 1]

        blurred = cv2.GaussianBlur(original_bgr, (self.max_blur, self.max_blur), 0)

        # Sharpen the subject only when the strength is significant
        if self.sharpen_strength > 0.05:
            foreground = self.sharpen_image(original_bgr)
        else:
            foreground = original_bgr  # Use original if no sharpening

        # Blend the (optionally sharpened) foreground with the blurred background:
        # the subject is kept where the mask is high, the blur where it is low
        alpha = mask[:, :, np.newaxis]
        result = foreground * alpha + blurred * (1 - alpha)

        # Ensure result is uint8
        final_result = np.clip(result, 0, 255).astype(np.uint8)

        # Return the final image (BGR), plus the refined depth map and the mask for display
        return final_result, refined_depth, (mask * 255).astype(np.uint8)

    def process_image(self, original_bgr_np, depth_image_pil):
        depth_map_array = np.array(depth_image_pil)

        if len(depth_map_array.shape) > 2:
            # The PIL depth map may be RGB; convert it to grayscale
            depth_map_array = cv2.cvtColor(depth_map_array, cv2.COLOR_RGB2GRAY)

        if depth_map_array.dtype != np.uint8:
            # Bring non-uint8 depth maps into the 0-255 range
            if depth_map_array.max() > 1.0:
                # Likely already on a 0-255 (or wider) scale; clip before casting
                depth_map_array = np.clip(depth_map_array, 0, 255).astype(np.uint8)
            else:
                # Assume a 0-1 float map; scale it to 0-255
                depth_map_array = (depth_map_array * 255).astype(np.uint8)

        blurred_image_np, refined_depth_np, mask_np = self.apply_blur(original_bgr_np, depth_map_array)

        # Return the blurred image, the refined depth map (grayscale), and the mask (grayscale)
        return blurred_image_np, refined_depth_np, mask_np
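
Usage sketch (not part of the uploaded file): PortraitBlurrer expects the original image as a BGR NumPy array and the depth map as a PIL image, for example from a monocular depth-estimation model. The depth model, the "photo.jpg" path, and the output filenames below are illustrative assumptions, not something defined by PortraitBlurrer.py.

# example_usage.py (hypothetical driver script, not part of this commit)
import cv2
from PIL import Image
from transformers import pipeline  # assumed depth source; any estimator yielding a PIL depth map works

from PortraitBlurrer import PortraitBlurrer

original_bgr = cv2.imread("photo.jpg")  # BGR NumPy array
depth_pil = pipeline("depth-estimation")(Image.open("photo.jpg"))["depth"]  # PIL depth map

blurrer = PortraitBlurrer(max_blur=31, depth_threshold=120,
                          feather_strength=3, sharpen_strength=1)
blurred_bgr, refined_depth, subject_mask = blurrer.process_image(original_bgr, depth_pil)

cv2.imwrite("photo_blurred.jpg", blurred_bgr)   # final portrait-mode result
cv2.imwrite("photo_depth.jpg", refined_depth)   # refined depth map (grayscale)
cv2.imwrite("photo_mask.jpg", subject_mask)     # feathered subject mask (grayscale)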