Commit 024905c
1 Parent(s): cff8dcb
up 3

Files changed:
- app.py +57 -1
- requirements.txt +1 -0
- up_models/modelx2.ort +0 -0
- up_models/modelx4.ort +0 -0
app.py
CHANGED
@@ -20,7 +20,8 @@ from diffusers import DiffusionPipeline
 from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline
 from diffusers import StableDiffusionUpscalePipeline
 from diffusers import LDMSuperResolutionPipeline
-
+import cv2
+import onnxruntime
 
 def removeFurniture(input_img1,
                     input_img2,
@@ -105,7 +106,59 @@ def upscale2(image, prompt):
     upscaled_image = pipeline(image, num_inference_steps=10, eta=1).images[0]
     return upscaled_image
 
+def convert_pil_to_cv2(image):
+    # pil_image = image.convert("RGB")
+    open_cv_image = np.array(image)
+    # RGB to BGR
+    open_cv_image = open_cv_image[:, :, ::-1].copy()
+    return open_cv_image
+
+def inference(model_path: str, img_array: np.ndarray) -> np.ndarray:
+    options = onnxruntime.SessionOptions()
+    options.intra_op_num_threads = 1
+    options.inter_op_num_threads = 1
+    ort_session = onnxruntime.InferenceSession(model_path, options)
+    ort_inputs = {ort_session.get_inputs()[0].name: img_array}
+    ort_outs = ort_session.run(None, ort_inputs)
+
+    return ort_outs[0]
+
+def post_process(img: np.ndarray) -> np.ndarray:
+    # 1, C, H, W -> C, H, W
+    img = np.squeeze(img)
+    # C, H, W -> H, W, C
+    img = np.transpose(img, (1, 2, 0))[:, :, ::-1].astype(np.uint8)
+    return img
+
+def pre_process(img: np.ndarray) -> np.ndarray:
+    # H, W, C -> C, H, W
+    img = np.transpose(img[:, :, 0:3], (2, 0, 1))
+    # C, H, W -> 1, C, H, W
+    img = np.expand_dims(img, axis=0).astype(np.float32)
+    return img
+
+def upscale3(image):
+    model_path = "up_models/modelx4.ort"
+    img = convert_pil_to_cv2(image)
+
+    if img.ndim == 2:
+        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+
+    if img.shape[2] == 4:
+        alpha = img[:, :, 3]  # GRAY
+        alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2BGR)  # BGR
+        alpha_output = post_process(inference(model_path, pre_process(alpha)))  # BGR
+        alpha_output = cv2.cvtColor(alpha_output, cv2.COLOR_BGR2GRAY)  # GRAY
+
+        img = img[:, :, 0:3]  # BGR
+        image_output = post_process(inference(model_path, pre_process(img)))  # BGR
+        image_output = cv2.cvtColor(image_output, cv2.COLOR_BGR2BGRA)  # BGRA
+        image_output[:, :, 3] = alpha_output
 
+    elif img.shape[2] == 3:
+        image_output = post_process(inference(model_path, pre_process(img)))  # BGR
+
+    return image_output
 
 
 with gr.Blocks() as app:
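The helpers added in this hunk follow the usual ONNX image-model convention: a uint8 HWC array is transposed into a float32 NCHW batch for the session, and the output is mapped back. A minimal round-trip sketch of that contract, using a dummy array rather than a real model (shapes here are illustrative only):

    import numpy as np

    # Dummy HWC uint8 "image" standing in for a real photo.
    img = np.zeros((64, 64, 3), dtype=np.uint8)

    x = pre_process(img)   # (1, 3, 64, 64) float32, NCHW, ready for onnxruntime
    assert x.shape == (1, 3, 64, 64) and x.dtype == np.float32

    # post_process inverts the layout (and reverses the channel order again).
    y = post_process(x)    # (64, 64, 3) uint8
    assert y.shape == (64, 64, 3) and y.dtype == np.uint8

With the real model in between, the array coming out of inference should be larger spatially (the filename modelx4.ort suggests a 4x upscaler), so post_process would return a (256, 256, 3) image for this input.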
@@ -141,6 +194,9 @@ with gr.Blocks() as app:
     with gr.Column():
         gr.Button("Upscale2").click(upscale2, inputs=[gr.Image(type="pil"),gr.Textbox(label="prompt",value="empty room")], outputs=gr.Image())
 
+    with gr.Column():
+        gr.Button("Upscale3").click(upscale3, inputs=[gr.Image(type="pil")], outputs=gr.Image())
+
 
 app.launch(debug=True,share=True)
 
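Outside the Gradio UI, the new path can be smoke-tested directly. A sketch, assuming a local test image at the hypothetical path input.png:

    from PIL import Image

    # upscale3 takes a PIL image, runs the x4 .ort model over the color
    # (and, for RGBA input, alpha) channels, and returns a uint8 numpy array.
    pil_img = Image.open("input.png")
    out = upscale3(pil_img)
    print(out.shape)  # e.g. (4*H, 4*W, 3), or (4*H, 4*W, 4) for RGBA input

Note that upscale3 hardcodes modelx4.ort; modelx2.ort ships in the same commit but is not wired to any button yet.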
requirements.txt
CHANGED
@@ -9,3 +9,4 @@ python-docx
 triton
 altair<5
 gradio
+onnxruntime==1.12.0
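The exact pin is likely deliberate: ORT-format (.ort) models are sensitive to the runtime version they were built for, so a loose requirement could break loading of the bundled files. A quick sanity check, as a sketch:

    import onnxruntime

    # Confirm the pinned runtime and an available CPU provider before
    # loading the bundled .ort models.
    print(onnxruntime.__version__)                # expected: 1.12.0
    print(onnxruntime.get_available_providers())  # e.g. ['CPUExecutionProvider']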
up_models/modelx2.ort
ADDED
Binary file (147 kB)

up_models/modelx4.ort
ADDED
Binary file (261 kB)
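Both files are ONNX Runtime's serialized .ort format (typically converted from an .onnx model with ORT's tooling). To see what input the upscalers actually expect, the sessions can be inspected; a sketch (tensor names and shapes depend on how the models were exported):

    import onnxruntime

    for path in ("up_models/modelx2.ort", "up_models/modelx4.ort"):
        sess = onnxruntime.InferenceSession(path)
        for t in sess.get_inputs():
            print(path, "in: ", t.name, t.shape, t.type)
        for t in sess.get_outputs():
            print(path, "out:", t.name, t.shape, t.type)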