Spaces · Sleeping
Commit 19b5412 · Parent(s): 7217180
test2

app.py CHANGED
@@ -77,16 +77,18 @@ def segmentation(img):

def upscale(image, prompt):
    print("upscale",image,prompt)
+   device = "cuda" if torch.cuda.is_available() else "cpu"
+   print("device",device)

    # image.thumbnail((512, 512))
    # print("resize",image)

-   …
-   # …
-   …
-   …
+   pipe = StableDiffusionUpscalePipeline.from_pretrained("stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16)
+   # pipe = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
+   pipe = pipe.to(device)
+   pipe.enable_attention_slicing()

-   ret = …
+   ret = pipe(prompt=prompt,
               image=image,
               num_inference_steps=10,
               guidance_scale=0)
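The new upscale() body is the standard diffusers text-guided x4 upscale flow, built fresh inside the function on every call. Below is a minimal self-contained sketch of the same flow, assuming the torch, PIL, and diffusers imports live at the top of app.py (this diff does not show them); the upscale_sketch name and the fp32-on-CPU fallback are assumptions of this sketch, not the committed code, since float16 inference generally fails on CPU:

import torch
from PIL import Image
from diffusers import StableDiffusionUpscalePipeline

def upscale_sketch(image: Image.Image, prompt: str) -> Image.Image:
    # Same device pick as the commit.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Fall back to fp32 on CPU, where fp16 inference is typically unsupported.
    dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=dtype
    )
    pipe = pipe.to(device)
    pipe.enable_attention_slicing()  # lower peak VRAM at some speed cost
    out = pipe(prompt=prompt, image=image, num_inference_steps=10, guidance_scale=0)
    return out.images[0]

Loading the pipeline once at module scope and reusing it across calls would avoid repeating the from_pretrained cost on every request; the commit keeps the per-call load.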
@@ -98,12 +100,14 @@ def upscale(image, prompt):

def upscale2(image, prompt):
    print("upscale2",image,prompt)
-   …
    device = "cuda" if torch.cuda.is_available() else "cpu"
-   …
-   pipeline = pipeline.to(device)
+   print("device",device)

-   …
+   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages", torch_dtype=torch.float16)
+   pipe = pipe.to(device)
+   pipe.enable_attention_slicing()
+
+   upscaled_image = pipe(image, num_inference_steps=10, eta=1).images[0]
    return upscaled_image

def convert_pil_to_cv2(image):
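upscale2() gets the same load, move, and attention-slicing pattern, but with the unconditional LDM x4 super-resolution pipeline, so the prompt parameter it still accepts goes unused. A sketch under the same assumptions as above (module-level imports, fp32 fallback on CPU added as a hedge); upscale2_sketch is a hypothetical name:

import torch
from PIL import Image
from diffusers import LDMSuperResolutionPipeline

def upscale2_sketch(image: Image.Image) -> Image.Image:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = LDMSuperResolutionPipeline.from_pretrained(
        "CompVis/ldm-super-resolution-4x-openimages", torch_dtype=dtype
    )
    pipe = pipe.to(device)
    pipe.enable_attention_slicing()
    # This pipeline takes no text prompt; eta=1 makes the DDIM sampler fully stochastic.
    return pipe(image, num_inference_steps=10, eta=1).images[0]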
@@ -138,25 +142,29 @@ def pre_process(img: np.array) -> np.array:
    return img

def upscale3(image):
+   print("upscale3",image)
+
    model_path = f"up_models/modelx4.ort"
    img = convert_pil_to_cv2(image)

-   if img.ndim == 2:
-   …
+   # if img.ndim == 2:
+   #     print("upscale3","img.ndim == 2")
+   #     img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+
+   # if img.shape[2] == 4:
+   #     print("upscale3","img.shape[2] == 4")
+   #     alpha = img[:, :, 3] # GRAY
+   #     alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2BGR) # BGR
+   #     alpha_output = post_process(inference(model_path, pre_process(alpha))) # BGR
+   #     alpha_output = cv2.cvtColor(alpha_output, cv2.COLOR_BGR2GRAY) # GRAY
+
+   #     img = img[:, :, 0:3] # BGR
+   #     image_output = post_process(inference(model_path, pre_process(img))) # BGR
+   #     image_output = cv2.cvtColor(image_output, cv2.COLOR_BGR2BGRA) # BGRA
+   #     image_output[:, :, 3] = alpha_output
+
+   #     print("upscale3","img.shape[2] == 3")
+   image_output = post_process(inference(model_path, pre_process(img))) # BGR

    return image_output
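The upscale3() change disables the grayscale and alpha-channel special cases, so every input now takes the plain 3-channel path through the ONNX x4 model. The helpers it calls (convert_pil_to_cv2, pre_process, inference, post_process) are defined elsewhere in app.py and are not shown in this diff; the stand-ins below only illustrate the conventional shape of an ONNX Runtime super-resolution flow and are assumptions, not the committed code:

import numpy as np
import onnxruntime

def pre_process_sketch(img: np.ndarray) -> np.ndarray:
    # HWC uint8 -> NCHW float32, the usual input layout for image models.
    return np.transpose(img[np.newaxis, ...], (0, 3, 1, 2)).astype(np.float32)

def inference_sketch(model_path: str, img: np.ndarray) -> np.ndarray:
    # .ort files are ONNX Runtime's optimized format; InferenceSession loads them directly.
    session = onnxruntime.InferenceSession(model_path, providers=["CPUExecutionProvider"])
    input_name = session.get_inputs()[0].name
    return session.run(None, {input_name: img})[0]

def post_process_sketch(img: np.ndarray) -> np.ndarray:
    # NCHW float32 -> HWC uint8, ready for cv2 or PIL.
    return np.clip(np.transpose(img[0], (1, 2, 0)), 0, 255).astype(np.uint8)

The commented-out block had upscaled a 4-channel image's alpha plane through the same model and reattached it to a BGRA result; with it disabled, 4-channel input would likely need flattening to BGR before inference.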