Spaces: Running on Zero

Commit 05ef38f · Parent: a69b645
Re-add spaces and tweak example

Files changed:
- app.py (+19, -51)
- requirements.txt (+1, -1)
app.py

````diff
@@ -4,12 +4,21 @@ import time
 import math
 import random
 import torch
+import spaces
 
 from diffusers import StableDiffusionXLInpaintPipeline
 from PIL import Image, ImageFilter
 
 max_64_bit_int = 2**63 - 1
 
+DESCRIPTION="""
+<h1 style="text-align: center;">Outpainting demo</h1>
+<p style="text-align: center;">This uses code by Fabrice TIERCELIN</p>
+<br/>
+<a href='https://huggingface.co/spaces/clinteroni/outpainting-with-differential-diffusion-demo?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14'></a>
+<br/>
+"""
+
 if torch.cuda.is_available():
     device = "cuda"
     floatType = torch.float16
@@ -19,6 +28,8 @@ else:
     floatType = torch.float32
     variant = None
 
+DESCRIPTION+=f"<p>Running on {device}</p>"
+
 pipe = StableDiffusionXLInpaintPipeline.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype = floatType, variant = variant)
 pipe = pipe.to(device)
 
@@ -77,6 +88,7 @@ def check(
 ):
     raise gr.Error("At least one border must be enlarged.")
 
+@spaces.GPU
 def uncrop(
     input_image,
     enlarge_top,
@@ -208,7 +220,7 @@ def uncrop(
         process_width = math.floor(output_width * factor)
         process_height = math.floor(output_height * factor)
 
-        limitation = " Due to technical
+        limitation = " Due to technical limitations, the image has been downscaled and then upscaled.";
     else:
         process_width = output_width
         process_height = output_height
@@ -272,19 +284,7 @@ def uncrop(
 
 with gr.Blocks() as interface:
     gr.HTML(
-        """
-        <h1 style="text-align: center;">Outpainting demo</h1>
-        <p style="text-align: center;">Enlarges the point of view of your image, freely, without account, without watermark, without installation, which can be downloaded</p>
-        <br/>
-        <br/>
-        ✨ Powered by <i>SDXL 1.0</i> artificial intellingence.
-        <br/>
-        💻 Your computer must <u>not</u> enter into standby mode.<br/>You can duplicate this space on a free account, it works on CPU and CUDA.<br/>
-        <a href='https://huggingface.co/spaces/clinteroni/outpainting-with-differential-diffusion-demo?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14'></a>
-        <br/>
-        ⚖️ You can use, modify and share the generated images but not for commercial uses.
-
-        """
+        DESCRIPTION
     )
     with gr.Row():
         with gr.Column():
@@ -419,10 +419,10 @@ with gr.Blocks() as interface:
     examples = [
         [
             "./examples/Coucang.jpg",
-
-
-
-
+            417,
+            0,
+            417,
+            0,
             "A white Coucang, in a tree, ultrarealistic, realistic, photorealistic, 8k, bokeh",
             "Border, frame, painting, drawing, cartoon, anime, 3d, scribbling, smear, noise, blur, watermark",
             0,
@@ -441,42 +441,10 @@ with gr.Blocks() as interface:
 
     gr.Markdown(
         """
-        ## How to prompt your image
-
-        To easily read your prompt, start with the subject, then describ the pose or action, then secondary elements, then the background, then the graphical style, then the image quality:
-        ```
-        A Vietnamese woman, red clothes, walking, smilling, in the street, a car on the left, in a modern city, photorealistic, 8k
-        ```
-
-        You can use round brackets to increase the importance of a part:
-        ```
-        A Vietnamese woman, (red clothes), walking, smilling, in the street, a car on the left, in a modern city, photorealistic, 8k
-        ```
-
-        You can use several levels of round brackets to even more increase the importance of a part:
-        ```
-        A Vietnamese woman, ((red clothes)), (walking), smilling, in the street, a car on the left, in a modern city, photorealistic, 8k
-        ```
-
-        You can use number instead of several round brackets:
-        ```
-        A Vietnamese woman, (red clothes:1.5), (walking), smilling, in the street, a car on the left, in a modern city, photorealistic, 8k
-        ```
-
-        You can do the same thing with square brackets to decrease the importance of a part:
-        ```
-        A [Vietnamese] woman, (red clothes:1.5), (walking), smilling, in the street, a car on the left, in a modern city, photorealistic, 8k
-        ```
-
-        To easily read your negative prompt, organize it the same way as your prompt (not important for the AI):
-        ```
-        man, boy, hat, running, tree, bicycle, forest, drawing, painting, cartoon, 3d, monochrome, blurry, noisy, bokeh
-        ```
-
         ## Credit
         The [example image](https://commons.wikimedia.org/wiki/File:Coucang.jpg) is by Aprisonsan
         and licensed under CC-BY-SA 4.0 International.
         """
     )
 
-interface.queue().launch()
+interface.queue().launch()
````
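For context on the commit title: on a Space "Running on Zero" (ZeroGPU hardware), the re-added `spaces` import and `@spaces.GPU` decorator are what request a GPU for the decorated function and release it afterwards. Below is a minimal sketch of that pattern with the same model this Space loads; it is illustrative only, not the Space's actual app.py, and the `generate` function and its inputs are invented for the example.

```python
# Minimal ZeroGPU sketch (illustrative; not this Space's app.py).
import gradio as gr
import spaces  # Hugging Face ZeroGPU helper package
import torch
from diffusers import StableDiffusionXLInpaintPipeline

# The pipeline is built and moved to CUDA at import time; on ZeroGPU the
# GPU itself is only attached while a @spaces.GPU function is running.
pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
    "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
    torch_dtype=torch.float16,
).to("cuda")

@spaces.GPU  # a GPU is allocated only for the duration of each call
def generate(prompt, image, mask):
    return pipe(prompt=prompt, image=image, mask_image=mask).images[0]

demo = gr.Interface(
    fn=generate,
    inputs=[gr.Textbox(label="Prompt"), gr.Image(type="pil"), gr.Image(type="pil")],
    outputs=gr.Image(type="pil"),
)
demo.queue().launch()
```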
requirements.txt

```diff
@@ -1,5 +1,5 @@
 accelerate
-
+diffusers
 gradio
 numpy
 opencv-python
```
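The diff only shows fragments of `uncrop`, but the visible pieces (the `Image`/`ImageFilter` imports, the `enlarge_*` parameters, the downscale/upscale `limitation` message) match the usual SDXL outpainting recipe: paste the original into a larger canvas, build a mask that is white over the new borders, soften the mask edge, and hand both to the inpainting pipeline. The sketch below is a hedged illustration of that recipe, assuming a `pipe` configured as in app.py; the helper name `outpaint` and all default values are invented for the example and are not the Space's real code.

```python
# Illustrative outpainting helper (assumption: not the Space's real uncrop()).
import torch
from PIL import Image, ImageFilter

def outpaint(pipe, input_image, enlarge_top, enlarge_right, enlarge_bottom, enlarge_left,
             prompt, negative_prompt, seed=42, mask_blur=16):
    out_w = input_image.width + enlarge_left + enlarge_right
    out_h = input_image.height + enlarge_top + enlarge_bottom

    # Paste the original into a larger canvas; the new borders start out gray.
    canvas = Image.new("RGB", (out_w, out_h), (127, 127, 127))
    canvas.paste(input_image, (enlarge_left, enlarge_top))

    # Mask: white where the model may paint (the enlarged borders), black over
    # the original pixels, with a Gaussian blur to soften the seam.
    mask = Image.new("L", (out_w, out_h), 255)
    mask.paste(Image.new("L", input_image.size, 0), (enlarge_left, enlarge_top))
    mask = mask.filter(ImageFilter.GaussianBlur(mask_blur))

    generator = torch.Generator(device=pipe.device).manual_seed(seed)
    return pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=canvas,
        mask_image=mask,
        width=out_w,
        height=out_h,
        generator=generator,
    ).images[0]
```

The actual app additionally reduces the working resolution when the enlarged canvas gets too large, which is what the "Due to technical limitations, the image has been downscaled and then upscaled." message touched by this commit reports.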