Spaces: Running on Zero

Commit: update

Files changed:
- app.py (+11, -14)
- flux1_inpaint.py (+7, -41)
app.py CHANGED

@@ -12,9 +12,7 @@ def sanitize_prompt(prompt):
     return sanitized_prompt
 
 @spaces.GPU(duration=180)
-def process_images(image, image2=None,prompt="a girl",
-    if negative_prompt == None:
-        negative_prompt = ""
+def process_images(image, image2=None,prompt="a girl",inpaint_model="black-forest-labs/FLUX.1-schnell",strength=0.75,seed=0):
     # I'm not sure when this happen
     if not isinstance(image, dict):
         if image2 == None:
@@ -34,7 +32,7 @@ def process_images(image, image2=None,prompt="a girl",negative_prompt=None,inpai
     mask = image['layers'][0]
 
 
-    output = flux1_inpaint.process_image(image["background"],mask,prompt,
+    output = flux1_inpaint.process_image(image["background"],mask,prompt,inpaint_model,strength,seed)
 
     return output
 
@@ -61,29 +59,28 @@ with demo_blocks as demo:
     gr.HTML(read_file("demo_header.html"))
     with gr.Row():
         with gr.Column():
-            image = gr.ImageEditor(height=
+            image = gr.ImageEditor(height=800,sources=['upload','clipboard'],transforms=[],image_mode='RGB', layers=False, elem_id="image_upload", type="pil", label="Upload",brush=gr.Brush(colors=["#fff"], color_mode="fixed"))
            with gr.Row(elem_id="prompt-container", equal_height=False):
                with gr.Row():
                    prompt = gr.Textbox(label="Prompt",value="a eyes closed girl,shut eyes",placeholder="Your prompt (what you want in place of what is erased)", elem_id="prompt")
 
                    btn = gr.Button("Inpaint!", elem_id="run_button")
-                    negative_prompt = gr.Textbox(label="Negative Prompt",placeholder="negative prompt",value="worst quality", elem_id="negative_prompt")
 
            image_mask = gr.Image(sources=['upload','clipboard'], elem_id="mask_upload", type="pil", label="Mask_Upload",height=400, value=None)
            with gr.Accordion(label="Advanced Settings", open=False):
                with gr.Row( equal_height=True):
-                    strength = gr.Number(value=0.
-
-
-
-
-
+                    strength = gr.Number(value=0.75, minimum=0, maximum=1.0, step=0.01, label="Inpaint strength")
+                    seed = gr.Number(value=0, minimum=0, step=1, label="Inpaint seed")
+                    models = ["black-forest-labs/FLUX.1-schnell"]
+                    inpaint_model = gr.Dropdown(label="modes", choices=models, value="black-forest-labs/FLUX.1-schnell")
+
+
        with gr.Column():
-            image_out = gr.Image(sources=[],label="Output", elem_id="output-img")
+            image_out = gr.Image(height=800,sources=[],label="Output", elem_id="output-img")
 
 
 
-    btn.click(fn=process_images, inputs=[image, image_mask,prompt,
+    btn.click(fn=process_images, inputs=[image, image_mask,prompt,inpaint_model,strength,seed], outputs =image_out, api_name='infer')
    gr.Examples(
        examples=[["examples/catman.jpg", "examples/catman_mask.jpg","He's wearing a dog face."]]
        ,
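The new handler removes the negative-prompt path and instead threads `inpaint_model`, `strength`, and `seed` from the Advanced Settings row through `btn.click` into `process_images`. The editor value itself arrives as a Gradio `ImageEditor` dict, from which `image["background"]` supplies the photo and `image['layers'][0]` (or the separately uploaded `image_mask`) supplies the mask. Below is a minimal sketch of that unpacking, assuming the `type="pil"` dict layout shown in the diff; the helper name and the rule giving an uploaded mask priority over the painted layer are illustrative, not part of the Space's code.

```python
# Minimal sketch of how app.py's inputs map onto flux1_inpaint.process_image().
# Assumption: gr.ImageEditor(type="pil") returns a dict holding a "background"
# PIL image plus painted "layers". The helper name and the mask-precedence
# rule are illustrative, not the Space's actual code.
from PIL import Image

def extract_image_and_mask(editor_value, uploaded_mask=None):
    """Return (photo, mask) ready to pass to flux1_inpaint.process_image()."""
    if not isinstance(editor_value, dict):
        # Plain image (e.g. an API caller): rely on a separately uploaded mask.
        return editor_value, uploaded_mask
    photo = editor_value["background"]  # the original picture
    # Prefer an explicitly uploaded mask, otherwise use the painted layer.
    mask = editor_value["layers"][0] if uploaded_mask is None else uploaded_mask
    return photo, mask
```

The `btn.click(..., inputs=[image, image_mask, prompt, inpaint_model, strength, seed], outputs=image_out, api_name='infer')` wiring then matches the new `process_images` signature positionally.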
flux1_inpaint.py CHANGED

@@ -1,64 +1,30 @@
 import torch
 from diffusers import FluxInpaintPipeline
-from diffusers.utils import load_image
 
 from PIL import Image
 import sys
-import numpy as np
-
-import json
-import os
 import spaces
 
-
-pipeline_device = 0 if torch.cuda.is_available() else -1 # TODO mix above
-torch_dtype = torch.float16
-debug = True
-@spaces.GPU
-def make_inpaint_condition(image, image_mask):
-    image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
-    image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0
-
-    if image.shape[0:1] != image_mask.shape[0:1]:
-        print("error image and image_mask must have the same image size")
-        return None
-
-    image[image_mask > 0.5] = -1.0  # set as masked pixel
-
-    image = image.cpu().numpy()  # convert the torch.Tensor to a NumPy array
-    image = (image * 255).astype(np.uint8)  # rescale the 0-1 range back to 0-255 and cast to uint8
-    image = image.transpose(0, 2, 3, 1)  # reshape (1, 3, H, W) to (1, H, W, 3)
-
-    return image[0]
-
-
-
-
+# I only test with FLUX.1-schnell
 
 @spaces.GPU
-def process_image(image,mask_image,prompt="a
-
-
+def process_image(image,mask_image,prompt="a person",model_id="black-forest-labs/FLUX.1-schnell",strength=0.75,seed=0,num_inference_steps=4):
     if image == None:
         return None
 
-    #control_image=make_inpaint_condition(image,mask_image)
-    #mask_image.save("_mask_image.jpg")
-    #image = control_image
-
     pipe = FluxInpaintPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16)
-    pipe.to(
-
+    pipe.to("cuda")
+
     generators = []
-    generator = torch.Generator(
+    generator = torch.Generator("cuda").manual_seed(seed)
     generators.append(generator)
-
+    # more parameter see https://huggingface.co/docs/diffusers/api/pipelines/flux#diffusers.FluxInpaintPipeline
     output = pipe(prompt=prompt, image=image, mask_image=mask_image,generator=generator,strength=strength)
 
     return output.images[0]
 
-
 if __name__ == "__main__":
+    #args input-image input-mask output
     image = Image.open(sys.argv[1])
     mask = Image.open(sys.argv[2])
     output = process_image(image,mask)
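Stripped of the Gradio and ZeroGPU plumbing, the rewritten `process_image` is a direct `FluxInpaintPipeline` call. The sketch below reproduces that path as a standalone script. It assumes a CUDA device, and it forwards `num_inference_steps`, which the new signature accepts but the `pipe()` call in the diff does not yet pass, so treat that line as an assumption about intent rather than current behaviour. The example file names come from the `gr.Examples` entry in app.py.

```python
# Standalone sketch of the inpainting path used by flux1_inpaint.py.
# Assumptions: a CUDA GPU is available, and num_inference_steps is forwarded
# to the pipeline even though the diff's pipe() call does not pass it yet.
import torch
from PIL import Image
from diffusers import FluxInpaintPipeline

def inpaint(image_path, mask_path, prompt="a person",
            model_id="black-forest-labs/FLUX.1-schnell",
            strength=0.75, seed=0, num_inference_steps=4):
    image = Image.open(image_path).convert("RGB")
    mask = Image.open(mask_path).convert("L")  # white = region to repaint

    pipe = FluxInpaintPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16)
    pipe.to("cuda")

    generator = torch.Generator("cuda").manual_seed(seed)
    result = pipe(prompt=prompt, image=image, mask_image=mask,
                  generator=generator, strength=strength,
                  num_inference_steps=num_inference_steps)
    return result.images[0]

if __name__ == "__main__":
    # Example assets referenced by app.py's gr.Examples entry.
    out = inpaint("examples/catman.jpg", "examples/catman_mask.jpg",
                  "He's wearing a dog face.")
    out.save("output.jpg")
```

FLUX.1-schnell is a few-step distilled model, which is why a small step count such as the default 4 is workable, while `strength` controls how strongly the masked region is re-generated.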