Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -17,22 +17,19 @@ transformer = FluxTransformer2DModel.from_pretrained(


# Build pipeline
-
-
-
-
-
-
-
-
-
-    pipe.transformer.to(torch.bfloat16)
-    pipe.controlnet.to(torch.bfloat16)
-    return pipe
+controlnet = FluxControlNetModel.from_pretrained("alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta", torch_dtype=torch.bfloat16)
+pipe = FluxControlNetInpaintingPipeline.from_pretrained(
+    "black-forest-labs/FLUX.1-dev",
+    controlnet=controlnet,
+    transformer=transformer,
+    torch_dtype=torch.bfloat16
+).to("cuda")
+pipe.transformer.to(torch.bfloat16)
+pipe.controlnet.to(torch.bfloat16)


MARKDOWN = """
-# FLUX.1-dev-Inpainting-Model-GPU 🔥
+# FLUX.1-dev-Inpainting-Model-Beta-GPU 🔥
Model by alimama-creative
"""

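With this hunk the pipeline is built once at import time instead of inside the removed load_models helper, so every request reuses the same CUDA-resident models. On a Space that runs on ZeroGPU this usually pairs with the spaces.GPU decorator on the inference function. A minimal sketch of that pattern, assuming the standard spaces.GPU usage and the simplified process signature shown later in this diff (the decorator itself is not visible in any hunk here):

    import spaces  # ZeroGPU helper provided by Hugging Face Spaces
    import torch

    # `pipe` is the module-level FluxControlNetInpaintingPipeline built above.
    # ZeroGPU attaches a GPU only while a @spaces.GPU-decorated function runs,
    # so model loading stays at import time and only inference is decorated.
    @spaces.GPU  # placement is an assumption, not taken from this diff
    def process(input_image_editor, prompt, negative_prompt,
                controlnet_conditioning_scale, guidance_scale, seed):
        # remaining parameters and the full body are as in app.py
        generator = torch.Generator(device="cuda").manual_seed(seed)
        ...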
@@ -40,7 +37,6 @@ Model by alimama-creative
def process(input_image_editor,
            prompt,
            negative_prompt,
-           model_name,
            controlnet_conditioning_scale,
            guidance_scale,
            seed,
@@ -51,8 +47,6 @@ def process(input_image_editor,
    mask = input_image_editor['layers'][0]
    size = (768, 768)
    image_or = image.copy()
-   pipe = load_models(model_name)
-   pipe = pipe.to("cuda")

    image = image.convert("RGB").resize(size)
    mask = mask.convert("RGB").resize(size)
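With the per-request pipe = load_models(model_name) call removed, process now runs inference directly against the global pipeline. A sketch of how the resized image and mask are typically passed to it, with the call signature assumed from alimama-creative's FLUX.1-dev-Controlnet-Inpainting-Beta model card rather than from this diff:

    # Inside process(), after image/mask have been resized to `size`.
    generator = torch.Generator(device="cuda").manual_seed(seed)
    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        height=size[1],
        width=size[0],
        control_image=image,    # RGB image to inpaint
        control_mask=mask,      # white pixels mark the region to regenerate
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        guidance_scale=guidance_scale,
        num_inference_steps=28,  # assumed value, not shown in the diff
        generator=generator,
        true_guidance_scale=1.0,  # value suggested for the Beta checkpoint
    ).images[0]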
@@ -87,7 +81,6 @@ with gr.Blocks() as demo:

        prompt = gr.Textbox(lines=2, placeholder="Enter prompt here...")
        negative_prompt = gr.Textbox(lines=2, placeholder="Enter negative_prompt here...")
-       model_name = gr.Dropdown(choices=["FLUX.1-dev-Controlnet-Inpainting-Beta","FLUX.1-dev-Controlnet-Inpainting-Alpha"], label="Select Model Inpainting", value="FLUX.1-dev-Controlnet-Inpainting-Beta")
        controlnet_conditioning_scale = gr.Slider(minimum=0, step=0.01, maximum=1, value=0.9, label="controlnet_conditioning_scale")
        guidance_scale = gr.Slider(minimum=1, step=0.5, maximum=10, value=3.5, label="Image to generate")
        seed = gr.Slider(minimum=0, step=1, maximum=10000000, value=124, label="Seed Value")
@@ -109,7 +102,6 @@ with gr.Blocks() as demo:
            input_image_editor_component,
            prompt,
            negative_prompt,
-           model_name,
            controlnet_conditioning_scale,
            guidance_scale,
            seed,
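The last two hunks keep the UI and the event wiring in sync: the model_name Dropdown disappears from the widget definitions and from the click inputs list in the same commit, so the positional arguments still line up with the new process signature. A sketch of what that wiring looks like, with the button and output component names assumed (they sit outside the hunks shown here):

    # Hypothetical names for the widgets outside these hunks.
    submit_button = gr.Button("Generate")
    output_image = gr.Image(label="Result")

    # Gradio passes `inputs` positionally, so the list must mirror
    # process(input_image_editor, prompt, negative_prompt,
    #         controlnet_conditioning_scale, guidance_scale, seed, ...)
    submit_button.click(
        fn=process,
        inputs=[
            input_image_editor_component,
            prompt,
            negative_prompt,
            controlnet_conditioning_scale,
            guidance_scale,
            seed,
        ],
        outputs=output_image,
    )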