new layout
app.py CHANGED
```diff
@@ -1,6 +1,6 @@
 import gradio as gr
 import numpy as np
-import spaces
+# import spaces
 import torch
 import random
 from PIL import Image
```
```diff
@@ -12,7 +12,7 @@ MAX_SEED = np.iinfo(np.int32).max

 pipe = FluxKontextPipeline.from_pretrained("fuliucansheng/FLUX.1-Kontext-dev-diffusers", torch_dtype=torch.bfloat16).to("cuda")

-@spaces.GPU
+# @spaces.GPU
 def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5, steps=28, progress=gr.Progress(track_tqdm=True)):
     """
     Perform image editing using the FLUX.1 Kontext pipeline.
```
```diff
@@ -76,7 +76,7 @@ def infer(input_image, prompt, seed=42, randomize_seed=False, guidance_scale=2.5
     ).images[0]
     return image, seed, gr.Button(visible=True)

-@spaces.GPU
+# @spaces.GPU
 def infer_example(input_image, prompt):
     image, seed, _ = infer(input_image, prompt)
     return image, seed
```
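The commit disables ZeroGPU by commenting out `import spaces` and both `@spaces.GPU` decorators, which matches the unconditional `.to("cuda")` in the context above. If the same file should also keep working on a ZeroGPU Space, a guarded import is a common pattern; a minimal sketch, not part of this commit:

```python
# Minimal sketch (not part of this commit): make the ZeroGPU decorator
# optional so the same app.py runs on Spaces and on a dedicated GPU box.
try:
    import spaces
    gpu = spaces.GPU           # on Spaces: requests a GPU for each call
except ImportError:
    def gpu(fn):               # elsewhere: no-op passthrough decorator
        return fn

@gpu
def infer(input_image, prompt, seed=42):
    ...
```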
```diff
@@ -84,7 +84,13 @@ def infer_example(input_image, prompt):
 css="""
 #col-container {
     margin: 0 auto;
-    max-width:
+    max-width: 90vw;
+}
+.input-image img {
+    height: 70vh !important;
+}
+#row {
+    min-height: 35vh !important;
 }
 """

```
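The new rules size the UI with viewport units (`90vw`, `70vh`, `35vh`) so it tracks the browser window instead of a fixed width. The `.input-image img` and `#row` selectors only take effect because the layout hunk below tags the image components with `elem_classes="input-image"` and `elem_id="row"`; a rough sketch of that binding (the rendered markup is an assumption about Gradio's wrapper HTML):

```python
import gradio as gr

# Sketch: elem_id / elem_classes are emitted on the component's outer wrapper,
# which is what the CSS above selects.
image = gr.Image(label="Result", elem_classes="input-image", elem_id="row")
# renders roughly as <div id="row" class="... input-image ..."><img ...></div>,
# so ".input-image img" sizes the preview and "#row" sets the minimum height.
```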
```diff
@@ -94,50 +100,53 @@ with gr.Blocks(css=css) as demo:
         gr.Markdown(f"""# FLUX.1 Kontext [dev]
 Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro], [[blog]](https://bfl.ai/announcements/flux-1-kontext-dev) [[model]](https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev)
 """)
-        with gr.Row():
+        with gr.Row(equal_height=True):
             with gr.Column():
-                input_image = gr.Image(label="Upload the image for editing", type="pil")
-                with gr.Row():
-                    prompt = gr.Text(
-                        label="Prompt",
-                        show_label=False,
-                        max_lines=1,
-                        placeholder="Enter your prompt for editing (e.g., 'Remove glasses', 'Add a hat')",
-                        container=False,
-                    )
-                    run_button = gr.Button("Run", scale=0)
-                with gr.Accordion("Advanced Settings", open=True):
-
-                    seed = gr.Slider(
-                        label="Seed",
-                        minimum=0,
-                        maximum=MAX_SEED,
-                        step=1,
-                        value=0,
-                    )
-
-                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-                    guidance_scale = gr.Slider(
-                        label="Guidance Scale",
-                        minimum=1,
-                        maximum=10,
-                        step=0.1,
-                        value=2.5,
-                    )
-
-                    steps = gr.Slider(
-                        label="Steps",
-                        minimum=1,
-                        maximum=30,
-                        value=28,
-                        step=1
-                    )
+                input_image = gr.Image(label="Upload the image for editing", type="pil", elem_classes="input-image", elem_id="row")

             with gr.Column():
-                result = gr.Image(label="Result", show_label=False, interactive=False)
+                result = gr.Image(label="Result", show_label=False, interactive=False, elem_classes="input-image", elem_id="row")
                 reuse_button = gr.Button("Reuse this image", visible=False)

+        with gr.Row():
+            prompt = gr.Text(
+                label="Prompt",
+                show_label=False,
+                max_lines=1,
+                placeholder="Enter your prompt for editing (e.g., 'Remove glasses', 'Add a hat')",
+                container=False,
+                scale=2
+            )
+            run_button = gr.Button("Run", scale=1)
+
+        with gr.Row():
+            with gr.Accordion("Advanced Settings", open=False):
+
+                seed = gr.Slider(
+                    label="Seed",
+                    minimum=0,
+                    maximum=MAX_SEED,
+                    step=1,
+                    value=0,
+                )
+
+                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+                guidance_scale = gr.Slider(
+                    label="Guidance Scale",
+                    minimum=1,
+                    maximum=10,
+                    step=0.1,
+                    value=2.5,
+                )
+
+                steps = gr.Slider(
+                    label="Steps",
+                    minimum=1,
+                    maximum=40,
+                    value=28,
+                    step=1
+                )

         examples = gr.Examples(
             examples=[
```
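The restructuring pulls the prompt box, Run button, and Advanced Settings out of the left column into their own full-width rows, and `equal_height=True` keeps the two image columns level. One caveat: `elem_id` becomes an HTML `id`, which should be unique per page, yet `elem_id="row"` is assigned to both images here. A shared class would style both without duplicate ids; a possible alternative, not part of this commit:

```python
# Sketch: tag both images with classes only and drop the duplicated id.
input_image = gr.Image(label="Upload the image for editing", type="pil",
                       elem_classes=["input-image", "image-cell"])
result = gr.Image(label="Result", show_label=False, interactive=False,
                  elem_classes=["input-image", "image-cell"])

# with the matching rule in css:
#   .image-cell { min-height: 35vh !important; }
```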
```diff
@@ -148,7 +157,7 @@ Image editing and manipulation model guidance-distilled from FLUX.1 Kontext [pro
             inputs=[input_image, prompt],
             outputs=[result, seed],
             fn=infer_example,
-            cache_examples=
+            cache_examples=False
         )

     gr.on(
```
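Setting `cache_examples=False` tells Gradio not to run `infer_example` over every example at startup to pre-compute outputs; each example now invokes the FLUX pipeline only when a user clicks it, avoiding several full generations at launch.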