Update app.py
app.py CHANGED
@@ -1,5 +1,4 @@
 import os
-os.system('pip install --upgrade spaces')
 
 import gradio as gr
 import numpy as np
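The deleted line upgraded the `spaces` package at import time via `os.system`, which re-runs on every restart and can fail silently. On Hugging Face Spaces the usual replacement, presumably applied here, is to declare the dependency in `requirements.txt`, which is installed once at build time:

```text
# requirements.txt (assumed; this file is not part of the commit)
spaces
gradio
```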
@@ -176,12 +175,21 @@ pipe = QwenImageEditPipelineCustom.from_pretrained(
 
 # Load Lightning LoRA weights for acceleration
 try:
+    pipe.load_lora_weights(
+        "eigen-ai-labs/eigen-banana-qwen-image-edit",
+        weight_name="eigen-banana-qwen-image-edit-fp16-lora.safetensors",
+        adapter_name="eigen"
+    )
+
     pipe.load_lora_weights(
         "lightx2v/Qwen-Image-Lightning",
-        weight_name="Qwen-Image-Lightning-
+        weight_name="Qwen-Image-Lightning-4steps-V2.0-bf16.safetensors",
+        adapter_name="lightning"
     )
-    pipe.
-
+    pipe.set_adapters(["eigen", "lightning"], adapter_weights=[1., 1.])
+    pipe.fuse_lora(adapter_names=["eigen", "lightning"], lora_scale=1)
+    pipe.unload_lora_weights()
+    print("Successfully loaded LoRA weights")
 except Exception as e:
     print(f"Warning: Could not load Lightning LoRA weights: {e}")
     print("Continuing with base model...")
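After `fuse_lora` the two adapters are baked into the base weights and `unload_lora_weights` drops the now-redundant LoRA modules, so inference needs no adapter bookkeeping. A minimal usage sketch of the fused pipeline, assuming the stock `diffusers` `QwenImageEditPipeline` (the Space uses a custom subclass) and the Qwen-Image convention of `true_cfg_scale=1.0` for distilled few-step checkpoints:

```python
import torch
from PIL import Image
from diffusers import QwenImageEditPipeline

pipe = QwenImageEditPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit", torch_dtype=torch.bfloat16
).to("cuda")
# ...load, set_adapters, fuse_lora, unload_lora_weights as in the hunk above...

result = pipe(
    image=Image.open("input.png").convert("RGB"),
    prompt="Turn the jacket bright red",
    num_inference_steps=4,   # matches the fused 4-step Lightning LoRA
    true_cfg_scale=1.0,      # distilled checkpoints skip true classifier-free guidance
    generator=torch.Generator(device="cuda").manual_seed(0),
).images[0]
result.save("edited.png")
```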
@@ -255,7 +263,7 @@ examples = [
 ]
 
 css = """
-#col-container {
+#col-container, #examples {
     margin: 0 auto;
     max-width: 1024px;
 }
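Widening the selector to `#col-container, #examples` implies an element elsewhere in the file carries `elem_id="examples"`. A plausible sketch, assuming Gradio's `gr.Examples` helper, which accepts `elem_id` (component names and example values here are hypothetical):

```python
import gradio as gr

css = "#examples {margin: 0 auto; max-width: 1024px;}"

with gr.Blocks(css=css) as demo:
    prompt = gr.Textbox(label="Edit instruction")  # hypothetical component
    gr.Examples(
        examples=[["Turn the sky purple"], ["Make it a pencil sketch"]],
        inputs=[prompt],
        elem_id="examples",  # picked up by the centered #examples CSS rule
    )

demo.launch()
```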
@@ -266,20 +274,14 @@ css = """
     width: 400px;
 }
 #edit_text{margin-top: -62px !important}
+.dark .progress-text{color: white !important}
 """
 
-with gr.Blocks(css=css) as demo:
+with gr.Blocks(theme=gr.themes.Citrus(), css=css) as demo:
     with gr.Column(elem_id="col-container"):
-        gr.
-
-
-            <h2 style="font-style: italic;color: #5b47d1;margin-top: -27px !important;margin-left: 96px">Fast, 8-steps with Lightning LoRA</h2>
-        </div>
-        """)
-        gr.Markdown("""
-        [Learn more](https://github.com/QwenLM/Qwen-Image) about the Qwen-Image series.
-        This demo uses the [Qwen-Image-Lightning](https://huggingface.co/lightx2v/Qwen-Image-Lightning) LoRA for accelerated inference.
-        Try on [Qwen Chat](https://chat.qwen.ai/), or [download model](https://huggingface.co/Qwen/Qwen-Image-Edit) to run locally with ComfyUI or diffusers.
+        gr.Markdown("""# Eigen Banana""")
+        gr.Markdown(""" [Eigen-Banana-Qwen-Image-Edit LoRA](https://huggingface.co/eigen-ai-labs/eigen-banana-qwen-image-edit) trained on [Apple's Pico-Banana-400k dataset](https://github.com/apple/pico-banana-400k)
+        with [lightx2v/Qwen-Image-Lightning](https://huggingface.co/lightx2v/Qwen-Image-Lightning) for 4-step inference 💨
         """)
 
     with gr.Row():
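`gr.themes.Citrus` ships only with recent Gradio releases, so the theme change quietly raises the minimum Gradio version. A defensive sketch for keeping the app working on older runtimes (the guard is hypothetical, not part of this commit):

```python
import gradio as gr

# Fall back to the default theme where gr.themes.Citrus is unavailable.
theme = gr.themes.Citrus() if hasattr(gr.themes, "Citrus") else gr.themes.Default()

with gr.Blocks(theme=theme) as demo:
    gr.Markdown("""# Eigen Banana""")

demo.launch()
```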
@@ -330,7 +332,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=4,
                     maximum=28,
                     step=1,
-                    value=
+                    value=4
                 )
 
                 # Removed num_images_per_prompt slider entirely
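The new default of `value=4` lines up with the 4-step Lightning LoRA fused above (the old default is truncated in this view). Presumed shape of the surrounding slider; the label and variable name are guesses, as only `minimum`/`maximum`/`step`/`value` appear in the diff:

```python
import gradio as gr

with gr.Blocks() as demo:
    num_inference_steps = gr.Slider(
        label="Inference steps",
        minimum=4,
        maximum=28,
        step=1,
        value=4,  # matches the fused 4-step Lightning LoRA
    )

demo.launch()
```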
|