Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -147,7 +147,7 @@ def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, r
         lora_to_use = custom_lora
     elif selected_index is not None and flux_loras and selected_index < len(flux_loras):
         lora_to_use = flux_loras[selected_index]
-
+    print(f"Loaded {len(flux_loras)} LoRAs from JSON")
     # Load LoRA if needed
     if lora_to_use and lora_to_use != current_lora:
         try:
@@ -160,11 +160,7 @@ def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, r
             if lora_path:
                 pipe.load_lora_weights(lora_path, adapter_name="selected_lora")
                 current_lora = lora_to_use
-
-                # Add trigger word to prompt if available
-                trigger_word = lora_to_use.get("trigger_word", "")
-                if trigger_word and trigger_word not in prompt:
-                    prompt = f"convert the style of this portrait photo to {trigger_word} while maintaining the identity of the person. {prompt}. make sure to maintain the person's facial identity and features, while still changing the style to {trigger_word}."
+

         except Exception as e:
             print(f"Error loading LoRA: {e}")
@@ -172,13 +168,12 @@ def infer_with_lora(input_image, prompt, selected_index, custom_lora, seed=42, r

     # Set LoRA scale if LoRA is loaded
     if current_lora and hasattr(pipe, 'set_adapters'):
-        try:
-            pipe.set_adapters("selected_lora", adapter_weights=[lora_scale])
-        except:
-            # Fallback for older diffusers versions
-            pass
+        pipe.set_adapters("selected_lora", adapter_weights=[lora_scale])

     input_image = input_image.convert("RGB")
+    # Add trigger word to prompt
+    trigger_word = lora_to_use["trigger_word"]
+    prompt = f"convert the style of this portrait photo to {trigger_word} while maintaining the identity of the person. {prompt}. make sure to maintain the person's facial identity and features, while still changing the style to {trigger_word}."

     try:
         image = pipe(
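
For reference, a minimal sketch of the LoRA loading and scaling calls the updated code relies on. The pipeline class, base checkpoint, LoRA repo id, and scale below are illustrative assumptions (none of them appear in this diff); only the load_lora_weights(..., adapter_name=...) and set_adapters(...) calls mirror the code above.

import torch
from diffusers import FluxKontextPipeline  # assumed pipeline class; the diff does not show which pipeline the Space builds

# Assumed base checkpoint; the Space's actual model id is not visible here.
pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16
).to("cuda")

# Load the selected LoRA under a named adapter, then set its weight directly,
# as the updated code now does without the old try/except fallback.
pipe.load_lora_weights("some-user/some-flux-lora", adapter_name="selected_lora")  # hypothetical repo id
pipe.set_adapters("selected_lora", adapter_weights=[0.75])  # 0.75 stands in for lora_scale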
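
And a self-contained sketch of the prompt construction as it stands after this commit, assuming each LoRA entry is a dict with a trigger_word key; the entry and base prompt below are placeholders. Unlike the removed block, the new code indexes trigger_word directly and always rewrites the prompt, so an entry without that key would raise KeyError.

# Hypothetical LoRA entry; real entries come from the Space's loras.json.
lora_to_use = {"trigger_word": "example style"}
prompt = "a portrait photo"  # placeholder user prompt

# Post-commit behavior: the trigger word is injected unconditionally
# (the old code used .get() and only rewrote the prompt if the word was absent).
trigger_word = lora_to_use["trigger_word"]
prompt = f"convert the style of this portrait photo to {trigger_word} while maintaining the identity of the person. {prompt}. make sure to maintain the person's facial identity and features, while still changing the style to {trigger_word}."
print(prompt)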