Spaces: Running on Zero
Update app.py

app.py CHANGED
@@ -569,7 +569,8 @@ class GuiSD:
             type_model_precision=model_precision,
             retain_task_model_in_cache=retain_task_cache_gui,
         )
-
+        self.model.stream_config(concurrency=5, latent_resize_by=1, vae_decoding=False)
+
         if task != "txt2img" and not image_control:
             raise ValueError("No control image found: To use this function, you have to upload an image in 'Image ControlNet/Inpaint/Img2img'")

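The `stream_config(concurrency=5, latent_resize_by=1, vae_decoding=False)` call added here appears to configure how intermediate previews are streamed while the pipeline runs; `vae_decoding=False` suggests preview frames skip the full VAE decode. A common cheap substitute for the VAE on previews is a fixed linear map from the 4-channel Stable Diffusion latent to RGB. The sketch below illustrates only that general technique; it is not stablepy or DiffuseCraft code, and the meanings assumed for `concurrency` and `latent_resize_by` are guesses from the argument names.

```python
import torch

# Widely used linear approximation from SD 1.x/2.x 4-channel latents to RGB;
# one row per latent channel, one column per R/G/B output.
LATENT_RGB_FACTORS = torch.tensor([
    [ 0.298,  0.207,  0.208],
    [ 0.187,  0.286,  0.173],
    [-0.158,  0.189,  0.264],
    [-0.184, -0.271, -0.473],
])

def cheap_latent_preview(latent: torch.Tensor) -> torch.Tensor:
    """Map a (4, H, W) latent to an (H, W, 3) uint8 preview without running the VAE."""
    rgb = torch.einsum("chw,cr->rhw", latent, LATENT_RGB_FACTORS)  # -> (3, H, W)
    rgb = ((rgb + 1.0) / 2.0).clamp(0.0, 1.0) * 255.0              # rescale to [0, 255]
    return rgb.permute(1, 2, 0).to(torch.uint8)

# e.g. preview = cheap_latent_preview(torch.randn(4, 64, 64))
```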
@@ -708,8 +709,6 @@ class GuiSD:
             "ip_adapter_scale": params_ip_scale,
         }

-        # print(pipe_params)
-
         random_number = random.randint(1, 100)
         if random_number < 25 and num_images < 3:
             if not upscaler_model and steps < 45 and task in ["txt2img", "img2img"] and not adetailer_active_a and not adetailer_active_b:
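One detail in the bonus-image gate above: `random.randint(1, 100)` is uniform over 1–100 inclusive, so `random_number < 25` matches the 24 outcomes 1–24, i.e. a 24% (not 25%) chance per run. A quick empirical check:

```python
import random

# randint(1, 100) < 25 hits on the 24 values 1..24 out of 100 equally
# likely outcomes, so the "Num images x 2" bonus fires ~24% of the time.
trials = 100_000
hits = sum(random.randint(1, 100) < 25 for _ in range(trials))
print(hits / trials)  # ~0.24
```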
@@ -717,18 +716,18 @@ class GuiSD:
                 pipe_params["num_images"] = num_images
                 gr.Info("Num images x 2 🎉")

-        # Maybe fix lora issue: 'Cannot copy out of meta tensor; no data!''
-        self.model.pipe.to("cuda:0" if torch.cuda.is_available() else "cpu")
-
         info_state = f"PROCESSING "
-        for img, seed,
+        for img, seed, image_path, metadata in self.model(**pipe_params):
             info_state += ">"
-            if
+            if image_path:
                 info_state = f"COMPLETED. Seeds: {str(seed)}"
                 if vae_msg:
                     info_state = info_state + "<br>" + vae_msg
                 if msg_lora:
                     info_state = info_state + "<br>" + "<br>".join(msg_lora)
+
+                info_state = info_state + "<br>" + "GENERATION DATA:<br>" + "<br>-------<br>".join(metadata).replace("\n", "<br>")
+
             yield img, info_state

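The rewritten loop above relies on Gradio's generator-handler streaming: each `yield img, info_state` pushes an updated image and status string to the bound output components. A minimal self-contained sketch of that pattern, with `fake_pipeline` and the components as illustrative stand-ins rather than anything from app.py:

```python
import time
import gradio as gr

def fake_pipeline(prompt):
    """Generator handler: Gradio streams every (image, status) tuple it yields."""
    status = "PROCESSING "
    for _ in range(3):
        time.sleep(0.5)        # stand-in for a batch of diffusion steps
        status += ">"
        yield None, status     # intermediate update: status only, no image yet
    # the final yield carries the finished image (a real file path at runtime)
    yield "output.png", "COMPLETED"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    image = gr.Image(label="Result")
    info = gr.HTML()
    prompt.submit(fake_pipeline, inputs=prompt, outputs=[image, info])

# demo.launch()
```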
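For reference, the new GENERATION DATA line joins the per-image metadata entries with an HTML divider and rewrites newlines as `<br>`. Assuming `metadata` is a list of multi-line strings, one per generated image (an assumption about the fourth value yielded by `self.model(**pipe_params)`), it behaves like this:

```python
# Assumed shape of `metadata`: one multi-line string per generated image.
metadata = [
    "prompt: a cat\nsteps: 30\nseed: 42",
    "prompt: a cat\nsteps: 30\nseed: 43",
]
html = "GENERATION DATA:<br>" + "<br>-------<br>".join(metadata).replace("\n", "<br>")
print(html)
# GENERATION DATA:<br>prompt: a cat<br>steps: 30<br>seed: 42<br>-------<br>prompt: a cat<br>steps: 30<br>seed: 43
```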