Spaces: Running on Zero
Update optimized.py
optimized.py CHANGED (+9 -2)
@@ -8,6 +8,7 @@ from diffusers import FluxControlNetModel, FluxControlNetPipeline, AutoencoderKL
 import gradio as gr
 from accelerate import dispatch_model, infer_auto_device_map
 from PIL import Image
+import gc
 # Corrected and optimized FluxControlNet implementation
 
 def self_attention_slicing(module, slice_size=3):
@@ -83,6 +84,8 @@ pipe.enable_attention_slicing(1)
 print(f"VRAM used: {torch.cuda.memory_allocated()/1e9:.2f}GB")
 @spaces.GPU
 def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_scale, guidance_scale):
+    print(f"Memory Usage: {torch.cuda.memory_summary(device=None, abbreviated=False)}")
+
     # Load control image
     control_image = load_image(control_image)
     w, h = control_image.size
@@ -100,8 +103,11 @@ def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_
     ).images[0]
     print(f"VRAM used: {torch.cuda.memory_allocated()/1e9:.2f}GB")
     # Aggressive memory cleanup
-
+    torch.cuda.empty_cache()
     # torch.cuda.ipc_collect()
+
+    del variables
+    gc.collect()
     print(f"VRAM used: {torch.cuda.memory_allocated()/1e9:.2f}GB")
     return image
 # Create Gradio interface
@@ -122,6 +128,7 @@ iface = gr.Interface(
     description="Generate images using the FluxControlNetPipeline. Upload a control image and enter a prompt to create an image.",
 )
 print(f"Memory Usage: {torch.cuda.memory_summary(device=None, abbreviated=False)}")
-
+del variables
+gc.collect()
 # Launch the app
 iface.launch()
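For reference, the cleanup these hunks add to generate_image() boils down to the pattern sketched below. The helper names are illustrative, not from optimized.py; note also that "del variables" as committed only works if a local literally named variables exists elsewhere in the file, otherwise it raises NameError, and the usual form is to delete the concrete intermediates (for example the loaded control image) before collecting.

import gc
import torch

def log_vram(tag):
    # Same readout the script prints before and after generation.
    print(f"{tag} VRAM used: {torch.cuda.memory_allocated()/1e9:.2f}GB")

def aggressive_cleanup():
    # Drop unreachable Python objects first so their CUDA tensors can be freed,
    # then hand the caching allocator's blocks back to the driver.
    gc.collect()
    torch.cuda.empty_cache()
    # torch.cuda.ipc_collect() stays commented out in the commit; it only matters
    # when CUDA tensors have been shared across processes.

Inside generate_image() this amounts to deleting the actual local references (such as control_image) and then calling aggressive_cleanup() right after the pipeline call returns.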
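The two memory readouts the file alternates between differ in granularity; a small sketch of when each helps (the wrapper name report_memory is an assumption, not from the diff):

import torch

def report_memory(verbose=False):
    # memory_allocated(): compact figure, bytes currently held by live tensors.
    print(f"VRAM used: {torch.cuda.memory_allocated()/1e9:.2f}GB")
    if verbose:
        # memory_summary(): full caching-allocator report (allocated, reserved and
        # inactive segments), more useful for spotting fragmentation than leaks.
        print(torch.cuda.memory_summary(device=None, abbreviated=False))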
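Putting the hunks in context, here is a rough end-to-end sketch of a ZeroGPU Space shaped like this file: the pipeline is built once at import time, only the generation function carries @spaces.GPU so a GPU is attached per call, and gr.Interface wraps it. The checkpoint IDs, widget choices and slider ranges below are illustrative assumptions, not values taken from this diff.

import gc

import gradio as gr
import spaces
import torch
from diffusers import FluxControlNetModel, FluxControlNetPipeline
from diffusers.utils import load_image

# Illustrative checkpoints; the repo IDs actually used by optimized.py are not shown here.
controlnet = FluxControlNetModel.from_pretrained(
    "jasperai/Flux.1-dev-Controlnet-Upscaler", torch_dtype=torch.bfloat16
)
pipe = FluxControlNetPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", controlnet=controlnet, torch_dtype=torch.bfloat16
).to("cuda")
pipe.enable_attention_slicing(1)  # trade speed for lower peak VRAM, as in the file

@spaces.GPU  # ZeroGPU attaches a GPU only for the duration of this call
def generate_image(prompt, scale, steps, control_image,
                   controlnet_conditioning_scale, guidance_scale):
    control_image = load_image(control_image)
    w, h = control_image.size
    image = pipe(
        prompt=prompt,
        control_image=control_image,
        width=int(w * scale),
        height=int(h * scale),
        num_inference_steps=int(steps),
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        guidance_scale=guidance_scale,
    ).images[0]
    # Cleanup mirroring the commit: drop the local reference, collect, release the cache.
    del control_image
    gc.collect()
    torch.cuda.empty_cache()
    return image

iface = gr.Interface(
    fn=generate_image,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(1, 4, value=2, step=1, label="Upscale factor"),
        gr.Slider(4, 50, value=28, step=1, label="Steps"),
        gr.Image(type="filepath", label="Control image"),
        gr.Slider(0.0, 1.0, value=0.6, label="ControlNet conditioning scale"),
        gr.Slider(1.0, 20.0, value=3.5, label="Guidance scale"),
    ],
    outputs=gr.Image(label="Result"),
    description="Generate images using the FluxControlNetPipeline. Upload a control image and enter a prompt to create an image.",
)

# Launch the app
iface.launch()

Loading the weights at module level means each worker pays the from_pretrained cost once, while the decorator keeps GPU occupancy limited to actual generate_image calls.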