Spaces: Running on Zero

Commit: Update optimized.py — Browse files

File changed: optimized.py (+7 −2)
|
@@ -17,6 +17,7 @@ import gc
|
|
| 17 |
huggingface_token = os.getenv("HUGGINFACE_TOKEN")
|
| 18 |
device = "cuda"
|
| 19 |
torch_dtype = torch.bfloat16
|
|
|
|
| 20 |
|
| 21 |
def self_attention_slicing(module, slice_size=3):
|
| 22 |
"""Modified from Diffusers' original for Flux compatibility"""
|
|
@@ -107,7 +108,7 @@ pipe.enable_attention_slicing(1)
|
|
| 107 |
|
| 108 |
print(f"VRAM used: {torch.cuda.memory_allocated()/1e9:.2f}GB")
|
| 109 |
@spaces.GPU
|
| 110 |
-
def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_scale, guidance_scale, guidance_start, guidance_end):
|
| 111 |
print(f"Memory Usage: {torch.cuda.memory_summary(device=None, abbreviated=False)}")
|
| 112 |
|
| 113 |
# Load control image
|
|
@@ -117,6 +118,8 @@ def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_
|
|
| 117 |
h = h - h % 8
|
| 118 |
control_image = control_image.resize((int(w * scale), int(h * scale)))
|
| 119 |
print("Size to: " + str(control_image.size[0]) + ", " + str(control_image.size[1]))
|
|
|
|
|
|
|
| 120 |
image = pipe(
|
| 121 |
prompt=prompt,
|
| 122 |
control_image=control_image,
|
|
@@ -126,7 +129,8 @@ def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_
|
|
| 126 |
height=h,
|
| 127 |
width=w,
|
| 128 |
control_guidance_start=guidance_start,
|
| 129 |
-
control_guidance_end=guidance_end
|
|
|
|
| 130 |
).images[0]
|
| 131 |
return image
|
| 132 |
# Create Gradio interface
|
|
@@ -136,6 +140,7 @@ iface = gr.Interface(
|
|
| 136 |
gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
|
| 137 |
gr.Slider(1, 3, value=1, label="Scale"),
|
| 138 |
gr.Slider(2, 20, value=8, label="Steps"),
|
|
|
|
| 139 |
gr.Image(type="pil", label="Control Image"),
|
| 140 |
gr.Slider(0, 1, value=0.6, label="ControlNet Scale"),
|
| 141 |
gr.Slider(1, 20, value=3.5, label="Guidance Scale"),
|
|
|
|
| 17 |
huggingface_token = os.getenv("HUGGINFACE_TOKEN")
|
| 18 |
device = "cuda"
|
| 19 |
torch_dtype = torch.bfloat16
|
| 20 |
+
MAX_SEED = 1000000
|
| 21 |
|
| 22 |
def self_attention_slicing(module, slice_size=3):
|
| 23 |
"""Modified from Diffusers' original for Flux compatibility"""
|
|
|
|
| 108 |
|
| 109 |
print(f"VRAM used: {torch.cuda.memory_allocated()/1e9:.2f}GB")
|
| 110 |
@spaces.GPU
|
| 111 |
+
def generate_image(prompt, scale, steps, seed, control_image, controlnet_conditioning_scale, guidance_scale, guidance_start, guidance_end):
|
| 112 |
print(f"Memory Usage: {torch.cuda.memory_summary(device=None, abbreviated=False)}")
|
| 113 |
|
| 114 |
# Load control image
|
|
|
|
| 118 |
h = h - h % 8
|
| 119 |
control_image = control_image.resize((int(w * scale), int(h * scale)))
|
| 120 |
print("Size to: " + str(control_image.size[0]) + ", " + str(control_image.size[1]))
|
| 121 |
+
generator = torch.Generator().manual_seed(seed)
|
| 122 |
+
|
| 123 |
image = pipe(
|
| 124 |
prompt=prompt,
|
| 125 |
control_image=control_image,
|
|
|
|
| 129 |
height=h,
|
| 130 |
width=w,
|
| 131 |
control_guidance_start=guidance_start,
|
| 132 |
+
control_guidance_end=guidance_end,
|
| 133 |
+
generator=generator
|
| 134 |
).images[0]
|
| 135 |
return image
|
| 136 |
# Create Gradio interface
|
|
|
|
| 140 |
gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
|
| 141 |
gr.Slider(1, 3, value=1, label="Scale"),
|
| 142 |
gr.Slider(2, 20, value=8, label="Steps"),
|
| 143 |
+
gr.Slider(0, MAX_SEED, value=42, label="Seed"),
|
| 144 |
gr.Image(type="pil", label="Control Image"),
|
| 145 |
gr.Slider(0, 1, value=0.6, label="ControlNet Scale"),
|
| 146 |
gr.Slider(1, 20, value=3.5, label="Guidance Scale"),
|