Update app.py
app.py CHANGED
@@ -1,6 +1,7 @@
 # import os
 import spaces
 
+import time
 import gradio as gr
 import torch
 from PIL import Image
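The `import spaces` line marks this as a Hugging Face ZeroGPU Space, where GPU work normally runs inside a function decorated with `@spaces.GPU`. A minimal sketch of that pattern; the decorator is the real `spaces` API, but the function below is a hypothetical stand-in, not this app's `generate_image`:

    import spaces
    import torch

    @spaces.GPU  # ZeroGPU attaches a GPU only for the duration of this call
    def double_on_gpu(x: torch.Tensor) -> torch.Tensor:
        # Hypothetical example function, not part of app.py
        return (x.to("cuda") * 2).cpu()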
@@ -62,7 +63,7 @@ class HFEmbedder(nn.Module):
 
 
 device = "cuda"
-t5 = HFEmbedder("
+t5 = HFEmbedder("DeepFloyd/t5-v1_1-xxl", max_length=512, torch_dtype=torch.bfloat16).to(device)
 clip = HFEmbedder("openai/clip-vit-large-patch14", max_length=77, torch_dtype=torch.bfloat16).to(device)
 ae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16).to(device)
 # quantize(t5, weights=qfloat8)
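Note that `device = "cuda"` here is evaluated at import time, so these module-level `.to(device)` calls assume a GPU is visible when app.py loads. The commit adds an availability guard inside `generate_image` (next hunk); a sketch of the same guard applied at load time, assuming the `HFEmbedder` wrapper class defined earlier in app.py:

    import torch

    # Fall back to CPU when no GPU is visible at import time
    # (assumption: HFEmbedder is the wrapper class defined above in app.py)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    t5 = HFEmbedder("DeepFloyd/t5-v1_1-xxl", max_length=512, torch_dtype=torch.bfloat16).to(device)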
@@ -760,14 +761,15 @@ def generate_image(
     if seed == 0:
         seed = int(random.random() * 1000000)
 
-
-    torch_device = torch.device(
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    torch_device = torch.device(device)
 
     global model, model_zero_init
     if not model_zero_init:
         model = model.to(torch_device)
         model_zero_init = True
-
+
+    t = time.perf_counter()
     if do_img2img and init_image is not None:
         init_image = get_image(init_image)
         if resize_img:
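This hunk makes `generate_image` pick the device at call time and move the model onto it only once per process, tracked through the global `model_zero_init` flag. A standalone sketch of that lazy-placement pattern, with a toy module standing in for the FLUX transformer:

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 4)      # toy stand-in for the real model
    model_zero_init = False

    def ensure_model_on_device() -> torch.device:
        # Move the weights on the first call only; later calls are no-ops.
        global model, model_zero_init
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if not model_zero_init:
            model = model.to(device)
            model_zero_init = True
        return device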
@@ -792,9 +794,8 @@ def generate_image(
         timesteps = timesteps[t_idx:]
         x = t * x + (1.0 - t) * init_image.to(x.dtype)
 
-
-
-    x = denoise(model, **inp, timesteps=timesteps, guidance=guidance)
+    inp = prepare(t5=t5, clip=clip, img=x, prompt=prompt)
+    x = denoise(model, **inp, timesteps=timesteps, guidance=guidance)
 
     # with profile(activities=[ProfilerActivity.CPU],record_shapes=True,profile_memory=True) as prof:
     #     print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=20))
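The added `prepare(...)` call builds the keyword arguments that `denoise` then receives via `**inp`. A toy sketch of that dict-unpacking convention; these helper names are illustrative only, not the repo's actual `prepare`/`denoise` signatures:

    def toy_prepare(img, prompt):
        # Returns a dict whose keys match the sampler's parameter names
        return {"img": img, "txt": prompt}

    def toy_denoise(model, img, txt, timesteps, guidance):
        return f"{model}: {len(timesteps)} steps on {img} for '{txt}' @ cfg {guidance}"

    inp = toy_prepare("latents", "a photo of a forest")
    print(toy_denoise("flux", **inp, timesteps=[1.0, 0.5, 0.0], guidance=3.5))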
@@ -807,7 +808,8 @@ def generate_image(
     x = x.clamp(-1, 1)
     x = rearrange(x[0], "c h w -> h w c")
     img = Image.fromarray((127.5 * (x + 1.0)).cpu().byte().numpy())
-
+    seed += f"\nGenerated in {time.perf_counter() - t:.2f}s"
+
     return img, seed
 
 def create_demo():
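One caveat with the timing line: when the zero-seed branch runs, `seed` is an `int` (set via `int(random.random() * 1000000)`), and `int += str` raises a `TypeError` in Python, so `seed += f"..."` only works if Gradio hands the function a string. A defensive sketch that formats explicitly, assuming the `t` captured earlier in `generate_image`:

    import time

    t = time.perf_counter()
    # ... generation work ...
    seed = 1234                       # int, as produced above
    # Explicit formatting works whether seed arrives as int or str,
    # unlike `seed += f"..."`, which fails on an int
    seed = f"{seed}\nGenerated in {time.perf_counter() - t:.2f}s"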