Update app.py
app.py CHANGED
@@ -26,6 +26,7 @@ from torchvision import transforms
 
 import spaces
 from huggingface_hub import snapshot_download
+import time
 
 DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 
@@ -116,9 +117,9 @@ def init_pipe():
         controlnet_transformer,
     )
 
-    pipe.vae.enable_slicing()
-    pipe.vae.enable_tiling()
-    pipe.enable_model_cpu_offload()
+    # pipe.vae.enable_slicing()
+    # pipe.vae.enable_tiling()
+    # pipe.enable_model_cpu_offload()
 
     return pipe
 
@@ -129,10 +130,11 @@ def inference(source_images,
               pipe, vae, guidance_scale,
               h, w, random_seed)->List[PIL.Image.Image]:
     torch.manual_seed(random_seed)
-
-    pipe.
-    pipe.
-    pipe.
+
+    pipe.to(DEVICE)
+    # pipe.vae.to(DEVICE)
+    # pipe.transformer.to(DEVICE)
+    # pipe.controlnet_transformer.to(DEVICE)
 
     source_pixel_values = source_images/127.5 - 1.0
     source_pixel_values = source_pixel_values.to(torch.float16).to(DEVICE)
@@ -161,6 +163,7 @@ def inference(source_images,
     image_latents = None
     latents = source_latents
 
+    a=time.perf_counter()
     video = pipe(
         prompt = text_prompt,
         negative_prompt = negative_prompt,
@@ -169,11 +172,13 @@ def inference(source_images,
         height = h,
         width = w,
         num_frames = f,
-        num_inference_steps =
+        num_inference_steps = 30,
         interval = 6,
         guidance_scale = guidance_scale,
         generator = torch.Generator(device=DEVICE).manual_seed(random_seed)
     ).frames[0]
+    b=time.perf_counter()
+    print(f"Denoise 30 steps in {b-a}s")
 
     return video
 
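Taken together, the commit drops the diffusers CPU-offload/VAE-slicing setup from init_pipe(), moves the whole pipeline onto the GPU inside inference() instead, pins num_inference_steps to 30, and brackets the denoising call with time.perf_counter() to log how long the 30 steps take. A minimal sketch of that pattern, using a hypothetical run_with_timing() helper and the call signature copied from the diff (interval=6 is a kwarg of this Space's custom pipeline, not standard diffusers), might look like:

import time
import torch

DEVICE = "cuda" if torch.cuda.is_available() else "cpu"

def run_with_timing(pipe, text_prompt, negative_prompt, h, w, f,
                    guidance_scale, random_seed):
    # Move the whole pipeline to the GPU right before denoising instead of
    # relying on pipe.enable_model_cpu_offload() at load time.
    pipe.to(DEVICE)

    start = time.perf_counter()
    video = pipe(
        prompt=text_prompt,
        negative_prompt=negative_prompt,
        height=h,
        width=w,
        num_frames=f,
        num_inference_steps=30,   # fixed step count from the commit
        interval=6,               # custom kwarg of this Space's pipeline
        guidance_scale=guidance_scale,
        generator=torch.Generator(device=DEVICE).manual_seed(random_seed),
    ).frames[0]
    elapsed = time.perf_counter() - start
    print(f"Denoise 30 steps in {elapsed:.1f}s")
    return video

Keeping the model resident on the GPU for the whole call avoids accelerate's offload hooks, which the commit appears to be working around; the perf_counter pair then makes the cost of the 30-step denoise visible in the Space logs.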