back to float16
Browse files — app-img2img.py (+1, -3)
app-img2img.py
CHANGED
|
@@ -32,7 +32,6 @@ if SAFETY_CHECKER == "True":
|
|
| 32 |
"SimianLuo/LCM_Dreamshaper_v7",
|
| 33 |
custom_pipeline="latent_consistency_img2img.py",
|
| 34 |
custom_revision="main",
|
| 35 |
-
torch_dtype=torch.float32
|
| 36 |
)
|
| 37 |
else:
|
| 38 |
pipe = DiffusionPipeline.from_pretrained(
|
|
@@ -40,14 +39,13 @@ else:
|
|
| 40 |
safety_checker=None,
|
| 41 |
custom_pipeline="latent_consistency_img2img.py",
|
| 42 |
custom_revision="main",
|
| 43 |
-
torch_dtype=torch.float32
|
| 44 |
)
|
| 45 |
#TODO try to use tiny VAE
|
| 46 |
# pipe.vae = AutoencoderTiny.from_pretrained(
|
| 47 |
# "madebyollin/taesd", torch_dtype=torch.float16, use_safetensors=True
|
| 48 |
# )
|
| 49 |
pipe.set_progress_bar_config(disable=True)
|
| 50 |
-
pipe.to(torch_device="cuda", torch_dtype=torch.float32)
|
| 51 |
pipe.unet.to(memory_format=torch.channels_last)
|
| 52 |
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
| 53 |
user_queue_map = {}
|
|
|
|
| 32 |
"SimianLuo/LCM_Dreamshaper_v7",
|
| 33 |
custom_pipeline="latent_consistency_img2img.py",
|
| 34 |
custom_revision="main",
|
|
|
|
| 35 |
)
|
| 36 |
else:
|
| 37 |
pipe = DiffusionPipeline.from_pretrained(
|
|
|
|
| 39 |
safety_checker=None,
|
| 40 |
custom_pipeline="latent_consistency_img2img.py",
|
| 41 |
custom_revision="main",
|
|
|
|
| 42 |
)
|
| 43 |
#TODO try to use tiny VAE
|
| 44 |
# pipe.vae = AutoencoderTiny.from_pretrained(
|
| 45 |
# "madebyollin/taesd", torch_dtype=torch.float16, use_safetensors=True
|
| 46 |
# )
|
| 47 |
pipe.set_progress_bar_config(disable=True)
|
| 48 |
+
pipe.to(torch_device="cuda", torch_dtype=torch.float16)
|
| 49 |
pipe.unet.to(memory_format=torch.channels_last)
|
| 50 |
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
| 51 |
user_queue_map = {}
|