more context

Files changed:
- README.md +21 -1
- app.py +8 -3
- public/index.html +6 -1
README.md
CHANGED

@@ -11,26 +11,46 @@ pinned: false
 
 This demo showcases [Latent Consistency Model (LCM)](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) using [Diffusers](https://github.com/huggingface/diffusers/tree/main/examples/community#latent-consistency-pipeline) with a MJPEG stream server.
 
+You need a webcam to run this demo. 🤗
+
 ## Running Locally
 
 You need CUDA and Python
 `TIMEOUT`: limit user session timeout
 `SAFETY_CHECKER`: disabled if you want NSFW filter off
+`MAX_QUEUE_SIZE`: limit number of users on current app instance
 
 ```bash
 python -m venv venv
 source venv/bin/activate
 pip install -r requirements.txt
-
+uvicorn "app:app" --host 0.0.0.0 --port 7860 --reload
+```
+or with environment variables
+```bash
+TIMEOUT=120 SAFETY_CHECKER=True MAX_QUEUE_SIZE=4 uvicorn "app:app" --host 0.0.0.0 --port 7860 --reload
 ```
 
+If you're running locally and want to test it on Mobile Safari, the webserver needs to be served over HTTPS.
+
+```bash
+openssl req -newkey rsa:4096 -nodes -keyout key.pem -x509 -days 365 -out certificate.pem
+uvicorn "app:app" --host 0.0.0.0 --port 7860 --reload --log-level info --ssl-certfile=certificate.pem --ssl-keyfile=key.pem
+```
 ## Docker
 You need NVIDIA Container Toolkit for Docker
 
 ```bash
 docker build -t lcm-live .
+docker run -ti -p 7860:7860 --gpus all lcm-live
+```
+
+or with environment variables
+
+```bash
 docker run -ti -e TIMEOUT=0 -e SAFETY_CHECKER=False -p 7860:7860 --gpus all lcm-live
 ```
+
 # Demo on Hugging Face
 https://huggingface.co/spaces/radames/Real-Time-Latent-Consistency-Model
 
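The README describes the demo as LCM plus an MJPEG stream server. For readers unfamiliar with that pattern, here is a minimal, illustrative sketch of how an MJPEG stream can be served from FastAPI with `StreamingResponse`; the frame source below is a placeholder, not the repo's code (the real app streams images produced by the LCM img2img pipeline per connected user).

```python
import io
import asyncio

from PIL import Image
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

app = FastAPI()


async def next_frame() -> Image.Image:
    # Placeholder frame source: a solid-color image at ~30 fps.
    # The real server would return the latest generated image instead.
    await asyncio.sleep(1 / 30)
    return Image.new("RGB", (512, 512), (0, 0, 0))


async def mjpeg_generator():
    # multipart/x-mixed-replace makes the browser replace the previous JPEG
    # with each new part, which is what lets an <img> tag "play" the stream.
    while True:
        frame = await next_frame()
        buf = io.BytesIO()
        frame.save(buf, format="JPEG")
        yield (
            b"--frame\r\n"
            b"Content-Type: image/jpeg\r\n\r\n" + buf.getvalue() + b"\r\n"
        )


@app.get("/stream")
async def stream():
    return StreamingResponse(
        mjpeg_generator(),
        media_type="multipart/x-mixed-replace; boundary=frame",
    )
```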
app.py
CHANGED

@@ -9,7 +9,7 @@ from fastapi.middleware.cors import CORSMiddleware
 from fastapi.responses import StreamingResponse, JSONResponse
 from fastapi.staticfiles import StaticFiles
 
-from diffusers import DiffusionPipeline
+from diffusers import DiffusionPipeline, AutoencoderTiny
 import torch
 from PIL import Image
 import numpy as np
@@ -19,7 +19,7 @@ import uuid
 import os
 import time
 
-MAX_QUEUE_SIZE =
+MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
 TIMEOUT = float(os.environ.get("TIMEOUT", 0))
 SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)
 
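The hunk above makes `MAX_QUEUE_SIZE` configurable from the environment, with `0` as the default. A small standalone sketch of the resulting semantics (the helper function is hypothetical, added only to illustrate how a value of 0 disables the limit):

```python
import os

# Optional limits read from the environment; 0 means "no limit".
MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
TIMEOUT = float(os.environ.get("TIMEOUT", 0))
SAFETY_CHECKER = os.environ.get("SAFETY_CHECKER", None)


def server_is_full(active_users: int) -> bool:
    # Enforce the cap only when MAX_QUEUE_SIZE is positive, mirroring the
    # `MAX_QUEUE_SIZE > 0 and ...` check added in app.py.
    return MAX_QUEUE_SIZE > 0 and active_users >= MAX_QUEUE_SIZE


if __name__ == "__main__":
    # True only when 0 < MAX_QUEUE_SIZE <= 5; with the default of 0 it is False.
    print(server_is_full(active_users=5))
```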
@@ -40,7 +40,12 @@ else:
         custom_pipeline="latent_consistency_img2img.py",
         custom_revision="main",
     )
+#TODO try to use tiny VAE
+# pipe.vae = AutoencoderTiny.from_pretrained(
+#     "madebyollin/taesd", torch_dtype=torch.float16, use_safetensors=True
+# )
 pipe.to(torch_device="cuda", torch_dtype=torch.float16)
+pipe.set_progress_bar_config(disable=True)
 user_queue_map = {}
 
 
|
|
| 88 |
@app.websocket("/ws")
|
| 89 |
async def websocket_endpoint(websocket: WebSocket):
|
| 90 |
await websocket.accept()
|
| 91 |
-
if len(user_queue_map) >= MAX_QUEUE_SIZE:
|
| 92 |
print("Server is full")
|
| 93 |
await websocket.send_json({"status": "error", "message": "Server is full"})
|
| 94 |
await websocket.close()
|
|
|
|
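To see the updated capacity check in context, here is a small, self-contained sketch of a FastAPI websocket endpoint using the same guard. The guard, queue map name, and error message follow the diff; the connection handling around them (user id, echo loop, cleanup) is simplified for illustration and is not the repo's implementation:

```python
import os
import uuid

from fastapi import FastAPI, WebSocket

app = FastAPI()

# 0 (the default) disables the limit, as in the updated app.py.
MAX_QUEUE_SIZE = int(os.environ.get("MAX_QUEUE_SIZE", 0))
user_queue_map: dict[str, dict] = {}


@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    await websocket.accept()
    # Reject new users only when a positive limit is configured and reached.
    if MAX_QUEUE_SIZE > 0 and len(user_queue_map) >= MAX_QUEUE_SIZE:
        print("Server is full")
        await websocket.send_json({"status": "error", "message": "Server is full"})
        await websocket.close()
        return

    user_id = str(uuid.uuid4())
    user_queue_map[user_id] = {}
    try:
        await websocket.send_json({"status": "connected", "userId": user_id})
        while True:
            # Echo loop as a stand-in for the real per-user frame queue.
            data = await websocket.receive_json()
            await websocket.send_json({"status": "ok", "echo": data})
    finally:
        user_queue_map.pop(user_id, None)
```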
public/index.html
CHANGED

@@ -187,9 +187,14 @@
       </p>
     </article>
     <div>
+      <h2 class="font-medium">Prompt</h2>
+      <p class="text-sm text-gray-500">
+        Change the prompt to generate different images.
+      </p>
       <div class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center">
         <textarea type="text" id="prompt" class="font-light w-full px-3 py-2 mx-1 resize-none outline-none"
-          title="Prompt"
+          title="Prompt, this is an example, feel free to modify"
+          oninput="this.style.height = 0;this.style.height = this.scrollHeight + 'px'"
           placeholder="Add your prompt here...">Portrait of The Terminator with , glare pose, detailed, intricate, full of colour, cinematic lighting, trending on artstation, 8k, hyperrealistic, focused, extreme details, unreal engine 5, cinematic, masterpiece</textarea>
       </div>
 