foivospar
committed on
Commit · 5ef2207
1 Parent(s): f9abe33
fix demo
app.py CHANGED
@@ -17,6 +17,7 @@ from insightface.app import FaceAnalysis
 from PIL import Image
 import numpy as np
 import random
+import os
 
 import gradio as gr
 
@@ -32,7 +33,8 @@ else:
 
 # download models
 from huggingface_hub import hf_hub_download
-from modelscope import snapshot_download
+#from modelscope import snapshot_download
+from modelscope.hub.file_download import model_file_download
 
 hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="arc2face/config.json", local_dir="./models")
 hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="arc2face/diffusion_pytorch_model.safetensors", local_dir="./models")
@@ -40,7 +42,16 @@ hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="encoder/config.json", lo
 hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="encoder/pytorch_model.bin", local_dir="./models")
 hf_hub_download(repo_id="FoivosPar/Arc2Face", filename="arcface.onnx", local_dir="./models/antelopev2")
 
-base_model = snapshot_download('AI-ModelScope/stable-diffusion-v1-5', cache_dir='./models')
+#base_model = snapshot_download('AI-ModelScope/stable-diffusion-v1-5', cache_dir='./models')
+model_dir = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='model_index.json', cache_dir='./models')
+base_model = os.path.dirname(model_dir)
+_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='scheduler/scheduler_config.json', cache_dir='./models')
+_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='tokenizer/merges.txt', cache_dir='./models')
+_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='tokenizer/special_tokens_map.json', cache_dir='./models')
+_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='tokenizer/tokenizer_config.json', cache_dir='./models')
+_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='tokenizer/vocab.json', cache_dir='./models')
+_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='vae/config.json', cache_dir='./models')
+_ = model_file_download(model_id='AI-ModelScope/stable-diffusion-v1-5', file_path='vae/diffusion_pytorch_model.safetensors', cache_dir='./models')
 
 # Load face detection and recognition package
 app = FaceAnalysis(name='antelopev2', root='./', providers=['CPUExecutionProvider'])
@@ -59,7 +70,8 @@ pipeline = StableDiffusionPipeline.from_pretrained(
     text_encoder=encoder,
     unet=unet,
     torch_dtype=dtype,
-    safety_checker=None
+    safety_checker=None,
+    feature_extractor=None
 )
 pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
 pipeline = pipeline.to(device)
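For reference, a minimal sketch of the download pattern the updated code relies on (assuming the modelscope package is installed; the model id and file paths are taken from the diff above): model_file_download fetches a single file and returns its local path, so the directory containing model_index.json can serve as the diffusers-style model root once the scheduler, tokenizer, and VAE files have been fetched alongside it.

# Sketch of the per-file ModelScope download used above (not the full demo).
import os
from modelscope.hub.file_download import model_file_download

model_id = 'AI-ModelScope/stable-diffusion-v1-5'
files = [
    'model_index.json',
    'scheduler/scheduler_config.json',
    'tokenizer/merges.txt',
    'tokenizer/special_tokens_map.json',
    'tokenizer/tokenizer_config.json',
    'tokenizer/vocab.json',
    'vae/config.json',
    'vae/diffusion_pytorch_model.safetensors',
]

# model_file_download returns the local path of the file it fetched,
# so the parent directory of model_index.json is the model root.
paths = [model_file_download(model_id=model_id, file_path=f, cache_dir='./models') for f in files]
base_model = os.path.dirname(paths[0])
print(base_model)  # pass this to StableDiffusionPipeline.from_pretrained(...)

Only the scheduler, tokenizer, and VAE files are fetched, since the demo supplies its own text_encoder and unet; presumably that is also why the last hunk passes both safety_checker=None and feature_extractor=None, so from_pretrained does not look for safety-checker components that were never downloaded.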