Commit bd2f353
Parent(s): 1942dad

Create app.py
app.py ADDED
@@ -0,0 +1,49 @@
+import gradio as gr
+from subprocess import call
+
+with gr.Blocks() as ui:
+    with gr.Row():
+        video = gr.File(label="Video or Image", info="Filepath of video/image that contains faces to use")
+        audio = gr.File(label="Audio", info="Filepath of video/audio file to use as raw audio source")
+        with gr.Column():
+            checkpoint = gr.Radio(["wav2lip", "wav2lip_gan"], label="Checkpoint", info="Name of saved checkpoint to load weights from")
+            no_smooth = gr.Checkbox(label="No Smooth", info="Prevent smoothing face detections over a short temporal window")
+            resize_factor = gr.Slider(minimum=1, maximum=4, step=1, label="Resize Factor", info="Reduce the resolution by this factor. Sometimes, best results are obtained at 480p or 720p")
+    with gr.Row():
+        with gr.Column():
+            pad_top = gr.Slider(minimum=0, maximum=50, step=1, value=0, label="Pad Top", info="Padding above lips")
+            pad_bottom = gr.Slider(minimum=0, maximum=50, step=1, value=10, label="Pad Bottom (Often increasing this to 20 allows chin to be included)", info="Padding below lips")
+            pad_left = gr.Slider(minimum=0, maximum=50, step=1, value=0, label="Pad Left", info="Padding to the left of lips")
+            pad_right = gr.Slider(minimum=0, maximum=50, step=1, value=0, label="Pad Right", info="Padding to the right of lips")
+            generate_btn = gr.Button("Generate")
+        with gr.Column():
+            result = gr.Video()
+
+    def generate(video, audio, checkpoint, no_smooth, resize_factor, pad_top, pad_bottom, pad_left, pad_right):
+        if video is None or audio is None or checkpoint is None:
+            return
+
+        smooth = "--nosmooth" if no_smooth else ""
+
+
+        cmd = [
+            "python",
+            "inference.py",
+            "--checkpoint_path", f"checkpoints/{checkpoint}.pth",
+            "--segmentation_path", "checkpoints/face_segmentation.pth",
+            "--enhance_face", "gfpgan",
+            "--face", video.name,
+            "--audio", audio.name,
+            "--outfile", "results/output.mp4",
+        ]
+
+
+        call(cmd)
+        return "results/output.mp4"
+
+    generate_btn.click(
+        generate,
+        [video, audio, checkpoint, no_smooth, resize_factor, pad_top, pad_bottom, pad_left, pad_right],
+        result)
+
+ui.queue().launch(share=True, debug=True, inline=False)
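Note that the click handler's inputs list must match the parameter order of generate (video, audio, checkpoint, no_smooth, resize_factor, then the four pads); Gradio maps inputs to arguments positionally. As committed, though, generate collects no_smooth, resize_factor, and the pad values but never forwards them to inference.py; only the unused smooth string hints at the intent. Below is a minimal sketch of how those options could be appended to cmd. The --nosmooth flag is the one already constructed in this file; --resize_factor and --pads are assumptions based on the standard Wav2Lip inference.py CLI, and extra_inference_args is a hypothetical helper, not part of this commit.

    def extra_inference_args(no_smooth, resize_factor, pad_top, pad_bottom, pad_left, pad_right):
        """Build the optional CLI arguments that generate() collects but does not forward.

        --nosmooth matches the flag string already built in app.py; --resize_factor and
        --pads are assumed from the standard Wav2Lip inference.py argument parser.
        """
        args = []
        if no_smooth:
            args.append("--nosmooth")  # disable smoothing of face detections
        args += ["--resize_factor", str(int(resize_factor))]  # assumed flag
        # Assumed flag; Wav2Lip's --pads takes four integers in the order top, bottom, left, right.
        args += ["--pads", str(int(pad_top)), str(int(pad_bottom)), str(int(pad_left)), str(int(pad_right))]
        return args

With a helper like this, adding cmd += extra_inference_args(no_smooth, resize_factor, pad_top, pad_bottom, pad_left, pad_right) before call(cmd) would make the remaining UI controls take effect.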