Spaces: Running on Zero
zR committed · Commit c496671 · 1 Parent(s): 36bda7f
- app.py +35 -4
- requirements.txt +3 -2
app.py CHANGED

@@ -1,15 +1,19 @@
 import os
+import tempfile
 import threading
 import time

 import gradio as gr
+import numpy as np
 import torch
 from diffusers import CogVideoXPipeline
-from diffusers.utils import export_to_video
 from datetime import datetime, timedelta
 from openai import OpenAI
 import spaces
+import imageio
 import moviepy.editor as mp
+from typing import List, Union
+import PIL

 dtype = torch.float16
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -29,6 +33,25 @@ Video descriptions must have the same num of words as examples below. Extra word
 """


+def export_to_video_imageio(
+    video_frames: Union[List[np.ndarray], List[PIL.Image.Image]], output_video_path: str = None, fps: int = 8
+) -> str:
+    """
+    Export the video frames to a video file using imageio lib to Avoid "green screen" issue (for example CogVideoX)
+    """
+    if output_video_path is None:
+        output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name
+
+    if isinstance(video_frames[0], PIL.Image.Image):
+        video_frames = [np.array(frame) for frame in video_frames]
+
+    with imageio.get_writer(output_video_path, fps=fps) as writer:
+        for frame in video_frames:
+            writer.append_data(frame)
+
+    return output_video_path
+
+
 def convert_prompt(prompt: str, retry_times: int = 3) -> str:
     if not os.environ.get("OPENAI_API_KEY"):
         return prompt
@@ -94,9 +117,10 @@ def infer(
     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
     video_path = f"./output/{timestamp}.mp4"
     os.makedirs(os.path.dirname(video_path), exist_ok=True)
-    export_to_video(video[1:], video_path)
+    export_to_video_imageio(video[1:], video_path)
     return video_path

+
 def convert_to_gif(video_path):
     clip = mp.VideoFileClip(video_path)
     clip = clip.set_fps(8)
@@ -105,6 +129,7 @@ def convert_to_gif(video_path):
     clip.write_gif(gif_path, fps=8)
     return gif_path

+
 def delete_old_files():
     while True:
         now = datetime.now()
@@ -118,6 +143,7 @@ def delete_old_files():
                     os.remove(file_path)
         time.sleep(600)  # Sleep for 10 minutes

+
 threading.Thread(target=delete_old_files, daemon=True).start()

 with gr.Blocks() as demo:
@@ -144,7 +170,9 @@ with gr.Blocks() as demo:
             enhance_button = gr.Button("✨ Enhance Prompt(Optional)")

         with gr.Column():
-            gr.Markdown("**Optional Parameters** (default values are recommended)")
+            gr.Markdown("**Optional Parameters** (default values are recommended)<br>"
+                        "Turn Inference Steps larger if you want more detailed video, but it will be slower.<br>"
+                        "50 steps are recommended for most cases. will cause 120 seconds for inference.<br>")
             with gr.Row():
                 num_inference_steps = gr.Number(label="Inference Steps", value=50)
                 guidance_scale = gr.Number(label="Guidance Scale", value=6.0)
@@ -156,6 +184,7 @@ with gr.Blocks() as demo:
             download_video_button = gr.File(label="📥 Download Video", visible=False)
             download_gif_button = gr.File(label="📥 Download GIF", visible=False)

+
     def generate(prompt, num_inference_steps, guidance_scale, progress=gr.Progress(track_tqdm=True)):
         video_path = infer(prompt, num_inference_steps, guidance_scale, progress=progress)
         video_update = gr.update(visible=True, value=video_path)
@@ -165,9 +194,11 @@ with gr.Blocks() as demo:

         return video_path, video_update, gif_update

+
     def enhance_prompt_func(prompt):
         return convert_prompt(prompt, retry_times=1)

+
     generate_button.click(
         generate,
         inputs=[prompt, num_inference_steps, guidance_scale],
@@ -181,4 +212,4 @@ with gr.Blocks() as demo:
     )

 if __name__ == "__main__":
-    demo.launch(server_name="127.0.0.1", server_port=
+    demo.launch(server_name="127.0.0.1", server_port=7870, share=True)
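The core of this change is the new export_to_video_imageio helper, which streams frames through imageio instead of diffusers.utils.export_to_video to avoid the "green screen" artifact. Below is a minimal, self-contained sketch of how infer() now hands frames to it; it is not part of the commit, and the random frames, the 480x720 size, and the frame count are illustrative assumptions standing in for real pipeline output.

import tempfile

import imageio
import numpy as np


def export_to_video_imageio(video_frames, output_video_path=None, fps=8):
    # Same approach as the helper added in this commit: stream each frame
    # through imageio's ffmpeg writer instead of diffusers' export_to_video.
    if output_video_path is None:
        output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name
    with imageio.get_writer(output_video_path, fps=fps) as writer:
        for frame in video_frames:
            writer.append_data(frame)
    return output_video_path


# Stand-in for pipeline output: 9 random RGB frames (shape and count are assumptions).
frames = [np.random.randint(0, 256, (480, 720, 3), dtype=np.uint8) for _ in range(9)]
# infer() drops the first frame before exporting, i.e. export_to_video_imageio(video[1:], ...).
print(export_to_video_imageio(frames[1:], fps=8))

With imageio-ffmpeg installed (already pinned in requirements.txt), this writes a playable MP4 and returns its path, matching how infer() saves to ./output/{timestamp}.mp4.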
requirements.txt CHANGED

@@ -1,8 +1,9 @@
 gradio==4.40.0
 imageio-ffmpeg==0.5.1
 torch==2.2.0
-git+https://github.com/huggingface/diffusers.git@
+git+https://github.com/huggingface/diffusers.git@32da2e7673cfe0475a47c41b859f5fbd8bf17a40#egg=diffusers
 transformers==4.43.3
 spaces==0.29.2
 moviepy==1.0.3
-openai==1.38.0
+openai==1.38.0
+accelerate==0.33.0
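requirements.txt now pins diffusers to a specific git revision (presumably because the CogVideoX pipeline had not yet shipped in a tagged release) and adds accelerate, which diffusers typically uses for faster, low-memory weight loading. Under these pins, the model setup implied by the top of app.py boils down to roughly the following sketch; it is not part of the diff above, and the "THUDM/CogVideoX-2b" checkpoint id and the sample prompt are assumptions, since the hunks shown here never name the model.

import torch
from diffusers import CogVideoXPipeline

dtype = torch.float16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Checkpoint id is an illustrative assumption; the diff above does not show the loading line.
pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=dtype).to(device)

# Call shape matching the UI defaults (50 steps, guidance 6.0); the returned frames
# (PIL images or numpy arrays, both handled by the new helper) are what infer()
# passes to export_to_video_imageio.
video = pipe(
    prompt="A garden comes to life as a kaleidoscope of butterflies flutters amidst the blossoms.",
    num_inference_steps=50,
    guidance_scale=6.0,
).frames[0]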
|