roman-talker committed on
Commit
f3b83d2
·
verified ·
1 Parent(s): e261d88

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -17
app.py CHANGED
@@ -1,23 +1,55 @@
1
  import gradio as gr
2
- import time
3
- import shutil
 
 
 
 
4
 
5
def generate_video(prompt, duration):
    """Simulate video generation by copying a local placeholder clip.

    Local-test stand-in for a real model call: clamps ``duration`` to
    the 1–8 second range, sleeps briefly to mimic inference latency,
    then copies ``placeholder.mp4`` to ``output.mp4``.

    Returns the path of the copied file (``"output.mp4"``).
    """
    duration = min(max(duration, 1), 8)  # keep slider value in range (unused below)
    time.sleep(1)  # fake inference delay
    src = "placeholder.mp4"
    dst = "output.mp4"
    shutil.copyfile(src, dst)
    return dst
 
 
 
 
12
 
13
with gr.Blocks() as demo:
    gr.Markdown("## Wan 2.2 Video Generator (Local Test Placeholder)")

    prompt_input = gr.Textbox(label="Prompt", placeholder="Describe your scene")
    duration_input = gr.Slider(label="Duration (sec)", minimum=1, maximum=8, step=1, value=6)
    submit_btn = gr.Button("Generate Video")
    output_video = gr.Video(label="Generated Video")

    # BUG FIX: `api_name` belongs on the event listener (it names the
    # /api/generate-video endpoint); gr.Blocks.launch() has no such
    # parameter and raises TypeError.
    submit_btn.click(
        generate_video,
        inputs=[prompt_input, duration_input],
        outputs=output_video,
        api_name="generate-video",
    )

demo.launch()
 
1
  import gradio as gr
2
+ import torch
3
+ from diffusers import WanPipeline, UniPCMultistepScheduler
4
+ from PIL import Image
5
+ import numpy as np
6
+ import random
7
+ import os
8
 
9
# Pick the GPU when available; fall back to CPU for local testing.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the Wan 2.2 TI2V-5B weights once at import time.
# fp16 on GPU to halve memory; fp32 on CPU, where half precision is
# poorly supported.
pipe = WanPipeline.from_pretrained(
    "Wan-AI/Wan2.2-TI2V-5B-Diffusers",
    torch_dtype=torch.float16 if device=="cuda" else torch.float32
)
# Swap in the UniPC multistep scheduler (fewer denoising steps for
# comparable quality), then move the whole pipeline to the target device.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

FIXED_FPS = 24  # frame rate used to convert seconds -> frame count
MIN_DURATION, MAX_DURATION = 1, 8  # clip length bounds in seconds
20
 
 
 
21
 
22
def generate_video(prompt, duration, init_image=None):
    """Generate a short video clip from a text prompt and save it as MP4.

    Parameters
    ----------
    prompt : str
        Text description of the scene.
    duration : int | float
        Requested clip length in seconds; clamped to
        [MIN_DURATION, MAX_DURATION].
    init_image : numpy.ndarray | None
        Optional upload from the Gradio Image component.
        NOTE(review): ``WanPipeline`` is text-to-video only — its
        ``__call__`` accepts no image argument, so the original
        ``img=`` kwarg raised TypeError. Real image conditioning
        requires ``WanImageToVideoPipeline``; until the pipeline is
        swapped, the upload is normalized but not used.

    Returns
    -------
    str
        Path of the saved MP4 file ("output.mp4").
    """
    # Local import so this fix does not touch the top-of-file imports.
    from diffusers.utils import export_to_video

    duration = max(MIN_DURATION, min(MAX_DURATION, duration))

    if init_image is not None:
        # Normalize the upload now so switching to an image-to-video
        # pipeline later only requires passing this through.
        Image.fromarray(init_image).convert("RGB")

    # Diffusers takes a frame count, not seconds ("duration_seconds" is
    # not a pipeline parameter). Wan expects (num_frames - 1) % 4 == 0;
    # duration * 24 + 1 satisfies that for integer durations.
    num_frames = int(duration) * FIXED_FPS + 1

    result = pipe(
        prompt=prompt,
        height=512,
        width=512,
        num_frames=num_frames,
        guidance_scale=1.0,
    )
    # Pipeline output exposes `.frames` (not `.videos`); frames[0] is the
    # list of PIL frames for the first generated video.
    frames = result.frames[0]

    video_path = "output.mp4"
    export_to_video(frames, video_path, fps=FIXED_FPS)
    return video_path
38
+
39
# Gradio UI: prompt/duration/optional image in, generated video out.
with gr.Blocks() as demo:
    gr.Markdown("## Wan 2.2 TI2V-5B Video Generator")

    # NOTE(review): indentation was lost in the diff view — assumed the
    # three input widgets share the Row while the button and video sit
    # directly under the Blocks; confirm against the original layout.
    with gr.Row():
        prompt_input = gr.Textbox(label="Prompt", placeholder="Describe your scene")
        duration_input = gr.Slider(label="Duration (seconds)", minimum=1, maximum=8, step=1, value=4)
        init_image_input = gr.Image(label="Optional Initial Image", type="numpy")
    generate_btn = gr.Button("Generate Video")
    output_video = gr.Video(label="Generated Video")

    # Wire the button to the generator; the image input is optional.
    generate_btn.click(
        generate_video,
        inputs=[prompt_input, duration_input, init_image_input],
        outputs=output_video
    )

# Default launch: local server only, no public share link.
demo.launch()