rahul7star committed (verified)
Commit 8d53f6f · 1 Parent(s): 86a9faa

Create app1.py

Files changed (1): app1.py  +451 -0

app1.py ADDED
@@ -0,0 +1,451 @@
+ import os
+ import shutil
+ import random
+ import sys
+ import tempfile
+ from typing import Sequence, Mapping, Any, Union
+
+ import spaces
+ import torch
+ import gradio as gr
+ from PIL import Image
+ from huggingface_hub import hf_hub_download
+ from comfy import model_management
+
+ def hf_hub_download_local(repo_id, filename, local_dir, **kwargs):
+     downloaded_path = hf_hub_download(repo_id=repo_id, filename=filename, **kwargs)
+     os.makedirs(local_dir, exist_ok=True)
+     base_filename = os.path.basename(filename)
+     target_path = os.path.join(local_dir, base_filename)
+
+     if os.path.exists(target_path) or os.path.islink(target_path):
+         os.remove(target_path)
+
+     os.symlink(downloaded_path, target_path)
+     return target_path
+
+ # --- Model Downloads ---
+ print("Downloading models from Hugging Face Hub...")
+ hf_hub_download_local(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/text_encoders/umt5_xxl_fp8_e4m3fn_scaled.safetensors", local_dir="models/text_encoders")
+ hf_hub_download_local(repo_id="Comfy-Org/Wan_2.2_ComfyUI_Repackaged", filename="split_files/diffusion_models/wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", local_dir="models/unet")
+ hf_hub_download_local(repo_id="Comfy-Org/Wan_2.2_ComfyUI_Repackaged", filename="split_files/diffusion_models/wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", local_dir="models/unet")
+ hf_hub_download_local(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/vae/wan_2.1_vae.safetensors", local_dir="models/vae")
+ hf_hub_download_local(repo_id="Comfy-Org/Wan_2.1_ComfyUI_repackaged", filename="split_files/clip_vision/clip_vision_h.safetensors", local_dir="models/clip_vision")
+ hf_hub_download_local(repo_id="Kijai/WanVideo_comfy", filename="Wan22-Lightning/Wan2.2-Lightning_I2V-A14B-4steps-lora_HIGH_fp16.safetensors", local_dir="models/loras")
+ hf_hub_download_local(repo_id="Kijai/WanVideo_comfy", filename="Wan22-Lightning/Wan2.2-Lightning_I2V-A14B-4steps-lora_LOW_fp16.safetensors", local_dir="models/loras")
+ print("Downloads complete.")
+
+ # --- Image Processing Functions ---
+ def calculate_video_dimensions(width, height, max_size=832, min_size=480):
+     """
+     Calculate video dimensions based on the input image size.
+     The larger dimension becomes max_size and the smaller one scales proportionally.
+     If the image is square, use min_size x min_size.
+     Results are rounded to the nearest multiple of 16.
+     """
+     # Handle square images
+     if width == height:
+         video_width = min_size
+         video_height = min_size
+     else:
+         # Calculate aspect ratio
+         aspect_ratio = width / height
+
+         if width > height:
+             # Landscape orientation
+             video_width = max_size
+             video_height = int(max_size / aspect_ratio)
+         else:
+             # Portrait orientation
+             video_height = max_size
+             video_width = int(max_size * aspect_ratio)
+
+     # Round to nearest multiple of 16
+     video_width = round(video_width / 16) * 16
+     video_height = round(video_height / 16) * 16
+
+     # Ensure minimum size
+     video_width = max(video_width, 16)
+     video_height = max(video_height, 16)
+
+     return video_width, video_height
+
+ def resize_and_crop_to_match(target_image, reference_image):
+     """
+     Resize and center-crop target_image to match reference_image's dimensions.
+     """
+     ref_width, ref_height = reference_image.size
+     target_width, target_height = target_image.size
+
+     # Calculate scaling factor to ensure target covers reference dimensions
+     scale = max(ref_width / target_width, ref_height / target_height)
+
+     # Resize target image
+     new_width = int(target_width * scale)
+     new_height = int(target_height * scale)
+     resized = target_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
+
+     # Center crop to match reference dimensions
+     left = (new_width - ref_width) // 2
+     top = (new_height - ref_height) // 2
+     right = left + ref_width
+     bottom = top + ref_height
+
+     cropped = resized.crop((left, top, right, bottom))
+     return cropped
+
+ # --- Boilerplate code from the original script ---
+ def get_value_at_index(obj: Union[Sequence, Mapping], index: int) -> Any:
+     """Returns the value at the given index of a sequence or mapping.
+
+     If the object is a sequence (like a list or string), returns the value at the given index.
+     If the object is a mapping (like a dictionary), returns the value at the index-th key.
+
+     Some nodes return a dictionary; in those cases we look for the "result" key.
+
+     Args:
+         obj (Union[Sequence, Mapping]): The object to retrieve the value from.
+         index (int): The index of the value to retrieve.
+
+     Returns:
+         Any: The value at the given index.
+
+     Raises:
+         IndexError: If the index is out of bounds for the object and the object is not a mapping.
+     """
+     try:
+         return obj[index]
+     except KeyError:
+         # This is a fallback for custom node outputs that might be dictionaries
+         if isinstance(obj, Mapping) and "result" in obj:
+             return obj["result"][index]
+         raise
+
+ def find_path(name: str, path: str = None) -> str:
+     """
+     Recursively looks at parent folders starting from the given path until it finds the given name.
+     Returns the path as a string if found, or None otherwise.
+     """
+     if path is None:
+         path = os.getcwd()
+
+     if name in os.listdir(path):
+         path_name = os.path.join(path, name)
+         print(f"'{name}' found: {path_name}")
+         return path_name
+
+     parent_directory = os.path.dirname(path)
+     if parent_directory == path:
+         return None
+
+     return find_path(name, parent_directory)
+
+
+ def add_comfyui_directory_to_sys_path() -> None:
+     """
+     Add 'ComfyUI' to sys.path.
+     """
+     comfyui_path = find_path("ComfyUI")
+     if comfyui_path is not None and os.path.isdir(comfyui_path):
+         sys.path.append(comfyui_path)
+         print(f"'{comfyui_path}' added to sys.path")
+     else:
+         print("Could not find ComfyUI directory. Please run from a parent folder of ComfyUI.")
+
+ def add_extra_model_paths() -> None:
+     """
+     Parse the optional extra_model_paths.yaml file and add the parsed paths to ComfyUI's model search paths.
+     """
+     try:
+         from main import load_extra_path_config
+     except ImportError:
+         print(
+             "Could not import load_extra_path_config from main.py. This might be okay if you don't use it."
+         )
+         return
+
+     extra_model_paths = find_path("extra_model_paths.yaml")
+     if extra_model_paths is not None:
+         load_extra_path_config(extra_model_paths)
+     else:
+         print("Could not find an optional 'extra_model_paths.yaml' config file.")
+
+ def import_custom_nodes() -> None:
+     """Find all custom nodes in the custom_nodes folder and add those node objects to NODE_CLASS_MAPPINGS.
+     This function sets up a new asyncio event loop, initializes the PromptServer,
+     creates a PromptQueue, and initializes the custom nodes.
+     """
+     import asyncio
+     import execution
+     from nodes import init_extra_nodes
+     import server
+
+     loop = asyncio.new_event_loop()
+     asyncio.set_event_loop(loop)
+     server_instance = server.PromptServer(loop)
+     execution.PromptQueue(server_instance)
+     loop.run_until_complete(init_extra_nodes(init_custom_nodes=True))
+
+
+ # --- Model Loading and Caching ---
+ MODELS_AND_NODES = {}
+
+ print("Setting up ComfyUI paths...")
+ add_comfyui_directory_to_sys_path()
+ add_extra_model_paths()
+
+ print("Importing custom nodes...")
+ import_custom_nodes()
+
+ # Now that paths are set up, we can import from nodes
+ from nodes import NODE_CLASS_MAPPINGS
+ global folder_paths  # Make folder_paths globally accessible
+ import folder_paths
+
+ print("Loading models into memory. This may take a few minutes...")
+
+ # Load the core models (CLIP text encoder, UNETs, VAE)
+ cliploader = NODE_CLASS_MAPPINGS["CLIPLoader"]()
+ MODELS_AND_NODES["clip"] = cliploader.load_clip(
+     clip_name="umt5_xxl_fp8_e4m3fn_scaled.safetensors", type="wan", device="cpu"
+ )
+
+ unetloader = NODE_CLASS_MAPPINGS["UNETLoader"]()
+ unet_low_noise = unetloader.load_unet(
+     unet_name="wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors",
+     weight_dtype="default",
+ )
+ unet_high_noise = unetloader.load_unet(
+     unet_name="wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors",
+     weight_dtype="default",
+ )
+
+ vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
+ MODELS_AND_NODES["vae"] = vaeloader.load_vae(vae_name="wan_2.1_vae.safetensors")
+
+ # Load LoRAs
+ loraloadermodelonly = NODE_CLASS_MAPPINGS["LoraLoaderModelOnly"]()
+ MODELS_AND_NODES["model_low_noise"] = loraloadermodelonly.load_lora_model_only(
+     lora_name="Wan2.2-Lightning_I2V-A14B-4steps-lora_LOW_fp16.safetensors",
+     strength_model=0.8,
+     model=get_value_at_index(unet_low_noise, 0),
+ )
+ MODELS_AND_NODES["model_high_noise"] = loraloadermodelonly.load_lora_model_only(
+     lora_name="Wan2.2-Lightning_I2V-A14B-4steps-lora_HIGH_fp16.safetensors",
+     strength_model=0.8,
+     model=get_value_at_index(unet_high_noise, 0),
+ )
+
+ # Load CLIP vision model
+ clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()
+ MODELS_AND_NODES["clip_vision"] = clipvisionloader.load_clip(
+     clip_name="clip_vision_h.safetensors"
+ )
+
+ # Instantiate all required node classes
+ MODELS_AND_NODES["CLIPTextEncode"] = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
+ MODELS_AND_NODES["LoadImage"] = NODE_CLASS_MAPPINGS["LoadImage"]()
+ MODELS_AND_NODES["CLIPVisionEncode"] = NODE_CLASS_MAPPINGS["CLIPVisionEncode"]()
+ MODELS_AND_NODES["ModelSamplingSD3"] = NODE_CLASS_MAPPINGS["ModelSamplingSD3"]()
+ MODELS_AND_NODES["PathchSageAttentionKJ"] = NODE_CLASS_MAPPINGS["PathchSageAttentionKJ"]()
+ MODELS_AND_NODES["WanFirstLastFrameToVideo"] = NODE_CLASS_MAPPINGS["WanFirstLastFrameToVideo"]()
+ MODELS_AND_NODES["KSamplerAdvanced"] = NODE_CLASS_MAPPINGS["KSamplerAdvanced"]()
+ MODELS_AND_NODES["VAEDecode"] = NODE_CLASS_MAPPINGS["VAEDecode"]()
+ MODELS_AND_NODES["CreateVideo"] = NODE_CLASS_MAPPINGS["CreateVideo"]()
+ MODELS_AND_NODES["SaveVideo"] = NODE_CLASS_MAPPINGS["SaveVideo"]()
+
+ print("Pre-loading main models onto GPU...")
+ model_loaders = [
+     MODELS_AND_NODES["clip"],
+     MODELS_AND_NODES["vae"],
+     MODELS_AND_NODES["model_low_noise"],   # UNET + LoRA (low noise)
+     MODELS_AND_NODES["model_high_noise"],  # UNET + LoRA (high noise)
+     MODELS_AND_NODES["clip_vision"],
+ ]
+ model_management.load_models_gpu([
+     loader[0].patcher if hasattr(loader[0], 'patcher') else loader[0] for loader in model_loaders
+ ])
+ print("All models loaded successfully!")
+
+ # --- Main Video Generation Logic ---
+ @spaces.GPU(duration=120)
+ def generate_video(
+     start_image_pil,
+     end_image_pil,
+     prompt,
+     # The default below is the standard Chinese negative prompt commonly used with Wan models.
+     negative_prompt="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走,过曝,",
+     duration=33,
+     progress=gr.Progress(track_tqdm=True)
+ ):
+     """
+     The main function to generate a video based on user inputs.
+     This function is called every time the user clicks the 'Generate' button.
+     """
+     FPS = 16
+
+     # Process images: resize and crop the second image to match the first.
+     # The first image determines the dimensions.
+     processed_start_image = start_image_pil.copy()
+     processed_end_image = resize_and_crop_to_match(end_image_pil, start_image_pil)
+
+     # Calculate video dimensions based on the first image
+     video_width, video_height = calculate_video_dimensions(
+         processed_start_image.width,
+         processed_start_image.height
+     )
+
+     print(f"Input image size: {processed_start_image.width}x{processed_start_image.height}")
+     print(f"Video dimensions: {video_width}x{video_height}")
+
+     clip = MODELS_AND_NODES["clip"]
+     vae = MODELS_AND_NODES["vae"]
+     model_low_noise = MODELS_AND_NODES["model_low_noise"]
+     model_high_noise = MODELS_AND_NODES["model_high_noise"]
+     clip_vision = MODELS_AND_NODES["clip_vision"]
+
+     cliptextencode = MODELS_AND_NODES["CLIPTextEncode"]
+     loadimage = MODELS_AND_NODES["LoadImage"]
+     clipvisionencode = MODELS_AND_NODES["CLIPVisionEncode"]
+     modelsamplingsd3 = MODELS_AND_NODES["ModelSamplingSD3"]
+     pathchsageattentionkj = MODELS_AND_NODES["PathchSageAttentionKJ"]
+     wanfirstlastframetovideo = MODELS_AND_NODES["WanFirstLastFrameToVideo"]
+     ksampleradvanced = MODELS_AND_NODES["KSamplerAdvanced"]
+     vaedecode = MODELS_AND_NODES["VAEDecode"]
+     createvideo = MODELS_AND_NODES["CreateVideo"]
+     savevideo = MODELS_AND_NODES["SaveVideo"]
+
+     # Save processed images to temporary files
+     with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as start_file, \
+          tempfile.NamedTemporaryFile(suffix=".png", delete=False) as end_file:
+         processed_start_image.save(start_file.name)
+         processed_end_image.save(end_file.name)
+         start_image_path = start_file.name
+         end_image_path = end_file.name
+
+     with torch.inference_mode():
+         progress(0.1, desc="Encoding text and images...")
+         # --- Workflow execution ---
+         positive_conditioning = cliptextencode.encode(text=prompt, clip=get_value_at_index(clip, 0))
+         negative_conditioning = cliptextencode.encode(text=negative_prompt, clip=get_value_at_index(clip, 0))
+
+         start_image_loaded = loadimage.load_image(image=start_image_path)
+         end_image_loaded = loadimage.load_image(image=end_image_path)
+
+         clip_vision_encoded_start = clipvisionencode.encode(
+             crop="none", clip_vision=get_value_at_index(clip_vision, 0), image=get_value_at_index(start_image_loaded, 0)
+         )
+         clip_vision_encoded_end = clipvisionencode.encode(
+             crop="none", clip_vision=get_value_at_index(clip_vision, 0), image=get_value_at_index(end_image_loaded, 0)
+         )
+
+         progress(0.2, desc="Preparing initial latents...")
+         initial_latents = wanfirstlastframetovideo.EXECUTE_NORMALIZED(
+             width=video_width, height=video_height, length=duration, batch_size=1,
+             positive=get_value_at_index(positive_conditioning, 0),
+             negative=get_value_at_index(negative_conditioning, 0),
+             vae=get_value_at_index(vae, 0),
+             clip_vision_start_image=get_value_at_index(clip_vision_encoded_start, 0),
+             clip_vision_end_image=get_value_at_index(clip_vision_encoded_end, 0),
+             start_image=get_value_at_index(start_image_loaded, 0),
+             end_image=get_value_at_index(end_image_loaded, 0),
+         )
+
+         progress(0.3, desc="Patching models...")
+         model_low_patched = modelsamplingsd3.patch(shift=8, model=get_value_at_index(model_low_noise, 0))
+         model_low_final = pathchsageattentionkj.patch(sage_attention="auto", model=get_value_at_index(model_low_patched, 0))
+
+         model_high_patched = modelsamplingsd3.patch(shift=8, model=get_value_at_index(model_high_noise, 0))
+         model_high_final = pathchsageattentionkj.patch(sage_attention="auto", model=get_value_at_index(model_high_patched, 0))
+
+         progress(0.5, desc="Running KSampler (Step 1/2)...")
+         latent_step1 = ksampleradvanced.sample(
+             add_noise="enable", noise_seed=random.randint(1, 2**64), steps=8, cfg=1,
+             sampler_name="euler", scheduler="simple", start_at_step=0, end_at_step=4,
+             return_with_leftover_noise="enable", model=get_value_at_index(model_high_final, 0),
+             positive=get_value_at_index(initial_latents, 0),
+             negative=get_value_at_index(initial_latents, 1),
+             latent_image=get_value_at_index(initial_latents, 2),
+         )
+
+         progress(0.7, desc="Running KSampler (Step 2/2)...")
+         latent_step2 = ksampleradvanced.sample(
+             add_noise="disable", noise_seed=random.randint(1, 2**64), steps=8, cfg=1,
+             sampler_name="euler", scheduler="simple", start_at_step=4, end_at_step=10000,
+             return_with_leftover_noise="disable", model=get_value_at_index(model_low_final, 0),
+             positive=get_value_at_index(initial_latents, 0),
+             negative=get_value_at_index(initial_latents, 1),
+             latent_image=get_value_at_index(latent_step1, 0),
+         )
+
+         progress(0.8, desc="Decoding VAE...")
+         decoded_images = vaedecode.decode(samples=get_value_at_index(latent_step2, 0), vae=get_value_at_index(vae, 0))
+
+         progress(0.9, desc="Creating and saving video...")
+         video_data = createvideo.create_video(fps=FPS, images=get_value_at_index(decoded_images, 0))
+
+         # Save the video to ComfyUI's output directory
+         save_result = savevideo.save_video(
+             filename_prefix="GradioVideo", format="mp4", codec="h264",
+             video=get_value_at_index(video_data, 0),
+         )
+
+         progress(1.0, desc="Done!")
+         return f"output/{save_result['ui']['images'][0]['filename']}"
+
+
+
+ css = '''
+ .fillable{max-width: 1100px !important}
+ .dark .progress-text {color: white}
+ '''
+ with gr.Blocks(theme=gr.themes.Citrus(), css=css) as app:
+     gr.Markdown("# Wan 2.2 First/Last Frame Video Fast")
+     gr.Markdown("Running the [Wan 2.2 First/Last Frame ComfyUI workflow](https://www.reddit.com/r/StableDiffusion/comments/1me4306/psa_wan_22_does_first_frame_last_frame_out_of_the/) and the [lightx2v/Wan2.2-Lightning](https://huggingface.co/lightx2v/Wan2.2-Lightning) 8-step LoRA on ZeroGPU")
+
+     with gr.Row():
+         with gr.Column():
+             with gr.Group():
+                 with gr.Row():
+                     start_image = gr.Image(type="pil", label="Start Frame")
+                     end_image = gr.Image(type="pil", label="End Frame")
+
+                 prompt = gr.Textbox(label="Prompt", info="Describe the transition between the two images")
+
+             with gr.Accordion("Advanced Settings", open=False, visible=False):
+                 duration = gr.Radio(
+                     [("Short (2s)", 33), ("Mid (4s)", 66)],
+                     value=33,
+                     label="Video Duration",
+                     visible=False
+                 )
+                 negative_prompt = gr.Textbox(
+                     label="Negative Prompt",
+                     value="色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走,过曝,",
+                     visible=False
+                 )
+
+             generate_button = gr.Button("Generate Video", variant="primary")
+
+         with gr.Column():
+             output_video = gr.Video(label="Generated Video", autoplay=True)
+
+     generate_button.click(
+         fn=generate_video,
+         inputs=[start_image, end_image, prompt, negative_prompt, duration],
+         outputs=output_video
+     )
+
+     gr.Examples(
+         examples=[
+             ["poli_tower.png", "tower_takes_off.png", "the man turns around"],
+             ["ugly_sonic.jpeg", "squatting_sonic.png", "the character dodges the missiles"],
+             ["capyabara_zoomed.png", "capybara.webp", "a dramatic dolly zoom"],
+         ],
+         inputs=[start_image, end_image, prompt],
+         outputs=output_video,
+         fn=generate_video,
+         cache_examples="lazy",
+     )
+
+ if __name__ == "__main__":
+     app.launch(share=True)
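
For quick reference, the sizing rule implemented by calculate_video_dimensions in the file above can be illustrated with a small standalone sketch (not part of the committed file; the helper name _video_dims is made up for this example): the longer side becomes max_size=832, the shorter side scales with the aspect ratio, square inputs fall back to min_size=480, and both sides are rounded to the nearest multiple of 16 with a floor of 16.

# Standalone illustration of the sizing logic in calculate_video_dimensions.
# _video_dims is a hypothetical name used only for this sketch.
def _video_dims(w: int, h: int, max_size: int = 832, min_size: int = 480):
    if w == h:
        vw, vh = min_size, min_size                 # square input -> 480x480
    elif w > h:
        vw, vh = max_size, int(max_size * h / w)    # landscape: long side is the width
    else:
        vw, vh = int(max_size * w / h), max_size    # portrait: long side is the height
    # Round to the nearest multiple of 16, never below 16
    return max(round(vw / 16) * 16, 16), max(round(vh / 16) * 16, 16)

for w, h in [(1920, 1080), (1080, 1920), (1024, 1024), (640, 360)]:
    vw, vh = _video_dims(w, h)
    print(f"{w}x{h} -> {vw}x{vh}")
# Expected output:
# 1920x1080 -> 832x464
# 1080x1920 -> 464x832
# 1024x1024 -> 480x480
# 640x360 -> 832x464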