|
|
|
|
|
try: |
|
|
import spaces |
|
|
SPACES_AVAILABLE = True |
|
|
print("✅ Spaces available - ZeroGPU mode") |
|
|
except ImportError: |
|
|
SPACES_AVAILABLE = False |
|
|
print("⚠️ Spaces not available - running in regular mode") |
|
|
|
|
|
|
|
|
import os |
|
|
import uuid |
|
|
from datetime import datetime |
|
|
import random |
|
|
import torch |
|
|
import gradio as gr |
|
|
from diffusers import DiffusionPipeline |
|
|
from PIL import Image |
|
|
import traceback |
|
|
import numpy as np |
|
|
import cv2 |
|
|
import imageio |
|
|
from pathlib import Path |
|
|
import tempfile |
|
|
import shutil |
|
|
|
|
|
|
|
|
try: |
|
|
from compel import Compel, ReturnedEmbeddingsType |
|
|
COMPEL_AVAILABLE = True |
|
|
print("✅ Compel available for long prompt processing") |
|
|
except ImportError: |
|
|
COMPEL_AVAILABLE = False |
|
|
print("⚠️ Compel not available - using standard prompt processing") |
|
|
|
|
|
|
|
|
STYLE_PRESETS = { |
|
|
"None": "", |
|
|
"Cinematic": "cinematic lighting, dramatic composition, film grain, professional cinematography, movie scene, high production value", |
|
|
"Anime": "anime style, detailed animation, high quality anime, cel animation, vibrant anime colors, smooth animation", |
|
|
"Realistic": "photorealistic, ultra-detailed, natural lighting, realistic motion, lifelike animation, high fidelity", |
|
|
"Fantasy": "fantasy style, magical atmosphere, ethereal lighting, mystical effects, enchanted scene", |
|
|
"Artistic": "artistic style, creative composition, unique visual style, expressive animation, stylized rendering" |
|
|
} |
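
# Base checkpoint plus the LoRA adapters layered on top of it. Each entry names a
# LoRA repo, weight file, adapter name, and blend scale; initialize_model() loads
# them and activates the successfully loaded ones via set_adapters().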
|
|
|
|
|
|
|
|
BASE_MODEL = "THUDM/CogVideoX-5b" |
|
|
|
|
|
LORA_CONFIGS = [ |
|
|
{ |
|
|
"repo_id": "hashu786/CogVideoX-LoRA-CineCam", |
|
|
"filename": "pytorch_lora_weights.safetensors", |
|
|
"adapter_name": "cinematic_camera", |
|
|
"scale": 0.6 |
|
|
}, |
|
|
{ |
|
|
"repo_id": "alibaba-pai/CogVideoX-Fun-V1.1-Reward-LoRAs", |
|
|
"filename": "pytorch_lora_weights.safetensors", |
|
|
"adapter_name": "quality_reward", |
|
|
"scale": 0.8 |
|
|
} |
|
|
|
|
|
|
|
|
] |
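
# Quality tags appended to every enhanced prompt. The "(term:weight)" syntax is
# Compel-style prompt weighting; without Compel it is passed through as plain text.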
|
|
|
|
|
|
|
|
QUALITY_ENHANCERS = [ |
|
|
"high quality video", "(masterpiece:1.3)", "(best quality:1.2)", "smooth animation", |
|
|
"detailed motion", "fluid movement", "professional video quality", "high resolution", |
|
|
"consistent lighting", "stable composition", "(perfect anatomy:1.1)", "natural motion", |
|
|
"cinematic quality", "detailed textures", "smooth transitions" |
|
|
] |
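
# Per-style tag lists appended after the user's prompt when that style is selected.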
|
|
|
|
|
|
|
|
STYLE_ENHANCERS = { |
|
|
"Cinematic": [ |
|
|
"cinematic shot", "(cinematic quality:1.3)", "movie scene", "film lighting", |
|
|
"dramatic composition", "professional cinematography", "cinematic motion", "film grain", |
|
|
"dynamic camera work", "cinematic storytelling" |
|
|
], |
|
|
"Anime": [ |
|
|
"anime animation", "(high quality anime:1.3)", "detailed anime", "smooth anime motion", |
|
|
"cel animation style", "(anime video:1.2)", "vibrant anime colors", "anime cinematics", |
|
|
"japanese animation", "fluid anime movement" |
|
|
], |
|
|
"Realistic": [ |
|
|
"realistic video", "(photorealistic animation:1.3)", "natural motion", "lifelike movement", |
|
|
"realistic lighting", "(realistic video:1.2)", "natural dynamics", "authentic motion", |
|
|
"real-world physics", "documentary style" |
|
|
], |
|
|
"Fantasy": [ |
|
|
"fantasy animation", "(magical video:1.3)", "mystical motion", "ethereal effects", |
|
|
"enchanted scene", "magical atmosphere", "fantasy cinematics", "surreal animation", |
|
|
"otherworldly movement", "magical realism" |
|
|
], |
|
|
"Artistic": [ |
|
|
"artistic video", "(creative animation:1.3)", "stylized motion", "artistic composition", |
|
|
"unique visual style", "expressive animation", "creative cinematics", "artistic movement", |
|
|
"stylized rendering", "avant-garde video" |
|
|
] |
|
|
} |
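
# UI limits and defaults for the video controls in create_interface(); the frame
# count sent to the pipeline is duration × fps.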
|
|
|
|
|
|
|
|
VIDEO_CONFIG = { |
|
|
"default_duration": 2.0, |
|
|
"max_duration": 4.0, |
|
|
"default_fps": 12, |
|
|
"max_fps": 24, |
|
|
"default_width": 512, |
|
|
"default_height": 512, |
|
|
"max_resolution": 768 |
|
|
} |
|
|
|
|
|
SAVE_DIR = "generated_videos" |
|
|
os.makedirs(SAVE_DIR, exist_ok=True) |
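
# Module-level cache: the pipeline, Compel processor, device, and a loaded flag,
# so initialize_model() only does the heavy lifting once per process.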
|
|
|
|
|
|
|
|
pipeline = None |
|
|
compel_processor = None |
|
|
device = None |
|
|
model_loaded = False |
|
|
|
|
|
def initialize_model(): |
|
|
"""优化的模型初始化函数""" |
|
|
global pipeline, compel_processor, device, model_loaded |
|
|
|
|
|
if model_loaded and pipeline is not None: |
|
|
return True |
|
|
|
|
|
try: |
|
|
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") |
|
|
print(f"🖥️ Using device: {device}") |
|
|
|
|
|
print(f"Loading CogVideoX base model: {BASE_MODEL}") |
|
|
print(f"LoRA configurations: {len(LORA_CONFIGS)} adapters") |
|
|
|
|
|
|
|
|
try: |
|
|
from diffusers import CogVideoXPipeline |
|
|
pipeline = CogVideoXPipeline.from_pretrained( |
|
|
BASE_MODEL, |
|
|
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32, |
|
|
use_safetensors=True |
|
|
) |
|
|
print("Successfully loaded CogVideoX base model!") |
|
|
|
|
|
|
|
|
            loaded_adapters = []
            loaded_weights = []
            for lora_config in LORA_CONFIGS:
                try:
                    pipeline.load_lora_weights(
                        lora_config["repo_id"],
                        weight_name=lora_config["filename"],
                        adapter_name=lora_config["adapter_name"]
                    )
                    loaded_adapters.append(lora_config["adapter_name"])
                    loaded_weights.append(lora_config["scale"])
                    print(f"✓ Loaded LoRA: {lora_config['adapter_name']}")
                except Exception as lora_error:
                    print(f"⚠ LoRA loading failed ({lora_config['adapter_name']}): {lora_error}")

            # Activate only the adapters that loaded successfully; passing a failed
            # adapter name to set_adapters would raise.
            if loaded_adapters:
                pipeline.set_adapters(loaded_adapters, loaded_weights)
                print(f"✓ Applied LoRA adapters with weights: {loaded_weights}")
|
|
|
|
|
except Exception as base_error: |
|
|
print(f"Base model loading failed: {base_error}") |
|
|
print("This should not happen with official CogVideoX model") |
|
|
return False |
|
|
|
|
|
pipeline = pipeline.to(device) |
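        # When CUDA is available, enable_model_cpu_offload() below manages device
        # placement itself, so the .to(device) call above mainly matters for CPU runs.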
|
|
|
|
|
|
|
|
if torch.cuda.is_available(): |
|
|
try: |
|
|
if hasattr(pipeline, 'enable_model_cpu_offload'): |
|
|
pipeline.enable_model_cpu_offload() |
|
|
if hasattr(pipeline, 'enable_vae_tiling'): |
|
|
pipeline.enable_vae_tiling() |
|
|
try: |
|
|
pipeline.enable_xformers_memory_efficient_attention() |
|
|
                except Exception:
|
|
pass |
|
|
print("CogVideoX memory optimizations applied") |
|
|
except Exception as mem_error: |
|
|
print(f"Memory optimization warning: {mem_error}") |
|
|
|
|
|
|
|
|
if COMPEL_AVAILABLE and hasattr(pipeline, 'tokenizer'): |
|
|
try: |
|
|
compel_processor = Compel( |
|
|
tokenizer=pipeline.tokenizer, |
|
|
text_encoder=pipeline.text_encoder, |
|
|
returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED, |
|
|
truncate_long_prompts=False |
|
|
) |
|
|
print("✅ Long prompt processor (Compel) initialized successfully") |
|
|
except Exception as compel_error: |
|
|
print(f"⚠️ Compel initialization failed: {compel_error}") |
|
|
compel_processor = None |
|
|
|
|
|
model_loaded = True |
|
|
print("✅ T2V Model initialization complete") |
|
|
return True |
|
|
|
|
|
except Exception as e: |
|
|
print(f"❌ Critical model loading error: {e}") |
|
|
print(traceback.format_exc()) |
|
|
model_loaded = False |
|
|
return False |
|
|
|
|
|
def enhance_prompt(prompt: str, style: str) -> str: |
|
|
"""修复后的增强提示词函数 - 视频优化""" |
|
|
if not prompt or prompt.strip() == "": |
|
|
return "" |
|
|
|
|
|
enhanced_parts = [prompt.strip()] |
|
|
|
|
|
|
|
|
style_prefix = STYLE_PRESETS.get(style, "") |
|
|
if style_prefix and style != "None": |
|
|
enhanced_parts.insert(0, style_prefix) |
|
|
|
|
|
|
|
|
if style in STYLE_ENHANCERS and style != "None": |
|
|
style_terms = ", ".join(STYLE_ENHANCERS[style]) |
|
|
enhanced_parts.append(style_terms) |
|
|
|
|
|
|
|
|
quality_terms = ", ".join(QUALITY_ENHANCERS) |
|
|
enhanced_parts.append(quality_terms) |
|
|
|
|
|
enhanced_prompt = ", ".join(filter(None, enhanced_parts)) |
|
|
|
|
|
print(f"🎨 Style: {style}") |
|
|
print(f"📝 Original prompt: {prompt[:100]}...") |
|
|
print(f"✨ Enhanced prompt: {enhanced_prompt[:150]}...") |
|
|
|
|
|
return enhanced_prompt |
|
|
|
|
|
def process_long_prompt(prompt, negative_prompt=""): |
|
|
"""处理长提示词""" |
|
|
if not compel_processor: |
|
|
return None, None |
|
|
|
|
|
try: |
|
|
conditioning = compel_processor([prompt, negative_prompt]) |
|
|
return conditioning, None |
|
|
except Exception as e: |
|
|
print(f"Long prompt processing failed: {e}") |
|
|
return None, None |
|
|
|
|
|
def apply_spaces_decorator(func): |
|
|
"""应用spaces装饰器 - 增加超时时间""" |
|
|
if SPACES_AVAILABLE: |
|
|
return spaces.GPU(duration=120)(func) |
|
|
return func |
|
|
|
|
|
def create_metadata_content(prompt, enhanced_prompt, seed, steps, cfg_scale, width, height, duration, fps, style): |
|
|
"""创建元数据内容 - 视频版本""" |
|
|
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") |
|
|
return f"""Generated Video Metadata |
|
|
====================== |
|
|
Timestamp: {timestamp} |
|
|
Original Prompt: {prompt} |
|
|
Enhanced Prompt: {enhanced_prompt} |
|
|
Seed: {seed} |
|
|
Steps: {steps} |
|
|
CFG Scale: {cfg_scale} |
|
|
Dimensions: {width}×{height} |
|
|
Duration: {duration}s |
|
|
FPS: {fps} |
|
|
Style: {style} |
|
|
Model: {BASE_MODEL}
|
|
Total Frames: {int(duration * fps)} |
|
|
""" |
|
|
|
|
|
def frames_to_video(frames, output_path, fps=24, format="mp4"): |
|
|
"""将帧序列转换为视频文件""" |
|
|
try: |
|
|
        if format.lower() == "gif":
            # imageio expects array-like frames; convert any PIL images first
            gif_frames = [np.array(f) if isinstance(f, Image.Image) else f for f in frames]
            imageio.mimsave(output_path, gif_frames, fps=fps, loop=0)
|
|
else: |
|
|
|
|
|
writer = imageio.get_writer(output_path, fps=fps, codec='libx264', quality=8) |
|
|
for frame in frames: |
|
|
if isinstance(frame, Image.Image): |
|
|
frame = np.array(frame) |
|
|
writer.append_data(frame) |
|
|
writer.close() |
|
|
|
|
|
return True |
|
|
except Exception as e: |
|
|
print(f"Video creation error: {e}") |
|
|
return False |
|
|
|
|
|
@apply_spaces_decorator |
|
|
def generate_video(prompt: str, style: str, negative_prompt: str = "", steps: int = 20, cfg_scale: float = 7.0, |
|
|
seed: int = -1, width: int = 512, height: int = 512, duration: float = 4.0, fps: int = 24, |
|
|
progress=gr.Progress()): |
|
|
"""视频生成函数""" |
|
|
if not prompt or prompt.strip() == "": |
|
|
return None, None, "", "" |
|
|
|
|
|
|
|
|
progress(0.1, desc="Loading T2V model...") |
|
|
if not initialize_model(): |
|
|
return None, None, "", "❌ Failed to load T2V model" |
|
|
|
|
|
progress(0.2, desc="Processing prompt...") |
|
|
|
|
|
try: |
|
|
|
|
|
if seed == -1: |
|
|
seed = random.randint(0, np.iinfo(np.int32).max) |
|
|
|
|
|
|
|
|
enhanced_prompt = enhance_prompt(prompt.strip(), style) |
|
|
|
|
|
|
|
|
if not negative_prompt.strip(): |
|
|
negative_prompt = "(low quality, worst quality:1.4), (bad anatomy:1.2), blurry, watermark, signature, text, error, distorted motion, choppy animation, inconsistent lighting, frame drops, stuttering" |
|
|
|
|
|
|
|
|
num_frames = int(duration * fps) |
|
|
print(f"🎬 Generating {num_frames} frames at {fps} FPS for {duration}s video") |
|
|
|
|
|
|
|
|
generator = torch.Generator(device).manual_seed(seed) |
|
|
|
|
|
progress(0.3, desc=f"Generating {duration}s video...") |
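
        # Heuristic: route long enhanced prompts through Compel so they are not
        # truncated by the tokenizer; short prompts go straight to the pipeline.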
|
|
|
|
|
|
|
|
use_long_prompt = len(enhanced_prompt.split()) > 60 or len(enhanced_prompt) > 300 |
|
|
|
|
|
generation_kwargs = { |
|
|
"num_inference_steps": steps, |
|
|
"guidance_scale": cfg_scale, |
|
|
"width": width, |
|
|
"height": height, |
|
|
"num_frames": num_frames, |
|
|
"generator": generator |
|
|
} |
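
        # If Compel produced embeddings, pass prompt_embeds/negative_prompt_embeds;
        # otherwise fall back to plain prompt strings.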
|
|
|
|
|
if use_long_prompt and compel_processor: |
|
|
conditioning, _ = process_long_prompt(enhanced_prompt, negative_prompt) |
|
|
if conditioning is not None: |
|
|
result = pipeline( |
|
|
prompt_embeds=conditioning[0:1], |
|
|
negative_prompt_embeds=conditioning[1:2], |
|
|
**generation_kwargs |
|
|
) |
|
|
else: |
|
|
result = pipeline( |
|
|
prompt=enhanced_prompt, |
|
|
negative_prompt=negative_prompt, |
|
|
**generation_kwargs |
|
|
) |
|
|
else: |
|
|
result = pipeline( |
|
|
prompt=enhanced_prompt, |
|
|
negative_prompt=negative_prompt, |
|
|
**generation_kwargs |
|
|
) |
|
|
|
|
|
progress(0.8, desc="Processing video output...") |
|
|
|
|
|
|
|
|
if hasattr(result, 'frames') and result.frames is not None: |
|
|
frames = result.frames[0] |
|
|
elif hasattr(result, 'images') and result.images is not None: |
|
|
frames = result.images |
|
|
else: |
|
|
return None, None, "", "❌ No frames generated from model" |
|
|
|
|
|
print(f"📹 Generated {len(frames)} frames") |
|
|
|
|
|
|
|
|
with tempfile.TemporaryDirectory() as temp_dir: |
|
|
|
|
|
mp4_path = os.path.join(temp_dir, f"video_{seed}.mp4") |
|
|
mp4_success = frames_to_video(frames, mp4_path, fps=fps, format="mp4") |
|
|
|
|
|
|
|
|
gif_path = os.path.join(temp_dir, f"video_{seed}.gif") |
|
|
gif_success = frames_to_video(frames, gif_path, fps=fps, format="gif") |
|
|
|
|
|
if not mp4_success and not gif_success: |
|
|
return None, None, "", "❌ Failed to create video files" |
|
|
|
|
|
|
|
|
final_mp4_path = None |
|
|
final_gif_path = None |
|
|
|
|
|
if mp4_success: |
|
|
final_mp4_path = os.path.join(SAVE_DIR, f"video_{seed}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.mp4") |
|
|
shutil.copy2(mp4_path, final_mp4_path) |
|
|
|
|
|
if gif_success: |
|
|
final_gif_path = os.path.join(SAVE_DIR, f"video_{seed}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.gif") |
|
|
shutil.copy2(gif_path, final_gif_path) |
|
|
|
|
|
progress(0.95, desc="Creating metadata...") |
|
|
|
|
|
|
|
|
metadata_content = create_metadata_content( |
|
|
prompt, enhanced_prompt, seed, steps, cfg_scale, width, height, duration, fps, style |
|
|
) |
|
|
|
|
|
progress(1.0, desc="Complete!") |
|
|
|
|
|
|
|
|
generation_info = f"Style: {style} | Seed: {seed} | Size: {width}×{height} | Duration: {duration}s | FPS: {fps} | Frames: {num_frames}" |
|
|
|
|
|
|
|
|
return final_mp4_path, final_gif_path, generation_info, metadata_content |
|
|
|
|
|
except Exception as e: |
|
|
error_msg = str(e) |
|
|
print(f"Generation error: {error_msg}") |
|
|
print(traceback.format_exc()) |
|
|
return None, None, "", f"❌ Generation failed: {error_msg}" |
|
|
|
|
|
|
|
|
css = """ |
|
|
/* Global container */
|
|
.gradio-container { |
|
|
max-width: 100% !important; |
|
|
margin: 0 !important; |
|
|
padding: 0 !important; |
|
|
background: linear-gradient(135deg, #e6a4f2 0%, #1197e4 100%) !important; |
|
|
min-height: 100vh !important; |
|
|
font-family: 'Segoe UI', Arial, sans-serif !important; |
|
|
} |
|
|
|
|
|
/* Main content area */
|
|
.main-content { |
|
|
background: rgba(255, 255, 255, 0.9) !important; |
|
|
border-radius: 20px !important; |
|
|
padding: 20px !important; |
|
|
margin: 15px !important; |
|
|
box-shadow: 0 10px 25px rgba(255, 255, 255, 0.2) !important; |
|
|
min-height: calc(100vh - 30px) !important; |
|
|
color: #3e3e3e !important; |
|
|
backdrop-filter: blur(10px) !important; |
|
|
} |
|
|
|
|
|
/* Simplified title */
|
|
.title { |
|
|
text-align: center !important; |
|
|
background: linear-gradient(45deg, #bb6ded, #08676b) !important; |
|
|
-webkit-background-clip: text !important; |
|
|
-webkit-text-fill-color: transparent !important; |
|
|
background-clip: text !important; |
|
|
font-size: 2rem !important; |
|
|
margin-bottom: 15px !important; |
|
|
font-weight: bold !important; |
|
|
} |
|
|
|
|
|
/* Simplified warning banner */
|
|
.warning-box { |
|
|
background: linear-gradient(45deg, #bb6ded, #08676b) !important; |
|
|
color: white !important; |
|
|
padding: 8px !important; |
|
|
border-radius: 8px !important; |
|
|
margin-bottom: 15px !important; |
|
|
text-align: center !important; |
|
|
font-weight: bold !important; |
|
|
font-size: 14px !important; |
|
|
} |
|
|
|
|
|
/* Input box styling - fixed background color */
|
|
.prompt-box textarea, .prompt-box input { |
|
|
border-radius: 10px !important; |
|
|
border: 2px solid #bb6ded !important; |
|
|
padding: 15px !important; |
|
|
font-size: 18px !important; |
|
|
background: linear-gradient(135deg, rgba(245, 243, 255, 0.9), rgba(237, 233, 254, 0.9)) !important; |
|
|
color: #2d2d2d !important; |
|
|
} |
|
|
|
|
|
.prompt-box textarea:focus, .prompt-box input:focus { |
|
|
border-color: #08676b !important; |
|
|
box-shadow: 0 0 15px rgba(77, 8, 161, 0.3) !important; |
|
|
background: linear-gradient(135deg, rgba(255, 255, 255, 0.95), rgba(248, 249, 250, 0.95)) !important; |
|
|
} |
|
|
|
|
|
/* Right-hand controls section - fixed background color */
|
|
.controls-section { |
|
|
background: linear-gradient(135deg, rgba(224, 218, 255, 0.8), rgba(196, 181, 253, 0.8)) !important; |
|
|
border-radius: 12px !important; |
|
|
padding: 15px !important; |
|
|
margin-bottom: 8px !important; |
|
|
border: 2px solid rgba(187, 109, 237, 0.3) !important; |
|
|
backdrop-filter: blur(5px) !important; |
|
|
} |
|
|
|
|
|
.controls-section label { |
|
|
font-weight: 600 !important; |
|
|
color: #2d2d2d !important; |
|
|
margin-bottom: 8px !important; |
|
|
} |
|
|
|
|
|
/* Fix radio button and input backgrounds */
|
|
.controls-section input[type="radio"] { |
|
|
accent-color: #bb6ded !important; |
|
|
} |
|
|
|
|
|
.controls-section input[type="number"], |
|
|
.controls-section input[type="range"] { |
|
|
background: rgba(255, 255, 255, 0.9) !important; |
|
|
border: 1px solid #bb6ded !important; |
|
|
border-radius: 6px !important; |
|
|
padding: 8px !important; |
|
|
color: #2d2d2d !important; |
|
|
} |
|
|
|
|
|
.controls-section select { |
|
|
background: rgba(255, 255, 255, 0.9) !important; |
|
|
border: 1px solid #bb6ded !important; |
|
|
border-radius: 6px !important; |
|
|
padding: 8px !important; |
|
|
color: #2d2d2d !important; |
|
|
} |
|
|
|
|
|
/* Generate button */
|
|
.generate-btn { |
|
|
background: linear-gradient(45deg, #bb6ded, #08676b) !important; |
|
|
color: white !important; |
|
|
border: none !important; |
|
|
padding: 15px 25px !important; |
|
|
border-radius: 25px !important; |
|
|
font-size: 16px !important; |
|
|
font-weight: bold !important; |
|
|
width: 100% !important; |
|
|
cursor: pointer !important; |
|
|
transition: all 0.3s ease !important; |
|
|
text-transform: uppercase !important; |
|
|
letter-spacing: 1px !important; |
|
|
} |
|
|
|
|
|
.generate-btn:hover { |
|
|
transform: translateY(-2px) !important; |
|
|
box-shadow: 0 8px 25px rgba(187, 109, 237, 0.5) !important; |
|
|
} |
|
|
|
|
|
/* Video output area */
|
|
.video-output { |
|
|
border-radius: 15px !important; |
|
|
overflow: hidden !important; |
|
|
max-width: 100% !important; |
|
|
max-height: 70vh !important; |
|
|
border: 3px solid #08676b !important; |
|
|
box-shadow: 0 8px 20px rgba(0,0,0,0.15) !important; |
|
|
background: linear-gradient(135deg, rgba(255, 255, 255, 0.9), rgba(248, 249, 250, 0.9)) !important; |
|
|
} |
|
|
|
|
|
/* Download button styling */
|
|
.download-btn { |
|
|
background: linear-gradient(45deg, #28a745, #20c997) !important; |
|
|
color: white !important; |
|
|
border: none !important; |
|
|
padding: 10px 20px !important; |
|
|
border-radius: 20px !important; |
|
|
font-size: 14px !important; |
|
|
font-weight: bold !important; |
|
|
margin: 5px !important; |
|
|
cursor: pointer !important; |
|
|
transition: all 0.3s ease !important; |
|
|
} |
|
|
|
|
|
.download-btn:hover { |
|
|
transform: translateY(-1px) !important; |
|
|
box-shadow: 0 5px 15px rgba(40, 167, 69, 0.4) !important; |
|
|
} |
|
|
|
|
|
/* Video info area */
|
|
.video-info { |
|
|
background: linear-gradient(135deg, rgba(248, 249, 250, 0.2), rgba(233, 236, 239, 0.9)) !important; |
|
|
border-radius: 8px !important; |
|
|
padding: 12px !important; |
|
|
margin-top: 10px !important; |
|
|
font-size: 12px !important; |
|
|
color: #495057 !important; |
|
|
border: 2px solid rgba(187, 109, 237, 0.2) !important; |
|
|
backdrop-filter: blur(5px) !important; |
|
|
} |
|
|
|
|
|
/* Metadata area styling */
|
|
.metadata-box { |
|
|
background: linear-gradient(135deg, rgba(248, 249, 250, 0.2), rgba(233, 236, 239, 0.9)) !important; |
|
|
border-radius: 8px !important; |
|
|
padding: 15px !important; |
|
|
margin-top: 15px !important; |
|
|
font-family: 'Courier New', monospace !important; |
|
|
font-size: 12px !important; |
|
|
color: #495057 !important; |
|
|
border: 2px solid rgba(187, 109, 237, 0.2) !important; |
|
|
backdrop-filter: blur(5px) !important; |
|
|
white-space: pre-wrap !important; |
|
|
overflow-y: auto !important; |
|
|
max-height: 300px !important; |
|
|
} |
|
|
|
|
|
/* Slider styling */
|
|
.slider-container input[type="range"] { |
|
|
accent-color: #bb6ded !important; |
|
|
} |
|
|
|
|
|
/* Responsive design */
|
|
@media (max-width: 768px) { |
|
|
.main-content { |
|
|
margin: 10px !important; |
|
|
padding: 15px !important; |
|
|
} |
|
|
|
|
|
.title { |
|
|
font-size: 1.5rem !important; |
|
|
} |
|
|
} |
|
|
|
|
|
/* Force-override Gradio default styles */
|
|
.gradio-container .gr-textbox, |
|
|
.gradio-container .gr-radio-group, |
|
|
.gradio-container .gr-slider, |
|
|
.gradio-container .gr-number { |
|
|
background: rgba(255, 255, 255, 0.95) !important; |
|
|
border: 1px solid rgba(187, 109, 237, 0.5) !important; |
|
|
border-radius: 8px !important; |
|
|
} |
|
|
|
|
|
.gradio-container .gr-radio-group label { |
|
|
color: #2d2d2d !important; |
|
|
background: transparent !important; |
|
|
} |
|
|
""" |
|
|
|
|
|
|
|
|
def create_interface(): |
|
|
with gr.Blocks(css=css, title="Adult NSFW AI Video Generator") as interface: |
|
|
with gr.Column(elem_classes=["main-content"]): |
|
|
|
|
|
gr.HTML('<div class="title">🎬 Adult NSFW AI Video Generator</div>') |
|
|
|
|
|
|
|
|
gr.HTML(''' |
|
|
<div class="warning-box"> |
|
|
⚠️ 18+ CONTENT WARNING ⚠️ | Model: NSFW_Wan_14b → CogVideoX Architecture |
|
|
</div> |
|
|
''') |
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
with gr.Column(scale=2): |
|
|
prompt_input = gr.Textbox( |
|
|
label="Detailed Video Prompt", |
|
|
placeholder="Describe the video scene you want to generate...", |
|
|
lines=12, |
|
|
elem_classes=["prompt-box"] |
|
|
) |
|
|
|
|
|
negative_prompt_input = gr.Textbox( |
|
|
label="Negative Prompt (Optional)", |
|
|
placeholder="Things you don't want in the video...", |
|
|
lines=4, |
|
|
elem_classes=["prompt-box"] |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Column(scale=1): |
|
|
|
|
|
with gr.Group(elem_classes=["controls-section"]): |
|
|
style_input = gr.Radio( |
|
|
label="Style Preset", |
|
|
choices=list(STYLE_PRESETS.keys()), |
|
|
value="Cinematic" |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(elem_classes=["controls-section"]): |
|
|
gr.HTML("<b>📹 Video Settings</b>") |
|
|
duration_input = gr.Slider( |
|
|
label=f"Duration (seconds)", |
|
|
minimum=1.0, |
|
|
maximum=VIDEO_CONFIG["max_duration"], |
|
|
value=VIDEO_CONFIG["default_duration"], |
|
|
step=0.5 |
|
|
) |
|
|
|
|
|
fps_input = gr.Slider( |
|
|
label="FPS (Frames per Second)", |
|
|
minimum=12, |
|
|
maximum=VIDEO_CONFIG["max_fps"], |
|
|
value=VIDEO_CONFIG["default_fps"], |
|
|
step=6 |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(elem_classes=["controls-section"]): |
|
|
gr.HTML("<b>📐 Resolution</b>") |
|
|
width_input = gr.Slider( |
|
|
label="Width", |
|
|
minimum=256, |
|
|
maximum=VIDEO_CONFIG["max_resolution"], |
|
|
value=VIDEO_CONFIG["default_width"], |
|
|
step=64 |
|
|
) |
|
|
|
|
|
height_input = gr.Slider( |
|
|
label="Height", |
|
|
minimum=256, |
|
|
maximum=VIDEO_CONFIG["max_resolution"], |
|
|
value=VIDEO_CONFIG["default_height"], |
|
|
step=64 |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(elem_classes=["controls-section"]): |
|
|
seed_input = gr.Number( |
|
|
label="Seed (-1 for random)", |
|
|
value=-1, |
|
|
precision=0 |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(elem_classes=["controls-section"]): |
|
|
gr.HTML("<b>⚙️ Advanced</b>") |
|
|
steps_input = gr.Slider( |
|
|
label="Steps", |
|
|
minimum=10, |
|
|
maximum=30, |
|
|
value=20, |
|
|
step=1 |
|
|
) |
|
|
|
|
|
cfg_input = gr.Slider( |
|
|
label="CFG Scale", |
|
|
minimum=1.0, |
|
|
maximum=15.0, |
|
|
value=7.0, |
|
|
step=0.1 |
|
|
) |
|
|
|
|
|
|
|
|
generate_button = gr.Button( |
|
|
"🎬 GENERATE VIDEO", |
|
|
elem_classes=["generate-btn"], |
|
|
variant="primary" |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
video_output = gr.Video( |
|
|
label="Generated Video (MP4)", |
|
|
elem_classes=["video-output"], |
|
|
show_label=True, |
|
|
height=400 |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
with gr.Column(scale=1): |
|
|
mp4_download = gr.File( |
|
|
label="📱 Download MP4", |
|
|
visible=False |
|
|
) |
|
|
with gr.Column(scale=1): |
|
|
gif_download = gr.File( |
|
|
label="🎞️ Download GIF", |
|
|
visible=False |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
generation_info = gr.Textbox( |
|
|
label="Generation Info", |
|
|
interactive=False, |
|
|
elem_classes=["video-info"], |
|
|
show_label=True, |
|
|
visible=False |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
metadata_display = gr.Textbox( |
|
|
label="Video Metadata (Copy to save)", |
|
|
interactive=True, |
|
|
elem_classes=["metadata-box"], |
|
|
show_label=True, |
|
|
lines=15, |
|
|
visible=False, |
|
|
placeholder="Generated video metadata will appear here..." |
|
|
) |
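
            # on_generate adapts generate_video()'s 4-tuple to the 7 outputs wired up
            # below: the video player value, the two download file values, visible+value
            # updates for the info/metadata boxes, and two visibility toggles for the
            # download slots (which is why those components appear twice in `outputs`).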
|
|
|
|
|
|
|
|
def on_generate(prompt, style, neg_prompt, steps, cfg, seed, width, height, duration, fps): |
|
|
mp4_path, gif_path, info, metadata = generate_video( |
|
|
prompt, style, neg_prompt, steps, cfg, seed, width, height, duration, fps |
|
|
) |
|
|
|
|
|
if mp4_path is not None: |
|
|
return ( |
|
|
mp4_path, |
|
|
mp4_path if mp4_path and os.path.exists(mp4_path) else None, |
|
|
gif_path if gif_path and os.path.exists(gif_path) else None, |
|
|
gr.update(visible=True, value=info), |
|
|
gr.update(visible=True, value=metadata), |
|
|
gr.update(visible=True), |
|
|
gr.update(visible=True) |
|
|
) |
|
|
else: |
|
|
return ( |
|
|
None, |
|
|
None, |
|
|
None, |
|
|
gr.update(visible=True, value=info if info else "Generation failed"), |
|
|
gr.update(visible=False), |
|
|
gr.update(visible=False), |
|
|
gr.update(visible=False) |
|
|
) |
|
|
|
|
|
|
|
|
generate_button.click( |
|
|
fn=on_generate, |
|
|
inputs=[ |
|
|
prompt_input, style_input, negative_prompt_input, |
|
|
steps_input, cfg_input, seed_input, width_input, height_input, |
|
|
duration_input, fps_input |
|
|
], |
|
|
outputs=[ |
|
|
video_output, mp4_download, gif_download, |
|
|
generation_info, metadata_display, |
|
|
mp4_download, gif_download |
|
|
], |
|
|
show_progress=True |
|
|
) |
|
|
|
|
|
|
|
|
prompt_input.submit( |
|
|
fn=on_generate, |
|
|
inputs=[ |
|
|
prompt_input, style_input, negative_prompt_input, |
|
|
steps_input, cfg_input, seed_input, width_input, height_input, |
|
|
duration_input, fps_input |
|
|
], |
|
|
outputs=[ |
|
|
video_output, mp4_download, gif_download, |
|
|
generation_info, metadata_display, |
|
|
mp4_download, gif_download |
|
|
], |
|
|
show_progress=True |
|
|
) |
|
|
|
|
|
|
|
|
interface.load( |
|
|
fn=lambda: ( |
|
|
None, None, None, |
|
|
gr.update(visible=False), |
|
|
gr.update(visible=False), |
|
|
gr.update(visible=False), |
|
|
gr.update(visible=False) |
|
|
), |
|
|
outputs=[ |
|
|
video_output, mp4_download, gif_download, |
|
|
generation_info, metadata_display, |
|
|
mp4_download, gif_download |
|
|
] |
|
|
) |
|
|
|
|
|
return interface |
|
|
|
|
|
|
|
|
if __name__ == "__main__": |
|
|
print("🎬 Starting NSFW Video Generator...") |
|
|
print("Using complete repository structure for proper loading") |
|
|
print(f"🔧 Default Duration: {VIDEO_CONFIG['default_duration']}s") |
|
|
print(f"🔧 Default Resolution: {VIDEO_CONFIG['default_width']}×{VIDEO_CONFIG['default_height']}") |
|
|
print(f"🔧 Spaces GPU: {'✅ Available' if SPACES_AVAILABLE else '❌ Not Available'}") |
|
|
print(f"🔧 Compel Library: {'✅ Available' if COMPEL_AVAILABLE else '❌ Not Available'}") |
|
|
print(f"🔧 CUDA: {'✅ Available' if torch.cuda.is_available() else '❌ Not Available'}") |
|
|
|
|
|
app = create_interface() |
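    # Single-worker queue: one generation at a time, with up to 5 queued requests.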
|
|
app.queue(max_size=5, default_concurrency_limit=1) |
|
|
|
|
|
app.launch( |
|
|
server_name="0.0.0.0", |
|
|
server_port=7860, |
|
|
show_error=True, |
|
|
share=False |
|
|
) |