import argparse
import os
from datetime import datetime
from pathlib import Path
from typing import List

import numpy as np
import torch
from PIL import Image
import gradio as gr
import json
import imageio

# Mock imports for demo - replace with actual imports when models are available
try:
    from huggingface_hub import snapshot_download
    from diffusers import AutoencoderKL, DDIMScheduler
    from transformers import CLIPVisionModelWithProjection
    from omegaconf import OmegaConf
    import spaces
    HAS_MODELS = True
except ImportError as e:
    print(f"Warning: Some dependencies not available: {e}")
    HAS_MODELS = False

MOTION_TRIGGER_WORD = {
    'sports_basketball_gym': 'Basketball in Gym',
    'sports_nba_pass': 'NBA Pass',
    'sports_nba_dunk': 'NBA Dunk',
    'movie_BruceLee1': 'Bruce Lee Style',
    'shorts_kungfu_match1': 'Kung Fu Match',
    'shorts_kungfu_desert1': 'Desert Kung Fu',
    'parkour_climbing': 'Parkour Climbing',
    'dance_indoor_1': 'Indoor Dance',
}

css_style = "#fixed_size_img {height: 500px;}"


def download_models():
    """Download required models from Hugging Face - simplified for demo"""
    print("Model downloading simulation...")

    # Create directory structure
    os.makedirs('./pretrained_weights', exist_ok=True)
    os.makedirs('./assets/masks', exist_ok=True)
    os.makedirs('./assets/test_image', exist_ok=True)
    os.makedirs('./assets/video_template', exist_ok=True)

    if HAS_MODELS:
        # Add actual model downloading logic here
        pass
    else:
        print("Skipping model download - dependencies not available")


class MIMODemo():
    def __init__(self):
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"Using device: {self.device}")

        try:
            download_models()
            print("MIMO demo initialized")
        except Exception as e:
            print(f"Initialization warning: {e}")

    def generate_video(self, image, motion_template):
        """Generate video from image and motion template"""
        try:
            if image is None:
                return None, "⚠️ Please upload an image first."

            print(f"Processing with template: {motion_template}")

            # Create a simple demo video (replace with actual MIMO inference)
            # Convert to RGB once so every frame is H x W x 3 for the encoder
            img_array = np.array(image.convert("RGB"))
            frames = []
            for i in range(30):  # 30 frames for demo
                # Simple horizontal-shift animation effect for the demo
                shift = int(10 * np.sin(i * 0.2))
                transformed = np.roll(img_array, shift, axis=1)
                frames.append(transformed)

            # Save video
            save_dir = 'output'
            os.makedirs(save_dir, exist_ok=True)
            case = datetime.now().strftime("%Y%m%d%H%M%S")
            outpath = f"{save_dir}/{case}.mp4"
            imageio.mimsave(outpath, frames, fps=15, quality=8)
            print(f'Demo video saved to: {outpath}')

            return outpath, f"✅ Generated demo animation for {MOTION_TRIGGER_WORD[motion_template]}!"

        except Exception as e:
            print(f"Error in video generation: {e}")
            return None, f"❌ Error: {str(e)}"


def create_interface():
    """Create Gradio interface compatible with v3.41.2"""

    # Initialize MIMO
    mimo = MIMODemo()

    # Custom CSS
    css = """
    #fixed_size_img {
        height: 500px !important;
        max-height: 500px !important;
    }
    .gradio-container {
        max-width: 1200px !important;
        margin: auto !important;
    }
    """

    with gr.Blocks(css=css, title="MIMO Demo") as demo:
        # Title
        gr.HTML("""
            <div style="text-align: center;">

                <h1>🎭 MIMO Demo - Controllable Character Video Synthesis</h1>

                <p>Transform character images into animated videos with controllable motion and scenes</p>

                <p>Project Page | Paper | GitHub</p>

""") # Instructions with gr.Accordion("🧭 Instructions", open=True): gr.Markdown(""" ### How to use: 1. **Upload a character image**: Use a full-body, front-facing image with clear visibility 2. **Select motion template**: Choose from the available motion templates 3. **Generate**: Click "Generate Animation" to create your character animation ### Tips: - Best results with clear, well-lit character images - Processing may take 1-2 minutes depending on video length - This is a demo version - full functionality requires GPU resources """) with gr.Row(): with gr.Column(): # Input image img_input = gr.Image( label='Upload Character Image', type="pil", elem_id="fixed_size_img" ) # Motion template selector motion_dropdown = gr.Dropdown( choices=list(MOTION_TRIGGER_WORD.keys()), value=list(MOTION_TRIGGER_WORD.keys())[0], label="Select Motion Template", ) # Generate button submit_btn = gr.Button("🎬 Generate Animation", variant='primary') # Status display status_text = gr.Textbox( label="Status", interactive=False, value="Ready to generate... (Demo mode)" ) with gr.Column(): # Output video output_video = gr.Video( label="Generated Animation", elem_id="fixed_size_img" ) # Event handlers submit_btn.click( fn=mimo.generate_video, inputs=[img_input, motion_dropdown], outputs=[output_video, status_text], ) # Example images (if available) example_dir = './assets/test_image' if os.path.exists(example_dir): example_files = [f for f in os.listdir(example_dir) if f.endswith(('.jpg', '.png', '.jpeg'))] if example_files: example_paths = [[os.path.join(example_dir, f)] for f in example_files[:5]] gr.Examples( examples=example_paths, inputs=[img_input], label="Example Images" ) return demo if __name__ == "__main__": print("🚀 Starting MIMO Demo...") # Create and launch interface demo = create_interface() # Launch with settings optimized for HF Spaces demo.launch( server_name="0.0.0.0", server_port=7860, share=False, show_error=True, quiet=False )