import warnings

warnings.filterwarnings("ignore", category=UserWarning)
warnings.filterwarnings("ignore", category=FutureWarning)

from huggingface_hub import logging as hf_logging, hf_hub_download

hf_logging.set_verbosity_error()

import os, sys, subprocess, shlex, tempfile, gc, shutil, contextlib, time, traceback, json, yaml, random
from typing import List, Dict
from pathlib import Path

import torch
import torch.nn.functional as F
import numpy as np
import imageio
from PIL import Image
from einops import rearrange

LTXV_DEBUG = os.getenv("LTXV_DEBUG", "1") == "1"
LTXV_FRAME_LOG_EVERY = int(os.getenv("LTXV_FRAME_LOG_EVERY", "8"))
DEPS_DIR = Path("/data")
LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"

def run_setup():
    setup_script_path = "setup.py"
    if not os.path.exists(setup_script_path):
        print("[DEBUG] 'setup.py' not found. Skipping dependency installation.")
        return
    try:
        print("[DEBUG] Running setup.py to install dependencies...")
        subprocess.run([sys.executable, setup_script_path], check=True, capture_output=True, text=True)
        print("[DEBUG] Setup finished.")
    except subprocess.CalledProcessError as e:
        print(f"[ERROR] Critical failure while running setup.py: {e.stderr}")
        sys.exit(1)

def add_deps_to_path():
    repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
    if repo_path not in sys.path:
        sys.path.insert(0, repo_path)
        print(f"[DEBUG] LTX-Video repository added to sys.path: {repo_path}")


if not LTX_VIDEO_REPO_DIR.exists():
    run_setup()
add_deps_to_path()

from managers.vae_manager import vae_manager_singleton
from tools.video_encode_tool import video_encode_tool_singleton
from ltx_video.pipelines.pipeline_ltx_video import ConditioningItem, LTXMultiScalePipeline, adain_filter_latent
from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
from ltx_video.models.autoencoders.vae_encode import un_normalize_latents, normalize_latents
from api.ltx.inference import (
    create_ltx_video_pipeline, create_latent_upsampler,
    load_image_to_tensor_with_resize_and_crop, seed_everething,
    calculate_padding, load_media_file
)

def calculate_new_dimensions(orig_w, orig_h, target_area=512*768, divisor=8):
    # Returns (height, width) snapped to multiples of `divisor`, preserving the
    # aspect ratio while approximating `target_area` pixels.
    if orig_w <= 0 or orig_h <= 0:
        return 512, 768
    aspect_ratio = orig_w / orig_h
    new_h = int((target_area / aspect_ratio) ** 0.5)
    new_w = int(new_h * aspect_ratio)
    final_w = max(divisor, round(new_w / divisor) * divisor)
    final_h = max(divisor, round(new_h / divisor) * divisor)
    return final_h, final_w

def log_tensor_info(tensor, name="Tensor"):
    if not LTXV_DEBUG:
        return
    if not isinstance(tensor, torch.Tensor):
        print(f"\n[INFO] '{name}' is not a tensor.")
        return
    print(f"\n--- Tensor: {name} ---\n - Shape: {tuple(tensor.shape)}\n - Dtype: {tensor.dtype}\n - Device: {tensor.device}")
    if tensor.numel() > 0:
        try:
            print(f" - Stats: Min={tensor.min().item():.4f}, Max={tensor.max().item():.4f}, Mean={tensor.mean().item():.4f}")
        except Exception:
            pass
    print("------------------------------------------\n")


class VideoService:
    def __init__(self):
        t0 = time.perf_counter()
        print("[INFO] Initializing VideoService...")
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.config = self._load_config()
        print(f"[INFO] Config: {self.config.get('precision')}, Sampler: {self.config.get('sampler')}, Device: {self.device}")
        self._tmp_dirs, self._tmp_files = set(), set()
        self.pipeline, self.latent_upsampler = self._load_models()
        self.pipeline.to(self.device)
        if self.latent_upsampler:
            self.latent_upsampler.to(self.device)
        self._apply_precision_policy()
        vae_manager_singleton.attach_pipeline(self.pipeline, device=self.device, autocast_dtype=self.runtime_autocast_dtype)
        if self.device == "cuda":
            torch.cuda.empty_cache()
        print(f"[SUCCESS] VideoService ready. ({time.perf_counter()-t0:.2f}s)")

    def _load_config(self):
        # Use the first config file that exists, preferring the fp8 variants.
        base = LTX_VIDEO_REPO_DIR / "configs"
        candidates = [
            base / "ltxv-13b-0.9.8-dev-fp8.yaml",
            base / "ltxv-13b-0.9.8-distilled-fp8.yaml",
            base / "ltxv-13b-0.9.8-distilled.yaml",
        ]
        for cfg_path in candidates:
            if cfg_path.exists():
                with open(cfg_path, "r") as file:
                    return yaml.safe_load(file)
        raise FileNotFoundError(f"No YAML config file found in {base}.")

    def _load_models(self):
        t0 = time.perf_counter()
        repo_id = self.config.get("repo_id", "Lightricks/LTX-Video")

        ckpt_path = hf_hub_download(repo_id=repo_id, filename=self.config["checkpoint_path"], token=os.getenv("HF_TOKEN"))
        pipeline = create_ltx_video_pipeline(
            ckpt_path=ckpt_path,
            precision=self.config["precision"],
            text_encoder_model_name_or_path=self.config["text_encoder_model_name_or_path"],
            sampler=self.config["sampler"],
            device="cpu"
        )

        latent_upsampler = None
        if self.config.get("spatial_upscaler_model_path"):
            upscaler_path = hf_hub_download(repo_id=repo_id, filename=self.config["spatial_upscaler_model_path"], token=os.getenv("HF_TOKEN"))
            latent_upsampler = create_latent_upsampler(upscaler_path, device="cpu")

        print(f"[DEBUG] Models loaded in {time.perf_counter() - t0:.2f}s")
        return pipeline, latent_upsampler

    def _apply_precision_policy(self):
        prec = str(self.config.get("precision", "")).lower()
        self.runtime_autocast_dtype = torch.float32
        if "bfloat16" in prec or "fp8" in prec:
            self.runtime_autocast_dtype = torch.bfloat16
        elif "mixed_precision" in prec or "fp16" in prec:
            self.runtime_autocast_dtype = torch.float16
        print(f"[DEBUG] Autocast dtype: {self.runtime_autocast_dtype}")

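    # generate() calls _upsample_latents_internal(), but no definition was present
    # in this file. The sketch below is a minimal, assumption-based implementation
    # following the usual LTX-Video recipe: the spatial latent upsampler operates
    # on un-normalized latents, so we un-normalize using the pipeline's VAE stats,
    # upsample, and re-normalize.
    @torch.no_grad()
    def _upsample_latents_internal(self, latents: torch.Tensor) -> torch.Tensor:
        if self.latent_upsampler is None:
            raise RuntimeError("No latent upsampler loaded (spatial_upscaler_model_path not set in config).")
        latents = un_normalize_latents(latents, self.pipeline.vae, vae_per_channel_normalize=True)
        upsampled = self.latent_upsampler(latents)
        return normalize_latents(upsampled, self.pipeline.vae, vae_per_channel_normalize=True)
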
    def finalize(self, keep_paths=None, clear_gpu=True):
        print("[INFO] Finalize: cleaning up resources...")
        keep = set(keep_paths or [])
        for f in list(self._tmp_files):
            try:
                if f not in keep and os.path.isfile(f):
                    os.remove(f)
            except Exception as e:
                print(f"[WARN] Failed to remove tmp file {f}: {e}")
            finally:
                self._tmp_files.discard(f)
        for d in list(self._tmp_dirs):
            try:
                if d not in keep and os.path.isdir(d):
                    shutil.rmtree(d, ignore_errors=True)
            except Exception as e:
                print(f"[WARN] Failed to remove tmp dir {d}: {e}")
            finally:
                self._tmp_dirs.discard(d)
        gc.collect()
        if clear_gpu and self.device == "cuda":
            try:
                torch.cuda.empty_cache()
                torch.cuda.ipc_collect()
            except Exception as e:
                print(f"[ERROR] GPU cleanup failed: {e}")

|
|
def _dividir_latentes_por_tamanho(self, latents_brutos, num_latente_por_chunk: int, overlap: int = 1): |
|
|
total_latentes = latents_brutos.shape[2] |
|
|
if num_latente_por_chunk >= total_latentes: |
|
|
return [latents_brutos] |
|
|
|
|
|
chunks = [] |
|
|
start = 0 |
|
|
while start < total_latentes: |
|
|
end = min(start + num_latente_por_chunk, total_latentes) |
|
|
|
|
|
end_with_overlap = min(end + overlap, total_latentes) if end < total_latentes else end |
|
|
chunk = latents_brutos[:, :, start:end_with_overlap, :, :].clone().detach() |
|
|
chunks.append(chunk) |
|
|
if LTXV_DEBUG: print(f"[DEBUG] Chunk criado: frames {start} a {end_with_overlap}") |
|
|
start = end |
|
|
return chunks |
|
|
|
|
|
    def _get_total_frames(self, video_path: str) -> int:
        cmd = ["ffprobe", "-v", "error", "-select_streams", "v:0", "-count_frames", "-show_entries", "stream=nb_read_frames", "-of", "default=nokey=1:noprint_wrappers=1", str(video_path)]
        try:
            result = subprocess.run(cmd, capture_output=True, text=True, check=True)
            return int(result.stdout.strip())
        except (subprocess.CalledProcessError, ValueError) as e:
            print(f"[ERROR] ffprobe failed for {video_path}: {e}")
            return 0

    def _gerar_lista_com_transicoes(self, pasta: str, video_paths: list[str], crossfade_frames: int = 8) -> list[str]:
        if len(video_paths) <= 1:
            return video_paths

        print("[DEBUG] Starting concatenation with cross-fade transitions...")
        arquivos_para_concatenar = []
        temp_blend_files = []

        # First clip: keep everything except its last `crossfade_frames` frames.
        primeiro_video = video_paths[0]
        total_frames_primeiro = self._get_total_frames(primeiro_video)
        path_primeiro_cortado = os.path.join(pasta, "0_head.mp4")
        cmd_primeiro = f'ffmpeg -y -hide_banner -loglevel error -i "{primeiro_video}" -vf "trim=end_frame={total_frames_primeiro - crossfade_frames},setpts=PTS-STARTPTS" -an "{path_primeiro_cortado}"'
        subprocess.run(cmd_primeiro, shell=True, check=True)
        arquivos_para_concatenar.append(path_primeiro_cortado)

        for i in range(len(video_paths) - 1):
            video_A_path = video_paths[i]
            video_B_path = video_paths[i + 1]
            total_frames_A = self._get_total_frames(video_A_path)

            # Extract the tail of A and the head of B (`crossfade_frames` each).
            cauda_A = os.path.join(pasta, f"{i}_tail_A.mp4")
            cabeca_B = os.path.join(pasta, f"{i+1}_head_B.mp4")
            cmd_cauda_A = f'ffmpeg -y -hide_banner -loglevel error -i "{video_A_path}" -vf "trim=start_frame={total_frames_A - crossfade_frames},setpts=PTS-STARTPTS" -an "{cauda_A}"'
            cmd_cabeca_B = f'ffmpeg -y -hide_banner -loglevel error -i "{video_B_path}" -vf "trim=end_frame={crossfade_frames},setpts=PTS-STARTPTS" -an "{cabeca_B}"'
            subprocess.run(cmd_cauda_A, shell=True, check=True)
            subprocess.run(cmd_cabeca_B, shell=True, check=True)

            # Cross-fade the two overlapping segments. The blend filter's N variable
            # is the frame index, so the fade completes over exactly
            # `crossfade_frames` frames regardless of the frame rate.
            blend_path = os.path.join(pasta, f"blend_{i}_{i+1}.mp4")
            cmd_blend = f'ffmpeg -y -hide_banner -loglevel error -i "{cauda_A}" -i "{cabeca_B}" -filter_complex "[0:v][1:v]blend=all_expr=\'A*(1-N/{crossfade_frames})+B*(N/{crossfade_frames})\',format=yuv420p" -an "{blend_path}"'
            subprocess.run(cmd_blend, shell=True, check=True)
            arquivos_para_concatenar.append(blend_path)
            temp_blend_files.extend([cauda_A, cabeca_B])

            # Middle clips: drop the head and tail already consumed by the blends.
            if i + 1 < len(video_paths) - 1:
                meio_B = os.path.join(pasta, f"{i+1}_body.mp4")
                total_frames_B = self._get_total_frames(video_B_path)
                cmd_meio_B = f'ffmpeg -y -hide_banner -loglevel error -i "{video_B_path}" -vf "trim=start_frame={crossfade_frames}:end_frame={total_frames_B - crossfade_frames},setpts=PTS-STARTPTS" -an "{meio_B}"'
                subprocess.run(cmd_meio_B, shell=True, check=True)
                arquivos_para_concatenar.append(meio_B)

        # Last clip: keep everything after its first `crossfade_frames` frames.
        ultimo_video = video_paths[-1]
        path_ultimo_cortado = os.path.join(pasta, f"{len(video_paths)-1}_tail.mp4")
        cmd_ultimo = f'ffmpeg -y -hide_banner -loglevel error -i "{ultimo_video}" -vf "trim=start_frame={crossfade_frames},setpts=PTS-STARTPTS" -an "{path_ultimo_cortado}"'
        subprocess.run(cmd_ultimo, shell=True, check=True)
        arquivos_para_concatenar.append(path_ultimo_cortado)

        for f in temp_blend_files:
            os.remove(f)

        return arquivos_para_concatenar

    def _concat_mp4s_no_reencode(self, mp4_list: List[str], out_path: str):
        if not mp4_list:
            raise ValueError("List of MP4s to concatenate is empty.")
        if len(mp4_list) == 1:
            shutil.move(mp4_list[0], out_path)
            return

        with tempfile.NamedTemporaryFile("w", delete=False, suffix=".txt", dir=os.path.dirname(out_path)) as f:
            for mp4 in mp4_list:
                f.write(f"file '{os.path.abspath(mp4)}'\n")
            list_path = f.name

        # Argument list (instead of a shell string) so paths with spaces survive.
        cmd = ["ffmpeg", "-y", "-f", "concat", "-safe", "0", "-i", list_path, "-c", "copy", out_path]
        try:
            subprocess.run(cmd, check=True, capture_output=True, text=True)
        except subprocess.CalledProcessError as e:
            print(f"[ERROR] Concatenation failed: {e.stderr}")
            raise
        finally:
            os.remove(list_path)

    @torch.no_grad()
    def generate_low(self, call_kwargs, guidance_scale, width, height):
        # First pass: generate base latents at a reduced resolution and return
        # them for upsampling/refinement by the caller (see generate()).
        first_pass_config = self.config.get("first_pass", {}).copy()
        first_pass_config.pop("num_inference_steps", None)

        first_pass_kwargs = call_kwargs.copy()
        first_pass_kwargs.update({
            "output_type": "latent",
            "width": width,
            "height": height,
            "guidance_scale": float(guidance_scale),
            **first_pass_config
        })

        print(f"[DEBUG] First pass: generating at {width}x{height}...")
        latents = self.pipeline(**first_pass_kwargs).images
        log_tensor_info(latents, "Base latents (first pass)")
        return latents

    def generate(self, prompt: str, **kwargs):
        final_output_path, used_seed = None, None
        try:
            t_all = time.perf_counter()
            print(f"\n{'='*20} STARTING NEW GENERATION {'='*20}")
            if self.device == "cuda":
                torch.cuda.empty_cache()

            negative_prompt = kwargs.get("negative_prompt", "")
            mode = kwargs.get("mode", "text-to-video")
            height = kwargs.get("height", 512)
            width = kwargs.get("width", 704)
            duration = kwargs.get("duration", 2.0)
            guidance_scale = kwargs.get("guidance_scale", 3.0)
            improve_texture = kwargs.get("improve_texture", True)

            used_seed = random.randint(0, 2**32 - 1) if kwargs.get("randomize_seed", True) else int(kwargs.get("seed", 42))
            seed_everething(used_seed)
            print(f"[INFO] Generating with seed: {used_seed}")

            FPS = 24.0
            # LTX-Video expects a frame count of the form 8k + 1.
            actual_num_frames = max(9, int(round(duration * FPS) / 8) * 8 + 1)
            height_padded = ((height - 1) // 8 + 1) * 8
            width_padded = ((width - 1) // 8 + 1) * 8
            padding_values = calculate_padding(height, width, height_padded, width_padded)
            generator = torch.Generator(device=self.device).manual_seed(used_seed)

            temp_dir = tempfile.mkdtemp(prefix="ltxv_")
            self._tmp_dirs.add(temp_dir)
            results_dir = "/app/output"
            os.makedirs(results_dir, exist_ok=True)

            # Conditioning items (e.g. an input image for image-to-video) would be
            # collected here; none are used in the current text-to-video flow.
            conditioning_items = []
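            # Hypothetical image-to-video conditioning (assumption: this is not
            # part of the original flow, and `image_path` is an illustrative
            # kwarg). Something along these lines would condition frame 0 on an
            # input image, mirroring the LTX-Video reference inference script:
            # if mode == "image-to-video":
            #     image_tensor = load_image_to_tensor_with_resize_and_crop(
            #         kwargs["image_path"], height, width)
            #     image_tensor = F.pad(image_tensor, padding_values)
            #     conditioning_items.append(
            #         ConditioningItem(image_tensor.to(self.device), 0, 1.0))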
            call_kwargs = {
                "prompt": prompt,
                "negative_prompt": negative_prompt,
                "height": height_padded,
                "width": width_padded,
                "num_frames": actual_num_frames,
                "frame_rate": int(FPS),
                "generator": generator,
                "output_type": "latent",
                "conditioning_items": conditioning_items or None,
            }

            latents_list = []
            ctx = torch.autocast(device_type=self.device, dtype=self.runtime_autocast_dtype)

            with ctx:
                if improve_texture:
                    # Two-pass flow: generate at reduced resolution, upsample the
                    # latents, then refine them in a second pass.
                    downscale_factor = self.config.get("downscale_factor", 0.66666)
                    low_res_area = (width * height) * (downscale_factor**2)
                    downscaled_h, downscaled_w = calculate_new_dimensions(width, height, target_area=low_res_area)

                    base_latents = self.generate_low(call_kwargs, guidance_scale, downscaled_w, downscaled_h)

                    upsampled_latents = self._upsample_latents_internal(base_latents)
                    upsampled_latents = adain_filter_latent(latents=upsampled_latents, reference_latents=base_latents)
                    del base_latents; gc.collect(); torch.cuda.empty_cache()

                    second_pass_config = self.config.get("second_pass", {}).copy()
                    second_pass_kwargs = call_kwargs.copy()
                    second_pass_kwargs.update({
                        "latents": upsampled_latents,
                        "guidance_scale": guidance_scale,
                        **second_pass_config
                    })
                    final_latents = self.pipeline(**second_pass_kwargs).images
                    latents_list.append(final_latents.detach().cpu())
                    del final_latents, upsampled_latents; gc.collect(); torch.cuda.empty_cache()
                else:
                    # Single-pass flow at full resolution.
                    single_pass_latents = self.pipeline(**call_kwargs).images
                    latents_list.append(single_pass_latents.detach().cpu())
                    del single_pass_latents; gc.collect(); torch.cuda.empty_cache()

            # Decode latents to pixels in temporal chunks to bound VRAM usage,
            # writing each decoded chunk straight to an MP4 part.
            partes_mp4 = []
            chunk_count = 0
            for i, latents_cpu in enumerate(latents_list):
                latents_parts = self._dividir_latentes_por_tamanho(latents_cpu, 16, 8)

                for chunk in latents_parts:
                    chunk_count += 1
                    print(f"[INFO] Decoding chunk {chunk_count}/{len(latents_parts) * len(latents_list)}...")
                    pixel_tensor = vae_manager_singleton.decode(chunk.to(self.device), decode_timestep=self.config.get("decode_timestep", 0.05))

                    chunk_video_path = os.path.join(temp_dir, f"part_{chunk_count}.mp4")
                    video_encode_tool_singleton.save_video_from_tensor(pixel_tensor, chunk_video_path, fps=FPS)

                    partes_mp4.append(chunk_video_path)
                    del pixel_tensor, chunk; gc.collect(); torch.cuda.empty_cache()

            if len(partes_mp4) > 1:
                print("[INFO] Multiple chunks generated. Concatenating with transitions...")
                final_output_path = os.path.join(results_dir, f"final_{used_seed}.mp4")
                partes_para_concatenar = self._gerar_lista_com_transicoes(temp_dir, partes_mp4, crossfade_frames=8)
                self._concat_mp4s_no_reencode(partes_para_concatenar, final_output_path)
            elif partes_mp4:
                print("[INFO] Single chunk generated. Moving it to the final destination.")
                final_output_path = os.path.join(results_dir, f"final_{used_seed}.mp4")
                shutil.move(partes_mp4[0], final_output_path)
            else:
                raise RuntimeError("No video was generated.")

            print(f"[SUCCESS] Generation finished in {time.perf_counter() - t_all:.2f}s. Video: {final_output_path}")
            return final_output_path, used_seed

        except Exception as e:
            print(f"[FATAL ERROR] Generation failed: {type(e).__name__} - {e}")
            traceback.print_exc()
            raise
        finally:
            self.finalize(keep_paths=[final_output_path] if final_output_path else [])


if __name__ == "__main__":
    print("Loading VideoService...")
    video_generation_service = VideoService()
    print("\n[INFO] VideoService ready to receive tasks.")