# ltx_server_refactored.py — VideoService (Modular Version with Simple Overlap Chunking)
import warnings
import contextlib
import gc
import json
import os
import random
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import traceback
from pathlib import Path
from typing import Dict, List

import imageio
import numpy as np
import torch
import torch.nn.functional as F
import yaml
from einops import rearrange
from huggingface_hub import hf_hub_download, logging
from PIL import Image

from api.gpu_manager import gpu_manager
from managers.vae_manager import vae_manager_singleton
from tools.video_encode_tool import video_encode_tool_singleton
DEPS_DIR = Path("/data")
LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
# Only the last verbosity call takes effect, so set the desired (debug) level once.
logging.set_verbosity_debug()
LTXV_DEBUG = 1
LTXV_FRAME_LOG_EVERY = 8
warnings.filterwarnings("ignore")  # silence all warnings (subsumes per-category filters)
# (All setup, helper, and class-initialization functions remain unchanged)
# ... (run_setup, add_deps_to_path, _query_gpu_processes_via_nvml, etc.)
def run_setup():
    setup_script_path = "setup.py"
    if not os.path.exists(setup_script_path):
        print("[DEBUG] 'setup.py' not found. Skipping dependency cloning.")
        return
    try:
        print("[DEBUG] Running setup.py for dependencies...")
        subprocess.run([sys.executable, setup_script_path], check=True)
        print("[DEBUG] Setup finished successfully.")
    except subprocess.CalledProcessError as e:
        print(f"[DEBUG] ERROR in setup.py (code {e.returncode}). Aborting.")
        sys.exit(1)

if not LTX_VIDEO_REPO_DIR.exists():
    print(f"[DEBUG] Repository not found at {LTX_VIDEO_REPO_DIR}. Running setup...")
    run_setup()
def add_deps_to_path():
    repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
    if repo_path not in sys.path:
        sys.path.insert(0, repo_path)
    print(f"[DEBUG] Repo added to sys.path: {repo_path}")
def calculate_padding(orig_h, orig_w, target_h, target_w):
pad_h = target_h - orig_h
pad_w = target_w - orig_w
pad_top = pad_h // 2
pad_bottom = pad_h - pad_top
pad_left = pad_w // 2
pad_right = pad_w - pad_left
return (pad_left, pad_right, pad_top, pad_bottom)
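# Illustrative example (not executed): centering a 716x1270 frame inside a 720x1272
# canvas gives calculate_padding(716, 1270, 720, 1272) == (1, 1, 2, 2), i.e. the extra
# pixels are split as evenly as possible between left/right and top/bottom.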
def log_tensor_info(tensor, name="Tensor"):
if not isinstance(tensor, torch.Tensor):
print(f"\n[INFO] '{name}' não é tensor.")
return
print(f"\n--- Tensor: {name} ---")
print(f" - Shape: {tuple(tensor.shape)}")
print(f" - Dtype: {tensor.dtype}")
print(f" - Device: {tensor.device}")
if tensor.numel() > 0:
try:
print(f" - Min: {tensor.min().item():.4f} Max: {tensor.max().item():.4f} Mean: {tensor.mean().item():.4f}")
except Exception:
pass
print("------------------------------------------\n")
add_deps_to_path()
from ltx_video.pipelines.pipeline_ltx_video import ConditioningItem, LTXMultiScalePipeline
from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
from ltx_video.models.autoencoders.vae_encode import un_normalize_latents, normalize_latents
from ltx_video.pipelines.pipeline_ltx_video import adain_filter_latent
from api.ltx.inference import (
create_ltx_video_pipeline,
create_latent_upsampler,
load_image_to_tensor_with_resize_and_crop,
seed_everething,
)
class VideoService:
def _load_config(self):
base = LTX_VIDEO_REPO_DIR / "configs"
config_path = base / "ltxv-13b-0.9.8-distilled-fp8.yaml"
with open(config_path, "r") as file:
return yaml.safe_load(file)
    def finalize(self, keep_paths=None, extra_paths=None, clear_gpu=True):
        print("[DEBUG] Finalize: starting cleanup...")
        keep = set(keep_paths or []); extras = set(extra_paths or [])  # currently unused placeholders
        gc.collect()
        try:
            if clear_gpu and torch.cuda.is_available():
                torch.cuda.empty_cache()
                try:
                    torch.cuda.ipc_collect()
                except Exception:
                    pass
        except Exception as e:
            print(f"[DEBUG] Finalize: GPU cleanup failed: {e}")
        try:
            self._log_gpu_memory("After finalize")
        except Exception as e:
            print(f"[DEBUG] Post-finalize GPU log failed: {e}")
def _load_models(self):
t0 = time.perf_counter()
LTX_REPO = "Lightricks/LTX-Video"
print("[DEBUG] Baixando checkpoint principal...")
distilled_model_path = hf_hub_download(
repo_id=LTX_REPO,
filename=self.config["checkpoint_path"],
local_dir=os.getenv("HF_HOME"),
cache_dir=os.getenv("HF_HOME_CACHE"),
token=os.getenv("HF_TOKEN"),
)
self.config["checkpoint_path"] = distilled_model_path
print(f"[DEBUG] Checkpoint em: {distilled_model_path}")
print("[DEBUG] Baixando upscaler espacial...")
spatial_upscaler_path = hf_hub_download(
repo_id=LTX_REPO,
filename=self.config["spatial_upscaler_model_path"],
local_dir=os.getenv("HF_HOME"),
cache_dir=os.getenv("HF_HOME_CACHE"),
token=os.getenv("HF_TOKEN")
)
self.config["spatial_upscaler_model_path"] = spatial_upscaler_path
print(f"[DEBUG] Upscaler em: {spatial_upscaler_path}")
print("[DEBUG] Construindo pipeline...")
pipeline = create_ltx_video_pipeline(
ckpt_path=self.config["checkpoint_path"],
precision=self.config["precision"],
text_encoder_model_name_or_path=self.config["text_encoder_model_name_or_path"],
sampler=self.config["sampler"],
device="cpu",
enhance_prompt=False,
prompt_enhancer_image_caption_model_name_or_path=self.config["prompt_enhancer_image_caption_model_name_or_path"],
prompt_enhancer_llm_model_name_or_path=self.config["prompt_enhancer_llm_model_name_or_path"],
)
print("[DEBUG] Pipeline pronto.")
latent_upsampler = None
if self.config.get("spatial_upscaler_model_path"):
print("[DEBUG] Construindo latent_upsampler...")
latent_upsampler = create_latent_upsampler(self.config["spatial_upscaler_model_path"], device="cpu")
print("[DEBUG] Upsampler pronto.")
print(f"[DEBUG] _load_models() tempo total={time.perf_counter()-t0:.3f}s")
return pipeline, latent_upsampler
def _apply_precision_policy(self):
prec = str(self.config.get("precision", "")).lower()
self.runtime_autocast_dtype = torch.float32
if prec in ["float8_e4m3fn", "bfloat16"]:
self.runtime_autocast_dtype = torch.bfloat16
elif prec == "mixed_precision":
self.runtime_autocast_dtype = torch.float16
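        # Net effect of the policy above: "float8_e4m3fn" and "bfloat16" autocast in
        # bfloat16, "mixed_precision" in float16, and any other precision string
        # falls back to float32.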
def _register_tmp_dir(self, d: str):
if d and os.path.isdir(d):
            self._tmp_dirs.add(d); print(f"[DEBUG] Registered tmp dir: {d}")
    @torch.no_grad()
    def _upsample_latents_internal(self, latents: torch.Tensor) -> torch.Tensor:
        try:
            if not self.latent_upsampler:
                raise ValueError("Latent upsampler is not loaded.")
            latents_unnormalized = un_normalize_latents(latents, self.pipeline.vae, vae_per_channel_normalize=True)
            upsampled_latents = self.latent_upsampler(latents_unnormalized)
            return normalize_latents(upsampled_latents, self.pipeline.vae, vae_per_channel_normalize=True)
        finally:
            # Always release GPU memory; exceptions now propagate to the caller
            # (the old `except: pass` made this silently return None on failure).
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
            self.finalize(keep_paths=[])
def _prepare_conditioning_tensor(self, filepath, height, width, padding_values):
tensor = load_image_to_tensor_with_resize_and_crop(filepath, height, width)
tensor = torch.nn.functional.pad(tensor, padding_values)
return tensor.to(self.device, dtype=self.runtime_autocast_dtype)
def _save_and_log_video(self, pixel_tensor, base_filename, fps, temp_dir, results_dir, used_seed, progress_callback=None):
output_path = os.path.join(temp_dir, f"{base_filename}_{used_seed}.mp4")
video_encode_tool_singleton.save_video_from_tensor(
pixel_tensor, output_path, fps=fps, progress_callback=progress_callback
)
final_path = os.path.join(results_dir, f"{base_filename}_{used_seed}.mp4")
shutil.move(output_path, final_path)
print(f"[DEBUG] Vídeo salvo em: {final_path}")
return final_path
    # ==============================================================================
    # --- MODULAR FUNCTIONS WITH THE SIMPLIFIED CHUNKING LOGIC ---
    # ==============================================================================
def prepare_condition_items(self, items_list: List, height: int, width: int, num_frames: int):
if not items_list: return []
height_padded = ((height - 1) // 8 + 1) * 8
width_padded = ((width - 1) // 8 + 1) * 8
padding_values = calculate_padding(height, width, height_padded, width_padded)
conditioning_items = []
for media, frame, weight in items_list:
tensor = self._prepare_conditioning_tensor(media, height, width, padding_values) if isinstance(media, str) else media.to(self.device, dtype=self.runtime_autocast_dtype)
safe_frame = max(0, min(int(frame), num_frames - 1))
conditioning_items.append(ConditioningItem(tensor, safe_frame, float(weight)))
return conditioning_items
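    # Expected input format for prepare_condition_items (illustrative, not executed):
    # each entry is (media, frame, weight), where `media` is an image path or a tensor:
    #   service.prepare_condition_items([("ref.png", 0, 1.0)], height=480, width=704, num_frames=121)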
def generate_low_old(self, prompt, negative_prompt, height, width, duration, guidance_scale, seed, conditioning_items=None):
used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
seed_everething(used_seed)
FPS = 24.0
actual_num_frames = max(9, int(round((round(duration * FPS) - 1) / 8.0) * 8 + 1))
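        # Worked example of the formula above: duration=5.0 at 24 fps gives round(5*24)=120
        # raw frames; (120-1)/8 rounds to 15 blocks, so 15*8+1 = 121 frames (snapped to 8n+1).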
height_padded = ((height - 1) // 8 + 1) * 8
width_padded = ((width - 1) // 8 + 1) * 8
temp_dir = tempfile.mkdtemp(prefix="ltxv_low_"); self._register_tmp_dir(temp_dir)
results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
downscale_factor = self.config.get("downscale_factor", 0.6666666)
vae_scale_factor = self.pipeline.vae_scale_factor
x_width = int(width_padded * downscale_factor)
downscaled_width = x_width - (x_width % vae_scale_factor)
x_height = int(height_padded * downscale_factor)
downscaled_height = x_height - (x_height % vae_scale_factor)
first_pass_kwargs = {
"prompt": prompt, "negative_prompt": negative_prompt, "height": downscaled_height, "width": downscaled_width,
"num_frames": actual_num_frames, "frame_rate": int(FPS), "generator": torch.Generator(device=self.device).manual_seed(used_seed),
"output_type": "latent", "conditioning_items": conditioning_items, "guidance_scale": float(guidance_scale),
**(self.config.get("first_pass", {}))
}
        try:
            with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device.type == 'cuda'):
                latents = self.pipeline(**first_pass_kwargs).images
                pixel_tensor = vae_manager_singleton.decode(latents.clone(), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
                video_path = self._save_and_log_video(pixel_tensor, "low_res_video", FPS, temp_dir, results_dir, used_seed)
                latents_cpu = latents.detach().to("cpu")
                tensor_path = os.path.join(results_dir, f"latents_low_res_{used_seed}.pt")
                torch.save(latents_cpu, tensor_path)
                return video_path, tensor_path, used_seed
        finally:
            # Exceptions now propagate to the caller (the old `except: pass` silently
            # returned None); cleanup still runs on success and failure alike.
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
            self.finalize(keep_paths=[])
def _generate_single_chunk_low(self, prompt, negative_prompt, height, width, num_frames, guidance_scale, seed, initial_latent_condition=None, image_conditions=None, ltx_configs_override=None):
"""
[NÓ DE GERAÇÃO]
Gera um ÚNICO chunk de latentes brutos. Esta é a unidade de trabalho fundamental.
"""
# (Esta função auxiliar permanece a mesma da nossa última versão, com a lógica de override)
print("\n" + "-"*20 + " INÍCIO: _generate_single_chunk_low " + "-"*20)
height_padded = ((height - 1) // 8 + 1) * 8
width_padded = ((width - 1) // 8 + 1) * 8
generator = torch.Generator(device=self.device).manual_seed(seed)
downscale_factor = self.config.get("downscale_factor", 0.6666666)
vae_scale_factor = self.pipeline.vae_scale_factor
x_width = int(width_padded * downscale_factor)
downscaled_width = x_width - (x_width % vae_scale_factor)
x_height = int(height_padded * downscale_factor)
downscaled_height = x_height - (x_height % vae_scale_factor)
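        # Example of the snapping above (assuming vae_scale_factor == 32): width 1280 at the
        # default 2/3 downscale gives int(1280*0.6666666) = 853, floored to 832 (a multiple of 32).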
all_conditions = []
if image_conditions: all_conditions.extend(image_conditions)
if initial_latent_condition: all_conditions.append(initial_latent_condition)
first_pass_config = self.config.get("first_pass", {}).copy()
if ltx_configs_override:
print("[DEBUG] Sobrepondo configurações do LTX com valores da UI...")
if "first_pass_num_inference_steps" in ltx_configs_override:
first_pass_config["num_inference_steps"] = ltx_configs_override["first_pass_num_inference_steps"]
if "first_pass_guidance_scale" in ltx_configs_override:
max_val = max(first_pass_config.get("guidance_scale", [1]))
new_max_val = ltx_configs_override["first_pass_guidance_scale"]
first_pass_config["guidance_scale"] = [new_max_val if x==max_val else x for x in first_pass_config["guidance_scale"]]
first_pass_kwargs = {
"prompt": prompt, "negative_prompt": negative_prompt, "height": downscaled_height, "width": downscaled_width,
"num_frames": num_frames, "frame_rate": 24, "generator": generator, "output_type": "latent",
"conditioning_items": all_conditions if all_conditions else None,
**first_pass_config
}
        # guidance_scale was removed from the explicit kwargs above, since it now
        # comes in through first_pass_config
        if "guidance_scale" in first_pass_kwargs:
            del first_pass_kwargs["guidance_scale"]
with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device.type == 'cuda'):
latents_bruto = self.pipeline(**first_pass_kwargs).images
        log_tensor_info(latents_bruto, f"Raw latents generated for: '{prompt[:40]}...'")
        print("-" * 20 + " END: _generate_single_chunk_low " + "-" * 20)
return latents_bruto
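    # Illustrative override payload for the checks above (values are placeholders):
    #   ltx_configs_override = {"first_pass_num_inference_steps": 8,
    #                           "first_pass_guidance_scale": 4.0}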
def generate_narrative_low(self, prompt: str, negative_prompt, height, width, duration, guidance_scale, seed, initial_image_conditions=None, overlap_frames: int = 8, ltx_configs_override: dict = None):
"""
[ORQUESTRADOR NARRATIVO]
Gera um vídeo em múltiplos chunks sequenciais a partir de um prompt com várias linhas.
"""
print("\n" + "="*80)
print("====== INICIANDO GERAÇÃO NARRATIVA EM CHUNKS (LOW-RES) ======")
print("="*80)
used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
seed_everething(used_seed)
FPS = 24.0
prompt_list = [p.strip() for p in prompt.splitlines() if p.strip()]
num_chunks = len(prompt_list)
        if num_chunks == 0: raise ValueError("The prompt is empty or contains no valid lines.")
total_actual_frames = max(9, int(round((round(duration * FPS) - 1) / 8.0) * 8 + 1))
if num_chunks > 1:
total_blocks = (total_actual_frames - 1) // 8
blocks_per_chunk = total_blocks // num_chunks
blocks_last_chunk = total_blocks - (blocks_per_chunk * (num_chunks - 1))
frames_per_chunk = blocks_per_chunk * 8 + 1
frames_per_chunk_last = blocks_last_chunk * 8 + 1
else:
frames_per_chunk = total_actual_frames
frames_per_chunk_last = total_actual_frames
frames_per_chunk = max(9, frames_per_chunk)
frames_per_chunk_last = max(9, frames_per_chunk_last)
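        # Worked example: 241 total frames over 3 prompt lines -> total_blocks = 30,
        # blocks_per_chunk = 10, so every chunk (including the last) generates 10*8+1 = 81 frames.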
poda_latents_num = overlap_frames // self.pipeline.video_scale_factor if self.pipeline.video_scale_factor > 0 else 0
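        # The overlap is specified in pixel frames but trimmed in latent frames, so it is
        # divided by the VAE's temporal compression (video_scale_factor); e.g. 8 pixel frames
        # -> 1 latent frame when video_scale_factor == 8 (an assumption about the configured VAE).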
latentes_chunk_video = []
condition_item_latent_overlap = None
temp_dir = tempfile.mkdtemp(prefix="ltxv_narrative_"); self._register_tmp_dir(temp_dir)
results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
for i, chunk_prompt in enumerate(prompt_list):
print(f"\n--- Gerando Chunk Narrativo {i+1}/{num_chunks}: '{chunk_prompt}' ---")
current_image_conditions = []
if initial_image_conditions:
cond_item_original = initial_image_conditions[0]
if i == 0:
current_image_conditions.append(cond_item_original)
else:
cond_item_fraco = ConditioningItem(
media_item=cond_item_original.media_item, media_frame_number=0, conditioning_strength=0.1
)
current_image_conditions.append(cond_item_fraco)
num_frames_para_gerar = frames_per_chunk_last if i == num_chunks - 1 else frames_per_chunk
if i > 0 and poda_latents_num > 0:
num_frames_para_gerar += overlap_frames
latentes_bruto = self._generate_single_chunk_low(
prompt=chunk_prompt, negative_prompt=negative_prompt, height=height, width=width,
num_frames=num_frames_para_gerar, guidance_scale=guidance_scale, seed=used_seed + i,
initial_latent_condition=condition_item_latent_overlap, image_conditions=current_image_conditions,
ltx_configs_override=ltx_configs_override
)
if i > 0 and poda_latents_num > 0:
latentes_bruto = latentes_bruto[:, :, poda_latents_num:, :, :]
latentes_podado = latentes_bruto.clone().detach()
if i < num_chunks - 1 and poda_latents_num > 0:
latentes_podado = latentes_bruto[:, :, :-poda_latents_num, :, :].clone()
overlap_latents = latentes_bruto[:, :, -poda_latents_num:, :, :].clone()
condition_item_latent_overlap = ConditioningItem(
media_item=overlap_latents, media_frame_number=0, conditioning_strength=1.0
)
latentes_chunk_video.append(latentes_podado)
print("\n--- Finalizando Narrativa: Concatenando chunks ---")
final_latents = torch.cat(latentes_chunk_video, dim=2)
log_tensor_info(final_latents, "Tensor de Latentes Final Concatenado")
tensor_path = os.path.join(results_dir, f"latents_narrative_{used_seed}.pt")
torch.save(final_latents.cpu(), tensor_path)
with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device.type == 'cuda'):
pixel_tensor = vae_manager_singleton.decode(final_latents, decode_timestep=float(self.config.get("decode_timestep", 0.05)))
video_path = self._save_and_log_video(pixel_tensor, "narrative_video", FPS, temp_dir, results_dir, used_seed)
self.finalize(keep_paths=[video_path, tensor_path])
return video_path, tensor_path, used_seed
def generate_single_low(self, prompt: str, negative_prompt, height, width, duration, guidance_scale, seed, initial_image_conditions=None, ltx_configs_override: dict = None):
"""
[ORQUESTRADOR SIMPLES]
Gera um vídeo completo em um único chunk. Ideal para prompts simples e curtos.
"""
print("\n" + "="*80)
print("====== INICIANDO GERAÇÃO SIMPLES EM CHUNK ÚNICO (LOW-RES) ======")
print("="*80)
used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
seed_everething(used_seed)
FPS = 24.0
total_actual_frames = max(9, int(round((round(duration * FPS) - 1) / 8.0) * 8 + 1))
temp_dir = tempfile.mkdtemp(prefix="ltxv_single_"); self._register_tmp_dir(temp_dir)
results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
        # Delegate all the work to the single-chunk generation node
final_latents = self._generate_single_chunk_low(
prompt=prompt,
negative_prompt=negative_prompt,
height=height, width=width,
num_frames=total_actual_frames,
guidance_scale=guidance_scale,
seed=used_seed,
image_conditions=initial_image_conditions,
ltx_configs_override=ltx_configs_override
)
print("\n--- Finalizando Geração Simples: Salvando e decodificando ---")
log_tensor_info(final_latents, "Tensor de Latentes Final")
tensor_path = os.path.join(results_dir, f"latents_single_{used_seed}.pt")
torch.save(final_latents.cpu(), tensor_path)
with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device.type == 'cuda'):
pixel_tensor = vae_manager_singleton.decode(final_latents, decode_timestep=float(self.config.get("decode_timestep", 0.05)))
video_path = self._save_and_log_video(pixel_tensor, "single_video", FPS, temp_dir, results_dir, used_seed)
self.finalize(keep_paths=[video_path, tensor_path])
return video_path, tensor_path, used_seed
def generate_upscale_denoise(self, latents_path, prompt, negative_prompt, guidance_scale, seed):
used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
seed_everething(used_seed)
temp_dir = tempfile.mkdtemp(prefix="ltxv_up_"); self._register_tmp_dir(temp_dir)
results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
latents_low = torch.load(latents_path).to(self.device)
        with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device.type == 'cuda'):
            upsampled_latents = self._upsample_latents_internal(latents_low)
            upsampled_latents = adain_filter_latent(latents=upsampled_latents, reference_latents=latents_low)
            del latents_low; torch.cuda.empty_cache()
            # --- SIMPLE SPLIT LOGIC WITH OVERLAP ---
            total_frames = upsampled_latents.shape[2]
            # Keep mid_point at least 1 so a short clip does not produce an empty second chunk
            mid_point = max(1, total_frames // 2)
            chunk1 = upsampled_latents[:, :, :mid_point, :, :]
            # The second chunk starts one frame earlier to create the overlap
            chunk2 = upsampled_latents[:, :, mid_point - 1:, :, :]
            final_latents_list = []
            for i, chunk in enumerate([chunk1, chunk2]):
                if chunk.shape[2] <= 1: continue  # Skip empty or degenerate chunks
                second_pass_height = chunk.shape[3] * self.pipeline.vae_scale_factor
                second_pass_width = chunk.shape[4] * self.pipeline.vae_scale_factor
                second_pass_kwargs = {
                    "prompt": prompt, "negative_prompt": negative_prompt, "height": second_pass_height, "width": second_pass_width,
                    "num_frames": chunk.shape[2], "latents": chunk, "guidance_scale": float(guidance_scale),
                    "output_type": "latent", "generator": torch.Generator(device=self.device).manual_seed(used_seed),
                    **(self.config.get("second_pass", {}))
                }
                refined_chunk = self.pipeline(**second_pass_kwargs).images
                # Trim the overlap from the refined first chunk before concatenating
                if i == 0:
                    final_latents_list.append(refined_chunk[:, :, :-1, :, :])
                else:
                    final_latents_list.append(refined_chunk)
            final_latents = torch.cat(final_latents_list, dim=2)
            log_tensor_info(final_latents, "Final upscaled/refined latents")
            latents_cpu = final_latents.detach().to("cpu")
            tensor_path = os.path.join(results_dir, f"latents_refined_{used_seed}.pt")
            torch.save(latents_cpu, tensor_path)
            pixel_tensor = vae_manager_singleton.decode(final_latents, decode_timestep=float(self.config.get("decode_timestep", 0.05)))
        video_path = self._save_and_log_video(pixel_tensor, "refined_video", 24.0, temp_dir, results_dir, used_seed)
        return video_path, tensor_path
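            # Worked example: with 16 latent frames, mid_point = 8, so chunk1 covers frames
            # [0..7] and chunk2 covers [7..15]; after refinement the duplicated frame 7 is
            # dropped from chunk1 below, so the concatenation restores exactly 16 frames.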
def encode_mp4(self, latents_path: str, fps: int = 24):
latents = torch.load(latents_path)
seed = random.randint(0, 99999)
temp_dir = tempfile.mkdtemp(prefix="ltxv_enc_"); self._register_tmp_dir(temp_dir)
results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
        # --- SIMPLE SPLIT LOGIC WITH OVERLAP ---
total_frames = latents.shape[2]
mid_point = max(1, total_frames // 2)
chunk1_latents = latents[:, :, :mid_point, :, :]
chunk2_latents = latents[:, :, mid_point - 1:, :, :]
video_parts = []
pixel_chunks_to_concat = []
        with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device.type == 'cuda'):
            for i, chunk in enumerate([chunk1_latents, chunk2_latents]):
                if chunk.shape[2] == 0: continue
                pixel_chunk = vae_manager_singleton.decode(chunk.to(self.device), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
                # Trim the overlap from the first pixel chunk
                if i == 0:
                    pixel_chunks_to_concat.append(pixel_chunk[:, :, :-1, :, :])
                else:
                    pixel_chunks_to_concat.append(pixel_chunk)
            final_pixel_tensor = torch.cat(pixel_chunks_to_concat, dim=2)
final_video_path = self._save_and_log_video(final_pixel_tensor, f"final_concatenated_{seed}", fps, temp_dir, results_dir, seed)
return final_video_path
def __init__(self):
t0 = time.perf_counter()
print("[DEBUG] Inicializando VideoService...")
# 1. Obter o dispositivo alvo a partir do gerenciador
# Não definimos `self.device` ainda, apenas guardamos o alvo.
target_device = gpu_manager.get_ltx_device()
print(f"[DEBUG] LTX foi alocado para o dispositivo: {target_device}")
# 2. Carregar a configuração e os modelos (na CPU, como a função _load_models faz)
self.config = self._load_config()
self.pipeline, self.latent_upsampler = self._load_models()
# 3. Mover os modelos para o dispositivo alvo e definir `self.device`
self.move_to_device(target_device) # Usando a função que já criamos!
# 4. Configurar o resto dos componentes com o dispositivo correto
self._apply_precision_policy()
vae_manager_singleton.attach_pipeline(
self.pipeline,
            device=self.device,  # `self.device` is now correct
autocast_dtype=self.runtime_autocast_dtype
)
self._tmp_dirs = set()
print(f"[DEBUG] VideoService pronto. boot_time={time.perf_counter()-t0:.3f}s")
    # The move_to_device helper is what makes the __init__ flow above work
    def move_to_device(self, device):
        """Move the pipeline models to the given device."""
        print(f"[LTX] Moving models to {device}...")
        self.device = torch.device(device)  # Ensure it is a torch.device object
        self.pipeline.to(self.device)
        if self.latent_upsampler:
            self.latent_upsampler.to(self.device)
        print(f"[LTX] Models are now on {self.device}.")
    def move_to_cpu(self):
        """Move the models to the CPU to free VRAM."""
        self.move_to_device(torch.device("cpu"))
if torch.cuda.is_available():
torch.cuda.empty_cache()
# Clean module-level instantiation; no use of `self` outside the class.
print("Creating VideoService instance...")
video_generation_service = VideoService()
print("VideoService instance ready.")