EuuIia committed
Commit fd4abdb · verified · 1 Parent(s): 9848ab3

Upload ltx_server (8).py

Files changed (1)
  1. api/ltx_server (8).py +840 -0
api/ltx_server (8).py ADDED
@@ -0,0 +1,840 @@
+ # ltx_server.py — VideoService (beta 1.1)
+ # Always output_type="latent"; at the end: VAE (whole block) → pixels → MP4.
+ # Ignores UserWarning/FutureWarning and injects the VAE into the manager with the correct dtype/device.
+
+ # --- 0. WARNINGS AND ENVIRONMENT ---
+ import warnings
+ warnings.filterwarnings("ignore", category=UserWarning)
+ warnings.filterwarnings("ignore", category=FutureWarning)
+ warnings.filterwarnings("ignore", message=".*")
+
+ from huggingface_hub import logging
+
+ # Each set_verbosity_* call overrides the previous one, so only the last level applies.
+ logging.set_verbosity_debug()
+
+ # Defaults for the debug environment variables read back via os.getenv() in VideoService.__init__.
+ import os
+ os.environ.setdefault("LTXV_DEBUG", "1")
+ os.environ.setdefault("LTXV_FRAME_LOG_EVERY", "8")
+
+
+ # --- 1. IMPORTS ---
+ import subprocess
+ import shlex
+ import tempfile
+ import json
+ import random
+ import sys
+ import gc
+ import shutil
+ import contextlib
+ import time
+ import traceback
+ from typing import List, Dict
+ from pathlib import Path
+
+ import torch
+ import numpy as np
+ import yaml
+ import imageio
+ from huggingface_hub import hf_hub_download
+
+ # Singletons (simple versions)
+ from managers.vae_manager import vae_manager_singleton
+ from tools.video_encode_tool import video_encode_tool_singleton
+
+ # --- 2. DEPENDENCY MANAGEMENT AND SETUP ---
+ def _query_gpu_processes_via_nvml(device_index: int) -> List[Dict]:
+     try:
+         import psutil
+         import pynvml as nvml
+         nvml.nvmlInit()
+         handle = nvml.nvmlDeviceGetHandleByIndex(device_index)
+         try:
+             procs = nvml.nvmlDeviceGetComputeRunningProcesses_v3(handle)
+         except Exception:
+             procs = nvml.nvmlDeviceGetComputeRunningProcesses(handle)
+         results = []
+         for p in procs:
+             pid = int(p.pid)
+             used_mb = None
+             try:
+                 if getattr(p, "usedGpuMemory", None) is not None and p.usedGpuMemory not in (0,):
+                     used_mb = max(0, int(p.usedGpuMemory) // (1024 * 1024))
+             except Exception:
+                 used_mb = None
+             name = "unknown"
+             user = "unknown"
+             try:
+                 pr = psutil.Process(pid)
+                 name = pr.name()
+                 user = pr.username()
+             except Exception:
+                 pass
+             results.append({"pid": pid, "name": name, "user": user, "used_mb": used_mb})
+         nvml.nvmlShutdown()
+         return results
+     except Exception:
+         return []
+
+ def _query_gpu_processes_via_nvidiasmi(device_index: int) -> List[Dict]:
+     cmd = f"nvidia-smi -i {device_index} --query-compute-apps=pid,process_name,used_memory --format=csv,noheader,nounits"
+     try:
+         out = subprocess.check_output(shlex.split(cmd), stderr=subprocess.STDOUT, text=True, timeout=2.0)
+     except Exception:
+         return []
+     results = []
+     for line in out.strip().splitlines():
+         parts = [p.strip() for p in line.split(",")]
+         if len(parts) >= 3:
+             try:
+                 pid = int(parts[0]); name = parts[1]; used_mb = int(parts[2])
+                 user = "unknown"
+                 try:
+                     import psutil
+                     pr = psutil.Process(pid)
+                     user = pr.username()
+                 except Exception:
+                     pass
+                 results.append({"pid": pid, "name": name, "user": user, "used_mb": used_mb})
+             except Exception:
+                 continue
+     return results
+
+ def _gpu_process_table(processes: List[Dict], current_pid: int) -> str:
+     if not processes:
+         return " - Active processes: (none)\n"
+     processes = sorted(processes, key=lambda x: (x.get("used_mb") or 0), reverse=True)
+     lines = [" - Active processes (PID | USER | NAME | VRAM MB):"]
+     for p in processes:
+         star = "*" if p["pid"] == current_pid else " "
+         used_str = str(p["used_mb"]) if p.get("used_mb") is not None else "N/A"
+         lines.append(f"   {star} {p['pid']} | {p['user']} | {p['name']} | {used_str}")
+     return "\n".join(lines) + "\n"
+
+ def run_setup():
+     setup_script_path = "setup.py"
+     if not os.path.exists(setup_script_path):
+         print("[DEBUG] 'setup.py' not found. Skipping dependency cloning.")
+         return
+     try:
+         print("[DEBUG] Running setup.py for dependencies...")
+         subprocess.run([sys.executable, setup_script_path], check=True)
+         print("[DEBUG] Setup finished successfully.")
+     except subprocess.CalledProcessError as e:
+         print(f"[DEBUG] ERROR in setup.py (code {e.returncode}). Aborting.")
+         sys.exit(1)
+
+ from api.ltx.inference import (
+     create_ltx_video_pipeline,
+     create_latent_upsampler,
+     load_image_to_tensor_with_resize_and_crop,
+     seed_everething,
+     calculate_padding,
+     load_media_file,
+ )
+
+ DEPS_DIR = Path("/data")
+ LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
+ if not LTX_VIDEO_REPO_DIR.exists():
+     print(f"[DEBUG] Repository not found at {LTX_VIDEO_REPO_DIR}. Running setup...")
+     run_setup()
+
+ def add_deps_to_path():
+     repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
+     if repo_path not in sys.path:
+         sys.path.insert(0, repo_path)
+     print(f"[DEBUG] Repo added to sys.path: {repo_path}")
+
+ add_deps_to_path()
+
+ # --- 3. MODEL-SPECIFIC IMPORTS ---
+
+ from ltx_video.pipelines.pipeline_ltx_video import ConditioningItem, LTXMultiScalePipeline
+ from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy
+
+ # --- 4. LOGGING HELPER FUNCTIONS ---
+ def log_tensor_info(tensor, name="Tensor"):
+     if not isinstance(tensor, torch.Tensor):
+         print(f"\n[INFO] '{name}' is not a tensor.")
+         return
+     print(f"\n--- Tensor: {name} ---")
+     print(f" - Shape: {tuple(tensor.shape)}")
+     print(f" - Dtype: {tensor.dtype}")
+     print(f" - Device: {tensor.device}")
+     if tensor.numel() > 0:
+         try:
+             print(f" - Min: {tensor.min().item():.4f} Max: {tensor.max().item():.4f} Mean: {tensor.mean().item():.4f}")
+         except Exception:
+             pass
+     print("------------------------------------------\n")
+
+ # --- 5. MAIN SERVICE CLASS ---
+ class VideoService:
+     def __init__(self):
+         t0 = time.perf_counter()
+         print("[DEBUG] Initializing VideoService...")
+         self.debug = os.getenv("LTXV_DEBUG", "1") == "1"
+         self.frame_log_every = int(os.getenv("LTXV_FRAME_LOG_EVERY", "8"))
+         self.config = self._load_config()
+         print(f"[DEBUG] Config loaded (precision={self.config.get('precision')}, sampler={self.config.get('sampler')})")
+         self.device = "cuda" if torch.cuda.is_available() else "cpu"
+         print(f"[DEBUG] Selected device: {self.device}")
+         self.last_memory_reserved_mb = 0.0
+         self._tmp_dirs = set(); self._tmp_files = set(); self._last_outputs = []
+
+         self.pipeline, self.latent_upsampler = self._load_models()
+         print(f"[DEBUG] Pipeline and upsampler loaded. Upsampler active? {bool(self.latent_upsampler)}")
+
+         print(f"[DEBUG] Moving models to {self.device}...")
+         self.pipeline.to(self.device)
+         if self.latent_upsampler:
+             self.latent_upsampler.to(self.device)
+
+         self._apply_precision_policy()
+         print(f"[DEBUG] runtime_autocast_dtype = {getattr(self, 'runtime_autocast_dtype', None)}")
+
+         # Inject the pipeline/VAE into the manager (prevents vae=None)
+         vae_manager_singleton.attach_pipeline(
+             self.pipeline,
+             device=self.device,
+             autocast_dtype=self.runtime_autocast_dtype
+         )
+         print(f"[DEBUG] VAE manager attached: has_vae={hasattr(self.pipeline, 'vae')} device={self.device}")
+
+         if self.device == "cuda":
+             torch.cuda.empty_cache()
+         self._log_gpu_memory("After loading models")
+
+         print(f"[DEBUG] VideoService ready. boot_time={time.perf_counter()-t0:.3f}s")
+
+     def _log_gpu_memory(self, stage_name: str):
+         if self.device != "cuda":
+             return
+         device_index = torch.cuda.current_device() if torch.cuda.is_available() else 0
+         current_reserved_b = torch.cuda.memory_reserved(device_index)
+         current_reserved_mb = current_reserved_b / (1024 ** 2)
+         total_memory_b = torch.cuda.get_device_properties(device_index).total_memory
+         total_memory_mb = total_memory_b / (1024 ** 2)
+         peak_reserved_mb = torch.cuda.max_memory_reserved(device_index) / (1024 ** 2)
+         delta_mb = current_reserved_mb - getattr(self, "last_memory_reserved_mb", 0.0)
+         processes = _query_gpu_processes_via_nvml(device_index) or _query_gpu_processes_via_nvidiasmi(device_index)
+         print(f"\n--- [GPU LOG] {stage_name} (cuda:{device_index}) ---")
+         print(f" - Reserved: {current_reserved_mb:.2f} MB / {total_memory_mb:.2f} MB (Δ={delta_mb:+.2f} MB)")
+         if peak_reserved_mb > getattr(self, "last_memory_reserved_mb", 0.0):
+             print(f" - Peak reserved (this stage): {peak_reserved_mb:.2f} MB")
+         print(_gpu_process_table(processes, os.getpid()), end="")
+         print("--------------------------------------------------\n")
+         self.last_memory_reserved_mb = current_reserved_mb
+
+     def _register_tmp_dir(self, d: str):
+         if d and os.path.isdir(d):
+             self._tmp_dirs.add(d); print(f"[DEBUG] Registered tmp dir: {d}")
+
+     def _register_tmp_file(self, f: str):
+         if f and os.path.exists(f):
+             self._tmp_files.add(f); print(f"[DEBUG] Registered tmp file: {f}")
+
+     def finalize(self, keep_paths=None, extra_paths=None, clear_gpu=True):
+         print("[DEBUG] Finalize: starting cleanup...")
+         keep = set(keep_paths or []); extras = set(extra_paths or [])
+         removed_files = 0
+         for f in list(self._tmp_files | extras):
+             try:
+                 if f not in keep and os.path.isfile(f):
+                     os.remove(f); removed_files += 1; print(f"[DEBUG] Removed tmp file: {f}")
+             except Exception as e:
+                 print(f"[DEBUG] Failed to remove file {f}: {e}")
+             finally:
+                 self._tmp_files.discard(f)
+         removed_dirs = 0
+         for d in list(self._tmp_dirs):
+             try:
+                 if d not in keep and os.path.isdir(d):
+                     shutil.rmtree(d, ignore_errors=True); removed_dirs += 1; print(f"[DEBUG] Removed tmp dir: {d}")
+             except Exception as e:
+                 print(f"[DEBUG] Failed to remove dir {d}: {e}")
+             finally:
+                 self._tmp_dirs.discard(d)
+         print(f"[DEBUG] Finalize: files removed={removed_files}, dirs removed={removed_dirs}")
+         gc.collect()
+         try:
+             if clear_gpu and torch.cuda.is_available():
+                 torch.cuda.empty_cache()
+                 try:
+                     torch.cuda.ipc_collect()
+                 except Exception:
+                     pass
+         except Exception as e:
+             print(f"[DEBUG] Finalize: GPU cleanup failed: {e}")
+         try:
+             self._log_gpu_memory("After finalize")
+         except Exception as e:
+             print(f"[DEBUG] Post-finalize GPU log failed: {e}")
+
+     def _load_config(self):
+         base = LTX_VIDEO_REPO_DIR / "configs"
+         candidates = [
+             base / "ltxv-13b-0.9.8-dev-fp8.yaml",
+             base / "ltxv-13b-0.9.8-distilled-fp8.yaml",
+             base / "ltxv-13b-0.9.8-distilled.yaml",
+         ]
+         for cfg in candidates:
+             if cfg.exists():
+                 print(f"[DEBUG] Selected config: {cfg}")
+                 with open(cfg, "r") as file:
+                     return yaml.safe_load(file)
+         cfg = base / "ltxv-13b-0.9.8-distilled-fp8.yaml"
+         print(f"[DEBUG] Fallback config: {cfg}")
+         with open(cfg, "r") as file:
+             return yaml.safe_load(file)
+
+     def _load_models(self):
+         t0 = time.perf_counter()
+         LTX_REPO = "Lightricks/LTX-Video"
+         print("[DEBUG] Downloading main checkpoint...")
+         distilled_model_path = hf_hub_download(
+             repo_id=LTX_REPO,
+             filename=self.config["checkpoint_path"],
+             local_dir=os.getenv("HF_HOME"),
+             cache_dir=os.getenv("HF_HOME_CACHE"),
+             token=os.getenv("HF_TOKEN"),
+         )
+         self.config["checkpoint_path"] = distilled_model_path
+         print(f"[DEBUG] Checkpoint at: {distilled_model_path}")
+
+         print("[DEBUG] Downloading spatial upscaler...")
+         spatial_upscaler_path = hf_hub_download(
+             repo_id=LTX_REPO,
+             filename=self.config["spatial_upscaler_model_path"],
+             local_dir=os.getenv("HF_HOME"),
+             cache_dir=os.getenv("HF_HOME_CACHE"),
+             token=os.getenv("HF_TOKEN")
+         )
+         self.config["spatial_upscaler_model_path"] = spatial_upscaler_path
+         print(f"[DEBUG] Upscaler at: {spatial_upscaler_path}")
+
+         print("[DEBUG] Building pipeline...")
+         pipeline = create_ltx_video_pipeline(
+             ckpt_path=self.config["checkpoint_path"],
+             precision=self.config["precision"],
+             text_encoder_model_name_or_path=self.config["text_encoder_model_name_or_path"],
+             sampler=self.config["sampler"],
+             device="cpu",
+             enhance_prompt=False,
+             prompt_enhancer_image_caption_model_name_or_path=self.config["prompt_enhancer_image_caption_model_name_or_path"],
+             prompt_enhancer_llm_model_name_or_path=self.config["prompt_enhancer_llm_model_name_or_path"],
+         )
+         print("[DEBUG] Pipeline ready.")
+
+         latent_upsampler = None
+         if self.config.get("spatial_upscaler_model_path"):
+             print("[DEBUG] Building latent_upsampler...")
+             latent_upsampler = create_latent_upsampler(self.config["spatial_upscaler_model_path"], device="cpu")
+             print("[DEBUG] Upsampler ready.")
+         print(f"[DEBUG] _load_models() total time={time.perf_counter()-t0:.3f}s")
+         return pipeline, latent_upsampler
+
+     def _promote_fp8_weights_to_bf16(self, module):
+         if not isinstance(module, torch.nn.Module):
+             print("[DEBUG] FP8→BF16 promotion skipped: target is not an nn.Module.")
+             return
+         f8 = getattr(torch, "float8_e4m3fn", None)
+         if f8 is None:
+             print("[DEBUG] torch.float8_e4m3fn not available.")
+             return
+         p_cnt = b_cnt = 0
+         for _, p in module.named_parameters(recurse=True):
+             try:
+                 if p.dtype == f8:
+                     with torch.no_grad():
+                         p.data = p.data.to(torch.bfloat16); p_cnt += 1
+             except Exception:
+                 pass
+         for _, b in module.named_buffers(recurse=True):
+             try:
+                 if hasattr(b, "dtype") and b.dtype == f8:
+                     b.data = b.data.to(torch.bfloat16); b_cnt += 1
+             except Exception:
+                 pass
+         print(f"[DEBUG] FP8→BF16: params_promoted={p_cnt}, buffers_promoted={b_cnt}")
+
+     def _apply_precision_policy(self):
+         prec = str(self.config.get("precision", "")).lower()
+         self.runtime_autocast_dtype = torch.float32
+         print(f"[DEBUG] Applying precision policy: {prec}")
+         if prec == "float8_e4m3fn":
+             self.runtime_autocast_dtype = torch.bfloat16
+             force_promote = os.getenv("LTXV_FORCE_BF16_ON_FP8", "0") == "1"
+             print(f"[DEBUG] FP8 detected. force_promote={force_promote}")
+             if force_promote and hasattr(torch, "float8_e4m3fn"):
+                 try:
+                     self._promote_fp8_weights_to_bf16(self.pipeline)
+                 except Exception as e:
+                     print(f"[DEBUG] FP8→BF16 promotion on the pipeline failed: {e}")
+                 try:
+                     if self.latent_upsampler:
+                         self._promote_fp8_weights_to_bf16(self.latent_upsampler)
+                 except Exception as e:
+                     print(f"[DEBUG] FP8→BF16 promotion on the upsampler failed: {e}")
+         elif prec == "bfloat16":
+             self.runtime_autocast_dtype = torch.bfloat16
+         elif prec == "mixed_precision":
+             self.runtime_autocast_dtype = torch.float16
+         else:
+             self.runtime_autocast_dtype = torch.float32
+
+     def _prepare_conditioning_tensor(self, filepath, height, width, padding_values):
+         print(f"[DEBUG] Loading conditioning image: {filepath}")
+         tensor = load_image_to_tensor_with_resize_and_crop(filepath, height, width)
+         tensor = torch.nn.functional.pad(tensor, padding_values)
+         out = tensor.to(self.device, dtype=self.runtime_autocast_dtype) if self.device == "cuda" else tensor.to(self.device)
+         print(f"[DEBUG] Cond shape={tuple(out.shape)} dtype={out.dtype} device={out.device}")
+         return out
+
+     def _dividir_latentes_por_tamanho(self, latents_brutos, num_latente_por_chunk: int, overlap: int = 1):
+         """
+         Splits the latent tensor into chunks of a fixed number of latent frames.
+
+         Args:
+             latents_brutos: tensor [B, C, T, H, W]
+             num_latente_por_chunk: number of latent frames per chunk
+             overlap: number of latent frames shared between consecutive chunks
+
+         Returns:
+             List[tensor]: list of cloned chunks
+         """
+         sum_latent = latents_brutos.shape[2]
+         chunks = []
+
+         if num_latente_por_chunk >= sum_latent:
+             return [latents_brutos]
+
+         n_chunks = sum_latent // num_latente_por_chunk
+         print("================ CAUSAL PRUNING =================")
+         print(f"[DEBUG] Total latent frames = {sum_latent}")
+         print(f"[DEBUG] Latent frames per chunk = {num_latente_por_chunk}")
+         print(f"[DEBUG] Number of chunks = {n_chunks}")
+
+         if n_chunks > 1:
+             start = 1
+             i = 0
+             while i < n_chunks:
+                 end = start + num_latente_por_chunk
+                 # If only a tiny tail would remain, absorb it into this (final) chunk.
+                 if end + 3 >= sum_latent - 1:
+                     end = sum_latent - 1
+                     i = n_chunks
+                 else:
+                     i += 1
+                 # Each chunk starts `overlap` frame(s) before `start`, so neighbours share frames.
+                 chunk_start = max(0, start - overlap)
+                 chunk = latents_brutos[:, :, chunk_start:end, :, :].clone().detach()
+                 chunks.append(chunk)
+                 print(f"[DEBUG] chunk{i}[:, :, {chunk_start}:{end}, :, :] = {chunk.shape[2]}")
+                 start = end
+         else:
+             print("[DEBUG] Minimum number of chunks")
+             print(f"[DEBUG] latents_brutos[:, :, :, :, :] = {latents_brutos.shape[2]}")
+             chunks.append(latents_brutos)
+         print("================ CAUSAL PRUNING =================")
+         return chunks
+
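+     # Illustrative example of the split above (assumed numbers for clarity, not executed code):
+     # with 18 latent frames, num_latente_por_chunk=4 and overlap=1, the chunks cover the index
+     # ranges [0:5], [4:9], [8:13] and [12:17]; each chunk re-uses the last latent frame of its
+     # predecessor, and the final latent frame is dropped by the causal prune.
+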
+     def _get_total_frames(self, video_path: str) -> int:
+         cmd = [
+             "ffprobe",
+             "-v", "error",
+             "-select_streams", "v:0",
+             "-count_frames",
+             "-show_entries", "stream=nb_read_frames",
+             "-of", "default=nokey=1:noprint_wrappers=1",
+             video_path
+         ]
+         result = subprocess.run(cmd, capture_output=True, text=True, check=True)
+         return int(result.stdout.strip())
+
+
+     def _gerar_lista_com_transicoes(self, pasta: str, video_paths: list[str], crossfade_frames: int = 8) -> list[str]:
+         """
+         Builds a new list of videos with smooth frame-by-frame blend transitions,
+         following exactly Carlos's linear logic.
+         """
+         poda = crossfade_frames
+         total_partes = len(video_paths)
+         video_fade_fim = None
+         video_fade_ini = None
+         nova_lista = []
+
+         print("=========== CAUSAL CONCATENATION =============")
+         print(f"[DEBUG] Starting pipeline with {total_partes} videos and {poda} crossfade frames")
+
+         for i in range(total_partes):
+             base = video_paths[i]
+
+             # --- TRIM ---
+             video_podado = os.path.join(pasta, f"{base}_podado_{i}.mp4")
+
+             if i < total_partes - 1:
+                 end_frame = self._get_total_frames(base) - poda
+             else:
+                 end_frame = self._get_total_frames(base)
+
+             if i > 0:
+                 start_frame = poda
+             else:
+                 start_frame = 0
+
+             cmd_fim = (
+                 f'ffmpeg -y -hide_banner -loglevel error -i "{base}" '
+                 f'-vf "trim=start_frame={start_frame}:end_frame={end_frame},setpts=PTS-STARTPTS" '
+                 f'-an "{video_podado}"'
+             )
+             subprocess.run(cmd_fim, shell=True, check=True)
+
+             # --- FADE-IN SEGMENT ---
+             if i > 0:
+                 video_fade_ini = os.path.join(pasta, f"{base}_fade_ini_{i}.mp4")
+                 cmd_ini = (
+                     f'ffmpeg -y -hide_banner -loglevel error -i "{base}" '
+                     f'-vf "trim=end_frame={poda},setpts=PTS-STARTPTS" -an "{video_fade_ini}"'
+                 )
+                 subprocess.run(cmd_ini, shell=True, check=True)
+
+             # --- TRANSITION ---
+             if video_fade_fim and video_fade_ini:
+                 video_fade = os.path.join(pasta, f"transicao_{i}_{i+1}.mp4")
+                 # Blend by frame index (N) so the crossfade completes over exactly `poda` frames.
+                 cmd_blend = (
+                     f'ffmpeg -y -hide_banner -loglevel error '
+                     f'-i "{video_fade_fim}" -i "{video_fade_ini}" '
+                     f'-filter_complex "[0:v][1:v]blend=all_expr=\'A*(1-N/{poda})+B*(N/{poda})\',format=yuv420p" '
+                     f'-frames:v {poda} "{video_fade}"'
+                 )
+                 subprocess.run(cmd_blend, shell=True, check=True)
+                 print(f"[DEBUG] Transition {i}/{i+1} added, {self._get_total_frames(video_fade)} frames ✅")
+                 nova_lista.append(video_fade)
+
+             # --- FADE-OUT SEGMENT ---
+             if i < total_partes - 1:
+                 video_fade_fim = os.path.join(pasta, f"{base}_fade_fim_{i}.mp4")
+                 cmd_fim = (
+                     f'ffmpeg -y -hide_banner -loglevel error -i "{base}" '
+                     f'-vf "trim=start_frame={end_frame-poda},setpts=PTS-STARTPTS" -an "{video_fade_fim}"'
+                 )
+                 subprocess.run(cmd_fim, shell=True, check=True)
+
+             nova_lista.append(video_podado)
+             print(f"[DEBUG] Trimmed video {i+1} added, {self._get_total_frames(video_podado)} frames ✅")
+
+         print("=========== CAUSAL CONCATENATION =============")
+         print(f"[DEBUG] {nova_lista}")
+         return nova_lista
+
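+     # Illustrative walk-through (assumed inputs, not executed code): with three parts and
+     # crossfade_frames=8, the first part keeps frames [0, N1-8), the middle part keeps
+     # [8, N2-8), the last part keeps [8, N3), and an 8-frame blended clip is inserted between
+     # consecutive parts, so the returned list reads:
+     #   [part1_trimmed, blend_1_2, part2_trimmed, blend_2_3, part3_trimmed]
+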
+     def _concat_mp4s_no_reencode(self, mp4_list: List[str], out_path: str):
+         """
+         Concatenates multiple MP4s without re-encoding, using ffmpeg's concat demuxer.
+         NOTE: all files must share the same codec, fps, resolution, etc.
+         """
+         if not mp4_list or len(mp4_list) < 2:
+             raise ValueError("Provide at least two MP4 files to concatenate.")
+
+         # Write the temporary list file consumed by ffmpeg
+         with tempfile.NamedTemporaryFile("w", delete=False, suffix=".txt") as f:
+             for mp4 in mp4_list:
+                 f.write(f"file '{os.path.abspath(mp4)}'\n")
+             list_path = f.name
+
+         cmd = f"ffmpeg -y -f concat -safe 0 -i {list_path} -c copy {out_path}"
+         print(f"[DEBUG] Concat: {cmd}")
+
+         try:
+             subprocess.check_call(shlex.split(cmd))
+         finally:
+             try:
+                 os.remove(list_path)
+             except Exception:
+                 pass
+
+
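+     # Example of the list file consumed by the concat demuxer above (illustrative paths only):
+     #   file '/app/output/part_1.mp4'
+     #   file '/app/output/transition_1_2.mp4'
+     #   file '/app/output/part_2.mp4'
+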
+     def generate(
+         self,
+         prompt,
+         negative_prompt,
+         mode="text-to-video",
+         start_image_filepath=None,
+         middle_image_filepath=None,
+         middle_frame_number=None,
+         middle_image_weight=1.0,
+         end_image_filepath=None,
+         end_image_weight=1.0,
+         input_video_filepath=None,
+         height=512,
+         width=704,
+         duration=2.0,
+         frames_to_use=9,
+         seed=42,
+         randomize_seed=True,
+         guidance_scale=3.0,
+         improve_texture=True,
+         progress_callback=None,
+         # Always latent → VAE → MP4 (simple path)
+         external_decode=True,
+     ):
+         t_all = time.perf_counter()
+         print(f"[DEBUG] generate() begin mode={mode} external_decode={external_decode} improve_texture={improve_texture}")
+         if self.device == "cuda":
+             torch.cuda.empty_cache(); torch.cuda.reset_peak_memory_stats()
+         self._log_gpu_memory("Start of generation")
+
+         if mode == "image-to-video" and not start_image_filepath:
+             raise ValueError("A start image is required for image-to-video mode")
+         if mode == "video-to-video" and not input_video_filepath:
+             raise ValueError("An input video is required for video-to-video mode")
+
+         used_seed = random.randint(0, 2**32 - 1) if randomize_seed else int(seed)
+         seed_everething(used_seed); print(f"[DEBUG] Seed used: {used_seed}")
+
+         FPS = 24.0; MAX_NUM_FRAMES = 2570
+         target_frames_rounded = round(duration * FPS)
+         n_val = round((float(target_frames_rounded) - 1.0) / 8.0)
+         actual_num_frames = max(9, min(MAX_NUM_FRAMES, int(n_val * 8 + 1)))
+         print(f"[DEBUG] Target frames: {actual_num_frames} (dur={duration}s @ {FPS}fps)")
+
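+         # Worked example of the frame computation above (illustrative numbers): duration=2.0 at
+         # 24 fps gives target_frames_rounded=48, n_val=round(47/8)=6 and
+         # actual_num_frames=6*8+1=49, i.e. durations are snapped to the 8n+1 frame counts the
+         # model expects, clamped to [9, 2570].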
+         height_padded = ((height - 1) // 32 + 1) * 32
+         width_padded = ((width - 1) // 32 + 1) * 32
+         padding_values = calculate_padding(height, width, height_padded, width_padded)
+         print(f"[DEBUG] Dimensions: ({height},{width}) -> padded ({height_padded},{width_padded}); padding={padding_values}")
+
+         generator = torch.Generator(device=self.device).manual_seed(used_seed)
+         conditioning_items = []
+
+         if mode == "image-to-video":
+             start_tensor = self._prepare_conditioning_tensor(start_image_filepath, height, width, padding_values)
+             conditioning_items.append(ConditioningItem(start_tensor, 0, 1.0))
+             if middle_image_filepath and middle_frame_number is not None:
+                 middle_tensor = self._prepare_conditioning_tensor(middle_image_filepath, height, width, padding_values)
+                 safe_middle_frame = max(0, min(int(middle_frame_number), actual_num_frames - 1))
+                 conditioning_items.append(ConditioningItem(middle_tensor, safe_middle_frame, float(middle_image_weight)))
+             if end_image_filepath:
+                 end_tensor = self._prepare_conditioning_tensor(end_image_filepath, height, width, padding_values)
+                 last_frame_index = actual_num_frames - 1
+                 conditioning_items.append(ConditioningItem(end_tensor, last_frame_index, float(end_image_weight)))
+         print(f"[DEBUG] Conditioning items: {len(conditioning_items)}")
+
+         # Always request latents (simple path)
+         call_kwargs = {
+             "prompt": prompt,
+             "negative_prompt": negative_prompt,
+             "height": height_padded,
+             "width": width_padded,
+             "num_frames": actual_num_frames,
+             "frame_rate": int(FPS),
+             "generator": generator,
+             "output_type": "latent",
+             "conditioning_items": conditioning_items if conditioning_items else None,
+             "media_items": None,
+             "decode_timestep": self.config["decode_timestep"],
+             "decode_noise_scale": self.config["decode_noise_scale"],
+             "stochastic_sampling": self.config["stochastic_sampling"],
+             "image_cond_noise_scale": 0.01,
+             "is_video": True,
+             "vae_per_channel_normalize": True,
+             "mixed_precision": (self.config["precision"] == "mixed_precision"),
+             "offload_to_cpu": False,
+             "enhance_prompt": False,
+             "skip_layer_strategy": SkipLayerStrategy.AttentionValues,
+         }
+         print(f"[DEBUG] output_type={call_kwargs['output_type']} skip_layer_strategy={call_kwargs['skip_layer_strategy']}")
+
+         if mode == "video-to-video":
+             media = load_media_file(
+                 media_path=input_video_filepath,
+                 height=height,
+                 width=width,
+                 max_frames=int(frames_to_use),
+                 padding=padding_values,
+             ).to(self.device)
+             call_kwargs["media_items"] = media
+             print(f"[DEBUG] media_items shape={tuple(media.shape)}")
+
+         latents = None
+         multi_scale_pipeline = None
+
+         try:
+             if improve_texture:
+                 if not self.latent_upsampler:
+                     raise ValueError("Spatial upscaler not loaded.")
+                 print("[DEBUG] Multi-scale: building pipeline...")
+                 multi_scale_pipeline = LTXMultiScalePipeline(self.pipeline, self.latent_upsampler)
+                 first_pass_args = self.config.get("first_pass", {}).copy()
+                 first_pass_args["guidance_scale"] = float(guidance_scale)
+                 second_pass_args = self.config.get("second_pass", {}).copy()
+                 second_pass_args["guidance_scale"] = float(guidance_scale)
+
+                 multi_scale_call_kwargs = call_kwargs.copy()
+                 multi_scale_call_kwargs.update(
+                     {
+                         "downscale_factor": self.config["downscale_factor"],
+                         "first_pass": first_pass_args,
+                         "second_pass": second_pass_args,
+                     }
+                 )
+                 print("[DEBUG] Calling multi_scale_pipeline...")
+                 t_ms = time.perf_counter()
+                 ctx = torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext()
+                 with ctx:
+                     result = multi_scale_pipeline(**multi_scale_call_kwargs)
+                 print(f"[DEBUG] multi_scale_pipeline time={time.perf_counter()-t_ms:.3f}s")
+
+                 if hasattr(result, "latents"):
+                     latents = result.latents
+                 elif hasattr(result, "images") and isinstance(result.images, torch.Tensor):
+                     latents = result.images
+                 else:
+                     latents = result
+                 print(f"[DEBUG] Latents (multi-scale): shape={tuple(latents.shape)}")
+             else:
+                 single_pass_kwargs = call_kwargs.copy()
+                 first_pass_config = self.config.get("first_pass", {})
+                 single_pass_kwargs.update(
+                     {
+                         "guidance_scale": float(guidance_scale),
+                         "stg_scale": first_pass_config.get("stg_scale"),
+                         "rescaling_scale": first_pass_config.get("rescaling_scale"),
+                         "skip_block_list": first_pass_config.get("skip_block_list"),
+                     }
+                 )
+                 schedule = first_pass_config.get("timesteps") or first_pass_config.get("guidance_timesteps")
+                 if mode == "video-to-video":
+                     schedule = [0.7]; print("[INFO] video-to-video mode (single pass): timesteps=[0.7]")
+                 if isinstance(schedule, (list, tuple)) and len(schedule) > 0:
+                     single_pass_kwargs["timesteps"] = schedule
+                     single_pass_kwargs["guidance_timesteps"] = schedule
+                 print(f"[DEBUG] Single-pass: timesteps_len={len(schedule) if schedule else 0}")
+
+                 print("\n[INFO] Running single-pass pipeline...")
+                 t_sp = time.perf_counter()
+                 ctx = torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext()
+                 with ctx:
+                     result = self.pipeline(**single_pass_kwargs)
+                 print(f"[DEBUG] single-pass time={time.perf_counter()-t_sp:.3f}s")
+
+                 if hasattr(result, "latents"):
+                     latents = result.latents
+                 elif hasattr(result, "images") and isinstance(result.images, torch.Tensor):
+                     latents = result.images
+                 else:
+                     latents = result
+                 print(f"[DEBUG] Latents (single-pass): shape={tuple(latents.shape)}")
+
+             # Staging and MP4 writing (simple: VAE → pixels → MP4)
+
+             latents_cpu = latents.detach().to("cpu", non_blocking=True)
+             if self.device == "cuda":
+                 torch.cuda.empty_cache()
+                 try:
+                     torch.cuda.ipc_collect()
+                 except Exception:
+                     pass
+
+             latents_parts = self._dividir_latentes_por_tamanho(latents_cpu, 4, 1)
+
+             temp_dir = tempfile.mkdtemp(prefix="ltxv_"); self._register_tmp_dir(temp_dir)
+             results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
+
+             partes_mp4 = []
+             par = 0
+
+             for latents_part in latents_parts:
+                 print(f"[DEBUG] Partition {par}: {tuple(latents_part.shape)}")
+
+                 par = par + 1
+                 output_video_path = os.path.join(temp_dir, f"output_{used_seed}_{par}.mp4")
+                 final_output_path = None
+
+                 print("[DEBUG] Decoding latent block with the VAE → pixel tensor...")
+                 # Use the manager with a per-item timestep; prevents target_shape and the NoneType.decode path
+                 pixel_tensor = vae_manager_singleton.decode(
+                     latents_part.to(self.device, non_blocking=True),
+                     decode_timestep=float(self.config.get("decode_timestep", 0.05))
+                 )
+                 log_tensor_info(pixel_tensor, "Pixel tensor (VAE output)")
+
+                 print("[DEBUG] Encoding MP4 from the pixel tensor (whole block)...")
+                 video_encode_tool_singleton.save_video_from_tensor(
+                     pixel_tensor,
+                     output_video_path,
+                     fps=call_kwargs["frame_rate"],
+                     progress_callback=progress_callback
+                 )
+
+                 candidate = os.path.join(results_dir, f"output_par_{par}.mp4")
+                 try:
+                     shutil.move(output_video_path, candidate)
+                     final_output_path = candidate
+                     print(f"[DEBUG] MP4 part {par} moved to {final_output_path}")
+                 except Exception as e:
+                     final_output_path = output_video_path
+                     print(f"[DEBUG] Move failed; using tmp file as final: {e}")
+                 # Append in both cases so a failed move does not silently drop the part.
+                 partes_mp4.append(final_output_path)
+
+             total_partes = len(partes_mp4)
+             if total_partes > 1:
+                 final_vid = os.path.join(results_dir, f"concat_fim_{used_seed}.mp4")
+                 partes_mp4_fade = self._gerar_lista_com_transicoes(pasta=results_dir, video_paths=partes_mp4, crossfade_frames=8)
+                 self._concat_mp4s_no_reencode(partes_mp4_fade, final_vid)
+             else:
+                 final_vid = partes_mp4[0]
+
+             self._log_gpu_memory("End of generation")
+             print(f"[DEBUG] generate() total time={time.perf_counter()-t_all:.3f}s")
+             return final_vid, used_seed
+
+         except Exception as e:
+             print("[DEBUG] EXCEPTION DURING GENERATION:")
+             print("".join(traceback.format_exception(type(e), e, e.__traceback__)))
+             raise
+         finally:
+             try:
+                 del latents
+             except Exception:
+                 pass
+             try:
+                 del multi_scale_pipeline
+             except Exception:
+                 pass
+
+             gc.collect()
+             try:
+                 if self.device == "cuda":
+                     torch.cuda.empty_cache()
+                     try:
+                         torch.cuda.ipc_collect()
+                     except Exception:
+                         pass
+             except Exception as e:
+                 print(f"[DEBUG] GPU cleanup in finally failed: {e}")
+
+             try:
+                 self.finalize(keep_paths=[])
+             except Exception as e:
+                 print(f"[DEBUG] finalize() in finally failed: {e}")
+
+ print("Creating VideoService instance. Model loading will start now...")
+ video_generation_service = VideoService()
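+
+ # Minimal usage sketch (an illustration, not part of the service API): assumes the project's
+ # runtime environment, model weights and a CUDA GPU are available. Kept under a __main__ guard
+ # so importing the module is unaffected.
+ if __name__ == "__main__":
+     video_path, seed_used = video_generation_service.generate(
+         prompt="A slow dolly shot of a lighthouse at dusk, waves crashing on the rocks",
+         negative_prompt="blurry, distorted, low quality",
+         mode="text-to-video",
+         height=512,
+         width=704,
+         duration=2.0,
+         improve_texture=False,
+     )
+     print(f"Video written to {video_path} (seed={seed_used})")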