eeuuia committed on
Commit 9185209 · verified · 1 Parent(s): 8eeab53

Update api/ltx/ltx_utils.py

Files changed (1)
  1. api/ltx/ltx_utils.py +173 -327
api/ltx/ltx_utils.py CHANGED
@@ -1,361 +1,207 @@
- # FILE: api/ltx_server_refactored_complete.py
- # DESCRIPTION: Final high-level orchestrator for LTX-Video generation.
- # This version features a unified generation workflow, random seed generation,
- # delegation to specialized modules, and advanced debugging capabilities.

- import gc
  import json
  import logging
- import os
- import shutil
- import sys
- import tempfile
  import time
  from pathlib import Path
- from typing import Dict, List, Optional, Tuple

- import torch
- import yaml
  import numpy as np
- from huggingface_hub import hf_hub_download

  # ==============================================================================
- # --- PROJECT SETUP AND IMPORTS ---
  # ==============================================================================

- # Logging configuration and warning suppression
- import warnings
- warnings.filterwarnings("ignore")
- logging.getLogger("huggingface_hub").setLevel(logging.ERROR)
- log_level = os.environ.get("ADUC_LOG_LEVEL", "INFO").upper()
- logging.basicConfig(level=log_level, format='[%(levelname)s] [%(name)s] %(message)s')
-
- # --- Configuration constants ---
- DEPS_DIR = Path("/data")
- LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
- RESULTS_DIR = Path("/app/output")
- DEFAULT_FPS = 24.0
- FRAMES_ALIGNMENT = 8
- LTX_REPO_ID = "Lightricks/LTX-Video"
-
- # Ensure the LTX-Video library is importable
  def add_deps_to_path():
      repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
      if repo_path not in sys.path:
          sys.path.insert(0, repo_path)
-         logging.info(f"[ltx_server] LTX-Video repository added to sys.path: {repo_path}")

  add_deps_to_path()

- # --- Modules from our architecture ---
  try:
-     from api.gpu_manager import gpu_manager
-     from managers.vae_manager import vae_manager_singleton
-     from tools.video_encode_tool import video_encode_tool_singleton
-     from api.utils.debug_utils import log_function_io
  except ImportError as e:
-     logging.critical(f"A crucial import from the local API/architecture failed. Error: {e}", exc_info=True)
-     sys.exit(1)

  # ==============================================================================
- # --- ORCHESTRATOR HELPER FUNCTIONS ---
  # ==============================================================================

- @log_function_io
- def calculate_padding(orig_h: int, orig_w: int, target_h: int, target_w: int) -> Tuple[int, int, int, int]:
-     """Calculates symmetric padding required to meet target dimensions."""
-     pad_h = target_h - orig_h
-     pad_w = target_w - orig_w
-     pad_top = pad_h // 2
-     pad_bottom = pad_h - pad_top
-     pad_left = pad_w // 2
-     pad_right = pad_w - pad_left
-     return (pad_left, pad_right, pad_top, pad_bottom)

  # ==============================================================================
- # --- SERVICE CLASS (THE ORCHESTRATOR) ---
  # ==============================================================================

- class VideoService:
-     """
-     Orchestrates the high-level logic of video generation, delegating low-level
-     tasks to specialized managers and utility modules.
-     """
-
-     @log_function_io
-     def __init__(self):
-         t0 = time.perf_counter()
-         logging.info("Initializing VideoService Orchestrator...")
-         RESULTS_DIR.mkdir(parents=True, exist_ok=True)
-
-         target_main_device_str = str(gpu_manager.get_ltx_device())
-         target_vae_device_str = str(gpu_manager.get_ltx_vae_device())
-         logging.info(f"LTX allocated to devices: Main='{target_main_device_str}', VAE='{target_vae_device_str}'")
-
-         self.config = self._load_config()
-         self._resolve_model_paths_from_cache()
-
-         self.pipeline, self.latent_upsampler = build_ltx_pipeline_on_cpu(self.config)
-
-         self.main_device = torch.device("cpu")
-         self.vae_device = torch.device("cpu")
-         self.move_to_device(main_device_str=target_main_device_str, vae_device_str=target_vae_device_str)
-
-         self._apply_precision_policy()
-         vae_manager_singleton.attach_pipeline(self.pipeline, device=self.vae_device, autocast_dtype=self.runtime_autocast_dtype)
-         logging.info(f"VideoService ready. Startup time: {time.perf_counter()-t0:.2f}s")
-
-     def _load_config(self) -> Dict:
-         """Loads the YAML configuration file."""
-         config_path = LTX_VIDEO_REPO_DIR / "configs" / "ltxv-13b-0.9.8-distilled-fp8.yaml"
-         logging.info(f"Loading config from: {config_path}")
-         with open(config_path, "r") as file:
-             return yaml.safe_load(file)
-
-     def _resolve_model_paths_from_cache(self):
-         """Finds the absolute paths to model files in the cache and updates the in-memory config."""
-         logging.info("Resolving model paths from Hugging Face cache...")
-         cache_dir = os.environ.get("HF_HOME")
-         try:
-             main_ckpt_path = hf_hub_download(repo_id=LTX_REPO_ID, filename=self.config["checkpoint_path"], cache_dir=cache_dir)
-             self.config["checkpoint_path"] = main_ckpt_path
-             logging.info(f" -> Main checkpoint resolved to: {main_ckpt_path}")
-
-             if self.config.get("spatial_upscaler_model_path"):
-                 upscaler_path = hf_hub_download(repo_id=LTX_REPO_ID, filename=self.config["spatial_upscaler_model_path"], cache_dir=cache_dir)
-                 self.config["spatial_upscaler_model_path"] = upscaler_path
-                 logging.info(f" -> Spatial upscaler resolved to: {upscaler_path}")
-         except Exception as e:
-             logging.critical(f"Failed to resolve model paths. Ensure setup.py ran correctly. Error: {e}", exc_info=True)
-             sys.exit(1)
-
-     @log_function_io
-     def move_to_device(self, main_device_str: str, vae_device_str: str):
-         """Moves pipeline components to their designated target devices."""
-         target_main_device = torch.device(main_device_str)
-         target_vae_device = torch.device(vae_device_str)
-         logging.info(f"Moving LTX models -> Main Pipeline: {target_main_device}, VAE: {target_vae_device}")
-
-         self.main_device = target_main_device
-         self.pipeline.to(self.main_device)
-         self.vae_device = target_vae_device
-         self.pipeline.vae.to(self.vae_device)
-         if self.latent_upsampler: self.latent_upsampler.to(self.main_device)
-         logging.info("LTX models successfully moved to target devices.")
-
-     def move_to_cpu(self):
-         """Moves all LTX components to CPU to free VRAM for other services."""
-         self.move_to_device(main_device_str="cpu", vae_device_str="cpu")
-         if torch.cuda.is_available(): torch.cuda.empty_cache()
-
-     def finalize(self):
-         """Cleans up GPU memory after a generation task."""
-         gc.collect()
-         if torch.cuda.is_available():
-             torch.cuda.empty_cache()
-             try: torch.cuda.ipc_collect()
-             except Exception: pass
-
-     # ==========================================================================
-     # --- BUSINESS LOGIC: UNIFIED PUBLIC ORCHESTRATOR ---
-     # ==========================================================================
-
-     @log_function_io
-     def generate_low_resolution(self, prompt: str, **kwargs) -> Tuple[Optional[str], Optional[str], Optional[int]]:
-         """
-         [UNIFIED ORCHESTRATOR] Generates a low-resolution video from a prompt.
-         Handles both single-line and multi-line prompts transparently.
-         """
-         logging.info("Starting unified low-resolution generation (random seed)...")
-         used_seed = self._get_random_seed()
-         seed_everything(used_seed)
-         logging.info(f"Using randomly generated seed: {used_seed}")
-
-         prompt_list = [p.strip() for p in prompt.splitlines() if p.strip()]
-         if not prompt_list: raise ValueError("Prompt is empty or contains no valid lines.")
-
-         is_narrative = len(prompt_list) > 1
-         logging.info(f"Generation mode detected: {'Narrative' if is_narrative else 'Simple'} ({len(prompt_list)} scene(s)).")
-
-         num_chunks = len(prompt_list)
-         total_frames = self._calculate_aligned_frames(kwargs.get("duration", 4.0))
-         frames_per_chunk = max(FRAMES_ALIGNMENT, (total_frames // num_chunks // FRAMES_ALIGNMENT) * FRAMES_ALIGNMENT)
-         overlap_frames = self.config.get("overlap_frames", 8) if is_narrative else 0
-
-         temp_latent_paths = []
-         overlap_condition_item = None
-
-         try:
-             for i, chunk_prompt in enumerate(prompt_list):
-                 logging.info(f"Processing scene {i+1}/{num_chunks}: '{chunk_prompt[:50]}...'")
-
-                 if i == num_chunks - 1:
-                     processed_frames = (num_chunks - 1) * frames_per_chunk
-                     current_frames = total_frames - processed_frames
-                 else:
-                     current_frames = frames_per_chunk
-
-                 if i > 0: current_frames += overlap_frames
-
-                 current_conditions = kwargs.get("initial_conditions", []) if i == 0 else []
-                 if overlap_condition_item: current_conditions.append(overlap_condition_item)
-
-                 chunk_latents = self._generate_single_chunk_low(
-                     prompt=chunk_prompt, num_frames=current_frames, seed=used_seed + i,
-                     conditioning_items=current_conditions, **kwargs
-                 )
-                 if chunk_latents is None: raise RuntimeError(f"Failed to generate latents for scene {i+1}.")
-
-                 if is_narrative and i < num_chunks - 1:
-                     overlap_latents = chunk_latents[:, :, -overlap_frames:, :, :].clone()
-                     overlap_condition_item = ConditioningItem(media_item=overlap_latents, media_frame_number=0, conditioning_strength=1.0)
-
-                 if i > 0: chunk_latents = chunk_latents[:, :, overlap_frames:, :, :]
-
-                 chunk_path = RESULTS_DIR / f"temp_chunk_{i}_{used_seed}.pt"
-                 torch.save(chunk_latents.cpu(), chunk_path)
-                 temp_latent_paths.append(chunk_path)
-
-             base_filename = "narrative_video" if is_narrative else "single_video"
-             return self._finalize_generation(temp_latent_paths, base_filename, used_seed)
-         except Exception as e:
-             logging.error(f"Error during unified generation: {e}", exc_info=True)
-             return None, None, None
-         finally:
-             for path in temp_latent_paths:
-                 if path.exists(): path.unlink()
-             self.finalize()
-
-     # ==========================================================================
-     # --- INTERNAL WORK UNITS AND HELPERS ---
-     # ==========================================================================
-
-     @log_function_io
-     def _generate_single_chunk_low(self, **kwargs) -> Optional[torch.Tensor]:
-         """[WORKER] Calls the pipeline to generate a single chunk of latents."""
-         height_padded, width_padded = (self._align(d) for d in (kwargs['height'], kwargs['width']))
-         downscale_factor = self.config.get("downscale_factor", 0.6666666)
-         vae_scale_factor = self.pipeline.vae_scale_factor
-         downscaled_height = self._align(int(height_padded * downscale_factor), vae_scale_factor)
-         downscaled_width = self._align(int(width_padded * downscale_factor), vae_scale_factor)
-
-         first_pass_config = self.config.get("first_pass", {}).copy()
-         if kwargs.get("ltx_configs_override"):
-             self._apply_ui_overrides(first_pass_config, kwargs["ltx_configs_override"])
-
-         pipeline_kwargs = {
-             "prompt": kwargs['prompt'], "negative_prompt": kwargs['negative_prompt'],
-             "height": downscaled_height, "width": downscaled_width, "num_frames": kwargs['num_frames'],
-             "frame_rate": DEFAULT_FPS, "generator": torch.Generator(device=self.main_device).manual_seed(kwargs['seed']),
-             "output_type": "latent", "conditioning_items": kwargs['conditioning_items'], **first_pass_config
-         }
-
-         with torch.autocast(device_type=self.main_device.type, dtype=self.runtime_autocast_dtype, enabled="cuda" in self.main_device.type):
-             latents_raw = self.pipeline(**pipeline_kwargs).images
-
-         return latents_raw.to(self.main_device)
-
-     @log_function_io
-     def _finalize_generation(self, temp_latent_paths: List[Path], base_filename: str, seed: int) -> Tuple[str, str, int]:
-         """Consolidates latents, decodes them to video, and saves final artifacts."""
-         logging.info("Finalizing generation: decoding latents to video.")
-         all_tensors_cpu = [torch.load(p) for p in temp_latent_paths]
-         final_latents = torch.cat(all_tensors_cpu, dim=2)
-
-         final_latents_path = RESULTS_DIR / f"latents_{base_filename}_{seed}.pt"
-         torch.save(final_latents, final_latents_path)
-         logging.info(f"Final latents saved to: {final_latents_path}")
-
-         pixel_tensor = vae_manager_singleton.decode(
-             final_latents, decode_timestep=float(self.config.get("decode_timestep", 0.05))
-         )
-         video_path = self._save_and_log_video(pixel_tensor, f"{base_filename}_{seed}")
-         return str(video_path), str(final_latents_path), seed
-
-     @log_function_io
-     def prepare_condition_items(self, items_list: List, height: int, width: int, num_frames: int) -> List[ConditioningItem]:
-         """[UNIFIED] Prepares ConditioningItems from a mixed list of file paths and tensors."""
-         if not items_list: return []
-         height_padded, width_padded = self._align(height), self._align(width)
-         padding_values = calculate_padding(height, width, height_padded, width_padded)
-
-         conditioning_items = []
-         for media_item, frame, weight in items_list:
-             if isinstance(media_item, str):
-                 tensor = load_image_to_tensor_with_resize_and_crop(media_item, height, width)
-                 tensor = torch.nn.functional.pad(tensor, padding_values)
-                 tensor = tensor.to(self.main_device, dtype=self.runtime_autocast_dtype)
-             elif isinstance(media_item, torch.Tensor):
-                 tensor = media_item.to(self.main_device, dtype=self.runtime_autocast_dtype)
-             else:
-                 logging.warning(f"Unknown conditioning media type: {type(media_item)}. Skipping.")
-                 continue
-
-             safe_frame = max(0, min(int(frame), num_frames - 1))
-             conditioning_items.append(ConditioningItem(tensor, safe_frame, float(weight)))
-         return conditioning_items
-
-     def _apply_ui_overrides(self, config_dict: Dict, overrides: Dict):
-         """Applies advanced settings from the UI to a config dictionary."""
-         # Override step counts
-         for key in ["num_inference_steps", "skip_initial_inference_steps", "skip_final_inference_steps"]:
-             ui_value = overrides.get(key)
-             if ui_value and ui_value > 0:
-                 config_dict[key] = ui_value
-                 logging.info(f"Override: '{key}' set to {ui_value} by UI.")
-
-         # Override guidance settings
-         preset = overrides.get("guidance_preset", "Padrão (Recomendado)")
-         guidance_overrides = {}
-         if preset == "Agressivo":
-             guidance_overrides = {"guidance_scale": [1, 2, 8, 12, 8, 2, 1], "stg_scale": [0, 0, 5, 6, 5, 3, 2]}
-         elif preset == "Suave":
-             guidance_overrides = {"guidance_scale": [1, 1, 4, 5, 4, 1, 1], "stg_scale": [0, 0, 2, 2, 2, 1, 0]}
-         elif preset == "Customizado":
-             try:
-                 guidance_overrides["guidance_scale"] = json.loads(overrides["guidance_scale_list"])
-                 guidance_overrides["stg_scale"] = json.loads(overrides["stg_scale_list"])
-             except Exception as e:
-                 logging.warning(f"Failed to parse custom guidance values: {e}. Using defaults.")
-
-         if guidance_overrides:
-             config_dict.update(guidance_overrides)
-             logging.info(f"Applying '{preset}' guidance preset overrides.")
-
-     def _save_and_log_video(self, pixel_tensor: torch.Tensor, base_filename: str) -> Path:
-         with tempfile.TemporaryDirectory() as temp_dir:
-             temp_path = os.path.join(temp_dir, f"{base_filename}.mp4")
-             video_encode_tool_singleton.save_video_from_tensor(pixel_tensor, temp_path, fps=DEFAULT_FPS)
-             final_path = RESULTS_DIR / f"{base_filename}.mp4"
-             shutil.move(temp_path, final_path)
-             logging.info(f"Video saved successfully to: {final_path}")
-             return final_path

-     def _apply_precision_policy(self):
-         precision = str(self.config.get("precision", "bfloat16")).lower()
-         if precision in ["float8_e4m3fn", "bfloat16"]: self.runtime_autocast_dtype = torch.bfloat16
-         elif precision == "mixed_precision": self.runtime_autocast_dtype = torch.float16
-         else: self.runtime_autocast_dtype = torch.float32
-         logging.info(f"Runtime precision policy set for autocast: {self.runtime_autocast_dtype}")
-
-     def _align(self, dim: int, alignment: int = FRAMES_ALIGNMENT) -> int:
-         return ((dim - 1) // alignment + 1) * alignment

-     def _calculate_aligned_frames(self, duration_s: float, min_frames: int = 1) -> int:
-         num_frames = int(round(duration_s * DEFAULT_FPS))
-         aligned_frames = self._align(num_frames)
-         return max(aligned_frames, min_frames)  # max() keeps the result aligned and at least min_frames

-     def _get_random_seed(self) -> int:
-         """Always generates and returns a new random seed."""
-         return random.randint(0, 2**32 - 1)

  # ==============================================================================
- # --- SINGLETON INSTANTIATION ---
  # ==============================================================================
- try:
-     video_generation_service = VideoService()
-     logging.info("Global VideoService orchestrator instance created successfully.")
- except Exception as e:
-     logging.critical(f"Failed to initialize VideoService: {e}", exc_info=True)
-     sys.exit(1)
+ # FILE: api/ltx/ltx_utils.py
+ # DESCRIPTION: Comprehensive, self-contained utility module for the LTX pipeline.
+ # Handles dependency path injection, model loading, data structures, and helper functions.

+ import os
+ import random
  import json
  import logging
  import time
+ import sys
  from pathlib import Path
+ from typing import Dict, Optional, Tuple, Union
+ from dataclasses import dataclass
+ from enum import Enum, auto

  import numpy as np
+ import torch
+ import torchvision.transforms.functional as TVF
+ from PIL import Image
+ from safetensors import safe_open
+ from transformers import T5EncoderModel, T5Tokenizer

  # ==============================================================================
+ # --- CRITICAL: DEPENDENCY PATH INJECTION ---
  # ==============================================================================

+ # Path to the cloned LTX-Video repository
+ LTX_VIDEO_REPO_DIR = Path("/data/LTX-Video")
+
  def add_deps_to_path():
+     """
+     Adds the LTX repository directory to sys.path so that its
+     libraries can be imported.
+     """
      repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
      if repo_path not in sys.path:
          sys.path.insert(0, repo_path)
+         logging.info(f"[ltx_utils] LTX-Video repository added to sys.path: {repo_path}")

+ # Run immediately so the environment is configured before any LTX import.
  add_deps_to_path()

+
+ # ==============================================================================
+ # --- LTX-VIDEO LIBRARY IMPORTS (after path setup) ---
+ # ==============================================================================
  try:
+     from ltx_video.pipelines.pipeline_ltx_video import LTXVideoPipeline
+     from ltx_video.models.autoencoders.latent_upsampler import LatentUpsampler
+     from ltx_video.models.autoencoders.causal_video_autoencoder import CausalVideoAutoencoder
+     from ltx_video.models.transformers.transformer3d import Transformer3DModel
+     from ltx_video.models.transformers.symmetric_patchifier import SymmetricPatchifier
+     from ltx_video.schedulers.rf import RectifiedFlowScheduler
+     from ltx_video.models.autoencoders.vae_encode import un_normalize_latents, normalize_latents
+     import ltx_video.pipelines.crf_compressor as crf_compressor
  except ImportError as e:
+     raise ImportError(f"Could not import from LTX-Video library even after setting sys.path. Check repo integrity at '{LTX_VIDEO_REPO_DIR}'. Error: {e}")
+

  # ==============================================================================
+ # --- DATA STRUCTURES AND ENUMS (centralized here) ---
  # ==============================================================================

+ @dataclass
+ class ConditioningItem:
+     """Defines a single frame-conditioning item, used to guide the generation pipeline."""
+     media_item: torch.Tensor
+     media_frame_number: int
+     conditioning_strength: float
+     media_x: Optional[int] = None
+     media_y: Optional[int] = None
+
+
+ class SkipLayerStrategy(Enum):
+     """Defines the strategy for how spatio-temporal guidance is applied across transformer blocks."""
+     AttentionSkip = auto()
+     AttentionValues = auto()
+     Residual = auto()
+     TransformerBlock = auto()
+

  # ==============================================================================
+ # --- MODEL AND PIPELINE BUILD FUNCTIONS ---
  # ==============================================================================

+ def create_latent_upsampler(latent_upsampler_model_path: str, device: str) -> LatentUpsampler:
+     """Loads the Latent Upsampler model from a checkpoint path."""
+     logging.info(f"Loading Latent Upsampler from: {latent_upsampler_model_path} to device: {device}")
+     latent_upsampler = LatentUpsampler.from_pretrained(latent_upsampler_model_path)
+     latent_upsampler.to(device)
+     latent_upsampler.eval()
+     return latent_upsampler
+
+ def build_ltx_pipeline_on_cpu(config: Dict) -> Tuple[LTXVideoPipeline, Optional[torch.nn.Module]]:
+     """Builds the complete LTX pipeline and upsampler on the CPU."""
+     t0 = time.perf_counter()
+     logging.info("Building LTX pipeline on CPU...")
+
+     ckpt_path = Path(config["checkpoint_path"])
+     if not ckpt_path.is_file():
+         raise FileNotFoundError(f"Main checkpoint file not found: {ckpt_path}")
+
+     with safe_open(ckpt_path, framework="pt") as f:
+         metadata = f.metadata() or {}
+         config_str = metadata.get("config", "{}")
+         configs = json.loads(config_str)
+         allowed_inference_steps = configs.get("allowed_inference_steps")
+
+     vae = CausalVideoAutoencoder.from_pretrained(ckpt_path).to("cpu")
+     transformer = Transformer3DModel.from_pretrained(ckpt_path).to("cpu")
+     scheduler = RectifiedFlowScheduler.from_pretrained(ckpt_path)

+     text_encoder_path = config["text_encoder_model_name_or_path"]
+     text_encoder = T5EncoderModel.from_pretrained(text_encoder_path, subfolder="text_encoder").to("cpu")
+     tokenizer = T5Tokenizer.from_pretrained(text_encoder_path, subfolder="tokenizer")
+     patchifier = SymmetricPatchifier(patch_size=1)
+
+     precision = config.get("precision", "bfloat16")
+     if precision == "bfloat16":
+         vae.to(torch.bfloat16)
+         transformer.to(torch.bfloat16)
+         text_encoder.to(torch.bfloat16)

+     pipeline = LTXVideoPipeline(
+         transformer=transformer, patchifier=patchifier, text_encoder=text_encoder,
+         tokenizer=tokenizer, scheduler=scheduler, vae=vae,
+         allowed_inference_steps=allowed_inference_steps,
+         prompt_enhancer_image_caption_model=None, prompt_enhancer_image_caption_processor=None,
+         prompt_enhancer_llm_model=None, prompt_enhancer_llm_tokenizer=None,
+     )
+
+     latent_upsampler = None
+     if config.get("spatial_upscaler_model_path"):
+         spatial_path = config["spatial_upscaler_model_path"]
+         latent_upsampler = create_latent_upsampler(spatial_path, device="cpu")
+         if precision == "bfloat16":
+             latent_upsampler.to(torch.bfloat16)
+
+     logging.info(f"LTX pipeline built on CPU in {time.perf_counter() - t0:.2f}s")
+     return pipeline, latent_upsampler

  # ==============================================================================
+ # --- HELPER FUNCTIONS (latent processing, seed, image prep) ---
  # ==============================================================================
+
+ def adain_filter_latent(
+     latents: torch.Tensor, reference_latents: torch.Tensor, factor=1.0
+ ) -> torch.Tensor:
+     """Applies AdaIN to transfer the style from a reference latent to another."""
+     result = latents.clone()
+     for i in range(latents.size(0)):
+         for c in range(latents.size(1)):
+             r_sd, r_mean = torch.std_mean(reference_latents[i, c], dim=None)
+             i_sd, i_mean = torch.std_mean(result[i, c], dim=None)
+             if i_sd > 1e-6:
+                 result[i, c] = ((result[i, c] - i_mean) / i_sd) * r_sd + r_mean
+     return torch.lerp(latents, result, factor)
+
+ def seed_everything(seed: int):
+     """Sets the seed for reproducibility."""
+     random.seed(seed)
+     os.environ['PYTHONHASHSEED'] = str(seed)
+     np.random.seed(seed)
+     torch.manual_seed(seed)
+     torch.cuda.manual_seed_all(seed)
+     torch.backends.cudnn.deterministic = True
+     torch.backends.cudnn.benchmark = False
+
+ def load_image_to_tensor_with_resize_and_crop(
+     image_input: Union[str, Image.Image],
+     target_height: int,
+     target_width: int,
+ ) -> torch.Tensor:
+     """Loads and processes an image into a 5D tensor compatible with the LTX pipeline."""
+     if isinstance(image_input, str):
+         image = Image.open(image_input).convert("RGB")
+     elif isinstance(image_input, Image.Image):
+         image = image_input
+     else:
+         raise ValueError("image_input must be a file path or a PIL Image object")
+
+     input_width, input_height = image.size
+     aspect_ratio_target = target_width / target_height
+     aspect_ratio_frame = input_width / input_height
+
+     if aspect_ratio_frame > aspect_ratio_target:
+         new_width, new_height = int(input_height * aspect_ratio_target), input_height
+         x_start, y_start = (input_width - new_width) // 2, 0
+     else:
+         new_width, new_height = input_width, int(input_width / aspect_ratio_target)
+         x_start, y_start = 0, (input_height - new_height) // 2
+
+     image = image.crop((x_start, y_start, x_start + new_width, y_start + new_height))
+     image = image.resize((target_width, target_height), Image.Resampling.LANCZOS)
+
+     frame_tensor = TVF.to_tensor(image)
+     frame_tensor = TVF.gaussian_blur(frame_tensor, kernel_size=(3, 3))
+
+     frame_tensor_hwc = frame_tensor.permute(1, 2, 0)
+     frame_tensor_hwc = crf_compressor.compress(frame_tensor_hwc)
+     frame_tensor = frame_tensor_hwc.permute(2, 0, 1)
+     # Normalize to [-1, 1] range
+     frame_tensor = (frame_tensor * 2.0) - 1.0
+
+     # Create 5D tensor: (batch_size=1, channels=3, num_frames=1, height, width)
+     return frame_tensor.unsqueeze(0).unsqueeze(2)
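
For context, a minimal usage sketch of the utilities added above (not part of the commit): the import path follows the file location api/ltx/ltx_utils.py, and the config keys mirror those read by build_ltx_pipeline_on_cpu ("checkpoint_path", "text_encoder_model_name_or_path", "spatial_upscaler_model_path", "precision"). The paths and sizes are placeholders, not values from this repository.

# Hypothetical usage sketch of api/ltx/ltx_utils.py (illustration only, not part of this commit).
from api.ltx.ltx_utils import (
    ConditioningItem,
    build_ltx_pipeline_on_cpu,
    load_image_to_tensor_with_resize_and_crop,
    seed_everything,
)

config = {
    "checkpoint_path": "/data/checkpoints/ltxv-13b.safetensors",             # placeholder path
    "text_encoder_model_name_or_path": "/data/checkpoints/t5-text-encoder",  # placeholder path
    "spatial_upscaler_model_path": None,  # skip the latent upsampler in this sketch
    "precision": "bfloat16",
}

seed_everything(42)                                              # make the run reproducible
pipeline, latent_upsampler = build_ltx_pipeline_on_cpu(config)   # models stay on CPU until moved

# Turn a reference image into a 5D tensor (B=1, C=3, F=1, H, W) and wrap it as a
# first-frame condition with full strength.
image_tensor = load_image_to_tensor_with_resize_and_crop("reference.png", 512, 768)
condition = ConditioningItem(media_item=image_tensor, media_frame_number=0, conditioning_strength=1.0)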