Eueuiaa committed
Commit 2134e1a · verified · 1 parent: f9ef94f

Update api/ltx_server_refactored.py

Files changed (1):
  1. api/ltx_server_refactored.py +248 -69
api/ltx_server_refactored.py CHANGED
@@ -169,55 +169,11 @@ class VideoService:
     def _apply_precision_policy(self):
         prec = str(self.config.get("precision", "")).lower()
         self.runtime_autocast_dtype = torch.float32
-        print(f"[DEBUG] Applying precision policy: {prec}")
-        if prec == "float8_e4m3fn":
-            self.runtime_autocast_dtype = torch.bfloat16
-            force_promote = True  # os.getenv("LTXV_FORCE_BF16_ON_FP8", "0") == "1"
-            print(f"[DEBUG] FP8 detected. force_promote={force_promote}")
-            if force_promote:  # and hasattr(torch, "float8_e4m3fn"):
-                try:
-                    self._promote_fp8_weights_to_bf16(self.pipeline)
-                except Exception as e:
-                    print(f"[DEBUG] FP8→BF16 promotion on the pipeline failed: {e}")
-                try:
-                    if self.latent_upsampler:
-                        self._promote_fp8_weights_to_bf16(self.latent_upsampler)
-                except Exception as e:
-                    print(f"[DEBUG] FP8→BF16 promotion on the upsampler failed: {e}")
-        elif prec == "bfloat16":
+        if prec in ["float8_e4m3fn", "bfloat16"]:
             self.runtime_autocast_dtype = torch.bfloat16
         elif prec == "mixed_precision":
             self.runtime_autocast_dtype = torch.float16
-        else:
-            self.runtime_autocast_dtype = torch.float32
-
-
-
-    def _promote_fp8_weights_to_bf16(self, module):
-        if not isinstance(module, torch.nn.Module):
-            print("[DEBUG] FP8→BF16 promotion skipped: target is not an nn.Module.")
-            return
-        f8 = getattr(torch, "float8_e4m3fn", None)
-        if f8 is None:
-            print("[DEBUG] torch.float8_e4m3fn unavailable.")
-            return
-        p_cnt = b_cnt = 0
-        for _, p in module.named_parameters(recurse=True):
-            try:
-                if p.dtype == f8:
-                    with torch.no_grad():
-                        p.data = p.data.to(torch.bfloat16); p_cnt += 1
-            except Exception:
-                pass
-        for _, b in module.named_buffers(recurse=True):
-            try:
-                if hasattr(b, "dtype") and b.dtype == f8:
-                    b.data = b.data.to(torch.bfloat16); b_cnt += 1
-            except Exception:
-                pass
-        print(f"[DEBUG] FP8→BF16: params_promoted={p_cnt}, buffers_promoted={b_cnt}")
-
-
+
     def _register_tmp_dir(self, d: str):
         if d and os.path.isdir(d):
             self._tmp_dirs.add(d); print(f"[DEBUG] Registered tmp dir: {d}")
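
The net effect of this hunk is that the precision policy becomes a pure dtype lookup: FP8 checkpoints are no longer promoted weight by weight, they simply run under a BF16 autocast. A minimal standalone sketch of the resulting mapping (the function name is illustrative, not part of the file):

    import torch

    def resolve_autocast_dtype(precision: str) -> torch.dtype:
        # FP8 and BF16 checkpoints both select a BF16 autocast at runtime
        if precision.lower() in ("float8_e4m3fn", "bfloat16"):
            return torch.bfloat16
        if precision.lower() == "mixed_precision":
            return torch.float16
        return torch.float32  # default, as in the attribute initialization

    assert resolve_autocast_dtype("FLOAT8_E4M3FN") is torch.bfloat16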
@@ -253,11 +209,11 @@ class VideoService:
         print(f"[DEBUG] Video saved at: {final_path}")
         return final_path
 
-    def prepare_condition_items(
-        self, items_list: List, height: int,
-        width: int, num_frames: int,
-    ):
+    # ==============================================================================
+    # --- MODULAR FUNCTIONS WITH THE SIMPLIFIED CHUNKING LOGIC ---
+    # ==============================================================================
+
+    def prepare_condition_items(self, items_list: List, height: int, width: int, num_frames: int):
         if not items_list: return []
         height_padded = ((height - 1) // 8 + 1) * 8
         width_padded = ((width - 1) // 8 + 1) * 8
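
`prepare_condition_items` keeps the `((x - 1) // 8 + 1) * 8` padding, which rounds a dimension up to the next multiple of 8 (presumably the model's spatial stride). A quick sanity check of the arithmetic:

    for x, expected in [(1, 8), (8, 8), (9, 16), (720, 720), (721, 728)]:
        assert ((x - 1) // 8 + 1) * 8 == expected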
@@ -269,18 +225,7 @@ class VideoService:
             conditioning_items.append(ConditioningItem(tensor, safe_frame, float(weight)))
         return conditioning_items
 
-
-    # ==============================================================================
-    # --- MODULAR FUNCTIONS WITH THE SIMPLIFIED CHUNKING LOGIC ---
-    # ==============================================================================
-    def generate_low(
-        self, prompt, negative_prompt,
-        height, width, duration, seed,
-        conditioning_items=None,
-        conditions_itens=None,
-        ltx_configs_override: dict = None,
-    ):
-        guidance_scale = 4
+    def generate_low(self, prompt, negative_prompt, height, width, duration, guidance_scale, seed, conditioning_items=None):
         used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
         seed_everething(used_seed)
         FPS = 24.0
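
`generate_low` loses the unused `conditions_itens` parameter and the hard-coded `guidance_scale = 4`; the caller now passes guidance explicitly. A hypothetical call under the new signature (argument values are illustrative):

    video_path, tensor_path, used_seed = video_generation_service.generate_low(
        prompt="a slow pan across a foggy harbor",
        negative_prompt="blurry, low quality",
        height=480, width=704, duration=5.0,
        guidance_scale=4.0,
        seed=None,  # None draws a random seed, returned as used_seed
        conditioning_items=None,
    )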
@@ -299,7 +244,7 @@ class VideoService:
             "prompt": prompt, "negative_prompt": negative_prompt, "height": downscaled_height, "width": downscaled_width,
             "num_frames": actual_num_frames, "frame_rate": int(FPS), "generator": torch.Generator(device=self.device).manual_seed(used_seed),
             "output_type": "latent", "conditioning_items": conditioning_items,
-            "guidance_scale": float(guidance_scale),
+            # "guidance_scale": float(guidance_scale),
             **(self.config.get("first_pass", {}))
         }
         try:
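
Note that with `"guidance_scale"` commented out, whatever the `first_pass` section of the service config provides is the only guidance setting left, since `**self.config.get("first_pass", {})` is merged into the kwargs last. A config shaped roughly like this (keys and values are assumptions for illustration, not taken from the repo) would therefore drive it:

    config = {
        "first_pass": {
            "guidance_scale": [1, 1, 6, 8, 6, 1, 1],  # per-step schedule
            "stg_scale": [0, 0, 4, 4, 4, 2, 1],
        }
    }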
@@ -311,18 +256,252 @@ class VideoService:
             tensor_path = os.path.join(results_dir, f"latents_low_res_{used_seed}.pt")
             torch.save(latents_cpu, tensor_path)
             return video_path, tensor_path, used_seed
+
         except Exception as e:
-            pass
+            print(f"[DEBUG] generation failed: {e}")
         finally:
             torch.cuda.empty_cache()
             torch.cuda.ipc_collect()
             self.finalize(keep_paths=[])
+
+    # ==============================================================================
+    # --- FUNCTION #1: SINGLE-CHUNK GENERATOR (INTERNAL HELPER) ---
+    # ==============================================================================
+    def _generate_single_chunk_low(
+            self, prompt, negative_prompt,
+            height, width, num_frames, guidance_scale,
+            seed, initial_latent_condition=None, image_conditions=None,
+            ltx_configs_override=None):
+        """
+        [GENERATION NODE]
+        Generates a SINGLE chunk of raw latents. This is the fundamental unit of work.
+        """
+        print("\n" + "-" * 20 + " START: _generate_single_chunk_low " + "-" * 20)
+
+        # --- NODE 1.1: PARAMETER SETUP ---
+        height_padded = ((height - 1) // 8 + 1) * 8
+        width_padded = ((width - 1) // 8 + 1) * 8
+        generator = torch.Generator(device=self.device).manual_seed(seed)
+
+        downscale_factor = self.config.get("downscale_factor", 0.6666666)
+        vae_scale_factor = self.pipeline.vae_scale_factor
+
+        x_width = int(width_padded * downscale_factor)
+        downscaled_width = x_width - (x_width % vae_scale_factor)
+        x_height = int(height_padded * downscale_factor)
+        downscaled_height = x_height - (x_height % vae_scale_factor)
+
+        # --- NODE 1.2: ASSEMBLING CONDITIONS AND OVERRIDES ---
+        all_conditions = []
+        if image_conditions: all_conditions.extend(image_conditions)
+        if initial_latent_condition: all_conditions.append(initial_latent_condition)
+
+        first_pass_config = self.config.get("first_pass", {}).copy()
+
+        if ltx_configs_override:
+            print("[DEBUG] Overriding LTX settings with values from the UI...")
+            preset = ltx_configs_override.get("guidance_preset")
+            if preset == "Customizado":
+                try:
+                    first_pass_config["guidance_scale"] = json.loads(ltx_configs_override["guidance_scale_list"])
+                    first_pass_config["stg_scale"] = json.loads(ltx_configs_override["stg_scale_list"])
+                    # first_pass_config["guidance_timesteps"] = json.loads(ltx_configs_override["timesteps_list"])
+                except Exception as e:
+                    print(f" > ERROR parsing custom values: {e}. Falling back to the defaults.")
+            elif preset == "Agressivo":
+                first_pass_config["guidance_scale"] = [1, 2, 8, 12, 8, 2, 1]
+                first_pass_config["stg_scale"] = [0, 0, 5, 6, 5, 3, 2]
+            elif preset == "Suave":
+                first_pass_config["guidance_scale"] = [1, 1, 4, 5, 4, 1, 1]
+                first_pass_config["stg_scale"] = [0, 0, 2, 2, 2, 1, 0]
+
+        first_pass_kwargs = {
+            "prompt": prompt, "negative_prompt": negative_prompt, "height": downscaled_height, "width": downscaled_width,
+            "num_frames": num_frames, "frame_rate": 24, "generator": generator, "output_type": "latent",
+            "conditioning_items": all_conditions if all_conditions else None,
+            **first_pass_config
+        }
+
+        # --- NODE 1.3: PIPELINE CALL ---
+        try:
+            with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device.type == 'cuda'):
+                latents_bruto = self.pipeline(**first_pass_kwargs).images
+            log_tensor_info(latents_bruto, f"Raw latents generated for: '{prompt[:40]}...'")
+            print("-" * 20 + " END: _generate_single_chunk_low " + "-" * 20)
+            # Return the latent tensor itself; the orchestrators trim,
+            # concatenate and persist it. finalize() is deferred to them so
+            # multi-chunk runs keep the pipeline alive between chunks.
+            return latents_bruto
+        except Exception as e:
+            print("-" * 20 + f" ERROR: _generate_single_chunk_low {e} " + "-" * 20)
+            raise
+        finally:
+            torch.cuda.empty_cache()
+            torch.cuda.ipc_collect()
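
`_generate_single_chunk_low` accepts UI overrides either as a named preset ("Agressivo", "Suave") or, with the "Customizado" preset, as JSON-encoded lists. A sketch of the override dict it expects (keys taken from the code above, values illustrative):

    ltx_configs_override = {
        "guidance_preset": "Customizado",
        "guidance_scale_list": "[1, 2, 8, 12, 8, 2, 1]",  # parsed with json.loads
        "stg_scale_list": "[0, 0, 5, 6, 5, 3, 2]",
    }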
+
+    # ==============================================================================
+    # --- FUNCTION #2: NARRATIVE ORCHESTRATOR (MULTIPLE PROMPTS) ---
+    # ==============================================================================
+    def generate_narrative_low(
+            self, prompt: str, negative_prompt,
+            height, width, duration, guidance_scale,
+            seed, initial_image_conditions=None, overlap_frames: int = 8,
+            ltx_configs_override: dict = None):
+        """
+        [NARRATIVE ORCHESTRATOR]
+        Generates a video in multiple sequential chunks from a multi-line prompt.
+        """
+        print("\n" + "=" * 80)
+        print("====== STARTING NARRATIVE CHUNKED GENERATION (LOW-RES) ======")
+        print("=" * 80)
+
+        used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
+        seed_everething(used_seed)
+        FPS = 24.0
+
+        prompt_list = [p.strip() for p in prompt.splitlines() if p.strip()]
+        num_chunks = len(prompt_list)
+        if num_chunks == 0: raise ValueError("The prompt is empty or contains no valid lines.")
+
+        total_actual_frames = max(9, int(round((round(duration * FPS) - 1) / 8.0) * 8 + 1))
+
+        if num_chunks > 1:
+            total_blocks = (total_actual_frames - 1) // 8
+            blocks_per_chunk = total_blocks // num_chunks
+            blocks_last_chunk = total_blocks - (blocks_per_chunk * (num_chunks - 1))
+            frames_per_chunk = blocks_per_chunk * 8 + 1
+            frames_per_chunk_last = blocks_last_chunk * 8 + 1
+        else:
+            frames_per_chunk = total_actual_frames
+            frames_per_chunk_last = total_actual_frames
+
+        frames_per_chunk = max(9, frames_per_chunk)
+        frames_per_chunk_last = max(9, frames_per_chunk_last)
+
+        poda_latents_num = overlap_frames // self.pipeline.video_scale_factor if self.pipeline.video_scale_factor > 0 else 0
+
+        latentes_chunk_video = []
+        condition_item_latent_overlap = None
+        temp_dir = tempfile.mkdtemp(prefix="ltxv_narrative_"); self._register_tmp_dir(temp_dir)
+        results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
+
+        for i, chunk_prompt in enumerate(prompt_list):
+            print(f"\n--- Generating narrative chunk {i+1}/{num_chunks}: '{chunk_prompt}' ---")
+
+            current_image_conditions = []
+            if initial_image_conditions:
+                cond_item_original = initial_image_conditions[0]
+                if i == 0:
+                    current_image_conditions.append(cond_item_original)
+                else:
+                    # Reuse the first image as a weak condition on later chunks.
+                    cond_item_fraco = ConditioningItem(
+                        media_item=cond_item_original.media_item, media_frame_number=0, conditioning_strength=0.1
+                    )
+                    current_image_conditions.append(cond_item_fraco)
+
+            num_frames_para_gerar = frames_per_chunk_last if i == num_chunks - 1 else frames_per_chunk
+            if i > 0 and poda_latents_num > 0:
+                num_frames_para_gerar += overlap_frames
+
+            latentes_bruto = self._generate_single_chunk_low(
+                prompt=chunk_prompt, negative_prompt=negative_prompt, height=height, width=width,
+                num_frames=num_frames_para_gerar, guidance_scale=guidance_scale, seed=used_seed + i,
+                initial_latent_condition=condition_item_latent_overlap, image_conditions=current_image_conditions,
+                ltx_configs_override=ltx_configs_override
+            )
+
+            if i > 0 and poda_latents_num > 0:
+                # Drop the re-generated overlap region at the start of this chunk.
+                latentes_bruto = latentes_bruto[:, :, poda_latents_num:, :, :]
+
+            latentes_podado = latentes_bruto.clone().detach()
+            if i < num_chunks - 1 and poda_latents_num > 0:
+                latentes_podado = latentes_bruto[:, :, :-poda_latents_num, :, :].clone()
+                overlap_latents = latentes_bruto[:, :, -poda_latents_num:, :, :].clone()
+                condition_item_latent_overlap = ConditioningItem(
+                    media_item=overlap_latents, media_frame_number=0, conditioning_strength=1.0
+                )
+            latentes_chunk_video.append(latentes_podado)
+
+        print("\n--- Finishing narrative: concatenating chunks ---")
+        final_latents = torch.cat(latentes_chunk_video, dim=2)
+        log_tensor_info(final_latents, "Final concatenated latent tensor")
+
+        try:
+            with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device.type == 'cuda'):
+                pixel_tensor = vae_manager_singleton.decode(final_latents.clone(), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
+            video_path = self._save_and_log_video(pixel_tensor, "narrative_video", FPS, temp_dir, results_dir, used_seed)
+            latents_cpu = final_latents.detach().to("cpu")
+            tensor_path = os.path.join(results_dir, f"latents_low_res_{used_seed}.pt")
+            torch.save(latents_cpu, tensor_path)
+            return video_path, tensor_path, used_seed
+        except Exception as e:
+            print(f"[DEBUG] narrative generation failed: {e}")
+        finally:
+            torch.cuda.empty_cache()
+            torch.cuda.ipc_collect()
+            self.finalize(keep_paths=[])
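
The chunking arithmetic keeps every chunk on the 8n+1 frame grid while distributing the blocks across prompt lines; overlap frames are generated on top of that and trimmed again in latent space. Worked through for a hypothetical 10-second, 3-line prompt:

    FPS = 24.0
    duration, num_chunks = 10.0, 3
    total = max(9, int(round((round(duration * FPS) - 1) / 8.0) * 8 + 1))  # 241 frames
    total_blocks = (total - 1) // 8                                        # 30 blocks
    blocks_per_chunk = total_blocks // num_chunks                          # 10
    blocks_last = total_blocks - blocks_per_chunk * (num_chunks - 1)       # 10
    print(blocks_per_chunk * 8 + 1, blocks_last * 8 + 1)                   # 81 81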
+
+    # ==============================================================================
+    # --- FUNCTION #3: SIMPLE ORCHESTRATOR (SINGLE PROMPT) ---
+    # ==============================================================================
+    def generate_single_low(
+            self, prompt: str, negative_prompt,
+            height, width, duration, guidance_scale,
+            seed, initial_image_conditions=None,
+            ltx_configs_override: dict = None):
+        """
+        [SIMPLE ORCHESTRATOR]
+        Generates a complete video in a single chunk. Ideal for short, simple prompts.
+        """
+        print("\n" + "=" * 80)
+        print("====== STARTING SIMPLE SINGLE-CHUNK GENERATION (LOW-RES) ======")
+        print("=" * 80)
+
+        used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
+        seed_everething(used_seed)
+        FPS = 24.0
+
+        total_actual_frames = max(9, int(round((round(duration * FPS) - 1) / 8.0) * 8 + 1))
+
+        temp_dir = tempfile.mkdtemp(prefix="ltxv_single_"); self._register_tmp_dir(temp_dir)
+        results_dir = "/app/output"; os.makedirs(results_dir, exist_ok=True)
+
+        # Call the single-chunk generator to do all the work
+        final_latents = self._generate_single_chunk_low(
+            prompt=prompt, negative_prompt=negative_prompt, height=height, width=width,
+            num_frames=total_actual_frames, guidance_scale=guidance_scale, seed=used_seed,
+            image_conditions=initial_image_conditions,
+            ltx_configs_override=ltx_configs_override
+        )
+
+        print("\n--- Finishing simple generation: decoding and saving ---")
+        log_tensor_info(final_latents, "Final latent tensor")
+
+        try:
+            with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device.type == 'cuda'):
+                pixel_tensor = vae_manager_singleton.decode(final_latents.clone(), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
+            video_path = self._save_and_log_video(pixel_tensor, "single_video", FPS, temp_dir, results_dir, used_seed)
+            latents_cpu = final_latents.detach().to("cpu")
+            tensor_path = os.path.join(results_dir, f"latents_single_{used_seed}.pt")
+            torch.save(latents_cpu, tensor_path)
+            return video_path, tensor_path, used_seed
+        except Exception as e:
+            print(f"[DEBUG] single-chunk generation failed: {e}")
+        finally:
+            torch.cuda.empty_cache()
+            torch.cuda.ipc_collect()
+            self.finalize(keep_paths=[])
+
+
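
`generate_narrative_low` splits the prompt on newlines, one chunk per non-empty line, seeding chunk i with `used_seed + i`. A hypothetical call:

    prompt = (
        "a knight rides through a dark forest\n"
        "the knight reaches a ruined tower\n"
        "the tower collapses at dawn"
    )
    video_path, tensor_path, seed = video_generation_service.generate_narrative_low(
        prompt=prompt, negative_prompt="blurry",
        height=480, width=704, duration=12.0,
        guidance_scale=4.0, seed=42,
    )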
     # ==============================================================================
     # --- FUNCTION #4: ORCHESTRATOR (Upscaler + HD textures) ---
     # ==============================================================================
     def generate_upscale_denoise(
-        self, latents_path, prompt, negative_prompt, seed
+        self, latents_path, prompt, negative_prompt,
+        guidance_scale, seed,
     ):
         used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
         seed_everething(used_seed)
@@ -417,9 +596,6 @@ class VideoService:
 
         # 4. Configure the remaining components with the correct device
         self._apply_precision_policy()
-        print(f"[DEBUG] runtime_autocast_dtype = {getattr(self, 'runtime_autocast_dtype', None)}")
-
-
         vae_manager_singleton.attach_pipeline(
             self.pipeline,
             device=self.device,  # Now `self.device` is correct
@@ -428,6 +604,7 @@ class VideoService:
         self._tmp_dirs = set()
         print(f"[DEBUG] VideoService ready. boot_time={time.perf_counter()-t0:.3f}s")
 
+    # The move_to_device helper defined earlier is essential here
     def move_to_device(self, device):
         """Moves the pipeline models to the specified device."""
         print(f"[LTX] Moving models to {device}...")
@@ -443,6 +620,8 @@ class VideoService:
         if torch.cuda.is_available():
             torch.cuda.empty_cache()
 
+
+# Clean module-level instantiation, without using `self` outside the class.
 print("Creating VideoService instance...")
 video_generation_service = VideoService()
 print("VideoService instance ready.")