Eueuiaa committed
Commit 525556c · verified · 1 Parent(s): 0f094a3

Update api/ltx_server_refactored.py

Files changed (1)
  1. api/ltx_server_refactored.py +17 -13
api/ltx_server_refactored.py CHANGED
@@ -341,13 +341,13 @@ class VideoService:
         try:
             with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device.type == 'cuda'):
                 latents_bruto = self.pipeline(**first_pass_kwargs).images
-                latents_cpu_bruto = latents_bruto.detach().to("cpu")
-                tensor_path_cpu = os.path.join(results_dir, f"latents_low_res.pt")
-                torch.save(latents_cpu_bruto, tensor_path_cpu)
+                #latents_cpu_bruto = latents_bruto.detach().to("cpu")
+                #tensor_path_cpu = os.path.join(results_dir, f"latents_low_res.pt")
+                #torch.save(latents_cpu_bruto, tensor_path_cpu)
                 log_tensor_info(latents_bruto, f"Latente Bruto Gerado para: '{prompt[:40]}...'")
 
                 print("-" * 20 + " FIM: _generate_single_chunk_low " + "-"*20)
-            return latents_cpu_bruto
+            return latents_bruto
         except Exception as e:
             print("-" * 20 + f" ERRO: _generate_single_chunk_low {e} " + "-"*20)
         finally:
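This first hunk changes _generate_single_chunk_low so that the first-pass latents are returned directly instead of being detached, copied to the CPU and persisted as latents_low_res.pt. Below is a minimal sketch of the before/after behaviour; first_pass_old, first_pass_new and pipeline_call are illustrative stand-ins, not names from the repository.

import os
import torch

# Illustrative stand-ins for the real method; not part of the repository.
def first_pass_old(pipeline_call, results_dir):
    # Previous behaviour: copy the latents to the CPU, persist them to disk,
    # and hand the CPU copy back to the caller.
    latents = pipeline_call()
    latents_cpu = latents.detach().to("cpu")
    torch.save(latents_cpu, os.path.join(results_dir, "latents_low_res.pt"))
    return latents_cpu

def first_pass_new(pipeline_call):
    # New behaviour: return the latents exactly as the pipeline produced them
    # (typically still on the GPU); nothing is written to disk at this point.
    return pipeline_call()

The caller-side rename in the next hunk (latent_path to latentes_bruto_r) matches this: the variable has always held a tensor rather than a file path, and it now arrives without the intermediate CPU copy or the latents_low_res.pt file.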
@@ -430,34 +430,38 @@ class VideoService:
 
             frames_per_chunk = ((frames_per_chunk//8)*8)+1
 
-            latent_path = self._generate_single_chunk_low(
+            latentes_bruto_r = self._generate_single_chunk_low(
                 prompt=chunk_prompt, negative_prompt=negative_prompt, height=height, width=width,
                 num_frames=frames_per_chunk, guidance_scale=guidance_scale, seed=used_seed + i,
                 initial_latent_condition=condition_item_latent_overlap, image_conditions=current_image_conditions,
                 ltx_configs_override=ltx_configs_override
             )
 
-
-
+            log_tensor_info(latentes_bruto_r, f"latentes_bruto_r recebidk: {i}...'")
+
             #latent_path_bufer = load_tensor(latent_path)
             #final_latents = torch.cat(lista_tensores, dim=2).to(self.device)
 
 
-            latentes_bruto = latent_path #torch.load(latent_path_bufer).to("cpu")
 
             #poda inicio overlap
-            if i > 0 and poda_latents_num > 0:
-                latentes_bruto = latentes_bruto[:, :, poda_latents_num:, :, :]
-
-
+            if i > 0 and poda_latents_num > 0 and latentes_bruto_r.shape[2]>poda_latents_num:
+                latentes_bruto = latentes_bruto_r[:, :, poda_latents_num:, :, :].clone()
+            else:
+                latentes_bruto = latentes_bruto_r[:, :, :, :, :].clone()
+
+            log_tensor_info(latentes_bruto, f"latentes_bruto recebidk: {i}...'")
+
             # cria estado overlap para proximo
             if i < num_chunks - 1 and poda_latents_num > 0:
-                overlap_latents = latentes_bruto[:, :, -poda_latents_num:, :, :].clone()
+                overlap_latents = latentes_bruto_r[:, :, -poda_latents_num:, :, :].clone()
+                log_tensor_info(overlap_latents, f"overlap_latents recebidk: {i}...'")
                 overlap_latents = overlap_latents.detach().to(self.device)
                 condition_item_latent_overlap = ConditioningItem(
                     media_item=overlap_latents, media_frame_number=0, conditioning_strength=1.0
                 )
 
+
             #adiciona a lista
             tensor_path_podado = os.path.join(results_dir, f"latents_poda{i}_res.pt")
             torch.save(latentes_bruto, tensor_path_podado)
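The second hunk reworks the overlap handling in the chunk loop: the leading poda_latents_num latent frames are pruned only when the chunk is not the first one and actually has more frames than the overlap, and the trailing overlap handed to the next chunk as a ConditioningItem is always cut from the untrimmed tensor latentes_bruto_r. A self-contained sketch of that slicing on dummy 5-D latents (batch, channels, frames, height, width); trim_overlap and the toy shapes are illustrative only, not code from the repository.

import torch

def trim_overlap(latents_r, i, num_chunks, overlap):
    # Prune the leading overlap frames only when this is not the first chunk
    # and the chunk is long enough to survive the cut (the new shape guard).
    if i > 0 and overlap > 0 and latents_r.shape[2] > overlap:
        trimmed = latents_r[:, :, overlap:, :, :].clone()
    else:
        trimmed = latents_r.clone()

    # The overlap for the next chunk is taken from the *untrimmed* tensor,
    # so consecutive chunks share exactly `overlap` latent frames.
    tail = None
    if i < num_chunks - 1 and overlap > 0:
        tail = latents_r[:, :, -overlap:, :, :].detach().clone()
    return trimmed, tail

# Toy check: three chunks of 9 latent frames with an overlap of 2.
chunks = [torch.randn(1, 4, 9, 8, 8) for _ in range(3)]
kept = []
for i, chunk in enumerate(chunks):
    trimmed, tail = trim_overlap(chunk, i, len(chunks), overlap=2)
    kept.append(trimmed)
print(torch.cat(kept, dim=2).shape)  # torch.Size([1, 4, 23, 8, 8]): 9 + 7 + 7 frames

The else branch keeps the first chunk (and any chunk shorter than the overlap) intact, which is what prevents the slice from producing an empty tensor when poda_latents_num is larger than the chunk's frame count.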