Eueuiaa committed on
Commit
5114d43
·
verified ·
1 Parent(s): b1c5e84

Update api/ltx_server_refactored.py

Browse files
Files changed (1) hide show
  1. api/ltx_server_refactored.py +28 -26
api/ltx_server_refactored.py CHANGED
@@ -257,7 +257,10 @@ class VideoService:
257
  # --- FUNÇÕES MODULARES COM A LÓGICA DE CHUNKING SIMPLIFICADA ---
258
  # ==============================================================================
259
 
260
- def prepare_condition_items(self, items_list: List, height: int, width: int, num_frames: int):
 
 
 
261
  if not items_list: return []
262
  height_padded = ((height - 1) // 8 + 1) * 8
263
  width_padded = ((width - 1) // 8 + 1) * 8
@@ -269,12 +272,14 @@ class VideoService:
269
  conditioning_items.append(ConditioningItem(tensor, safe_frame, float(weight)))
270
  return conditioning_items
271
 
272
- def _generate_low(
273
  self, prompt, negative_prompt,
274
  height, width, duration, seed,
275
- conditions_itens, ltx_configs_override,
 
 
276
  ):
277
- guidance_scale="4"
278
  used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
279
  seed_everething(used_seed)
280
  FPS = 24.0
@@ -292,31 +297,27 @@ class VideoService:
292
  first_pass_kwargs = {
293
  "prompt": prompt, "negative_prompt": negative_prompt, "height": downscaled_height, "width": downscaled_width,
294
  "num_frames": actual_num_frames, "frame_rate": int(FPS), "generator": torch.Generator(device=self.device).manual_seed(used_seed),
295
- "output_type": "latent", "conditioning_items": conditions_itens,
 
296
  **(self.config.get("first_pass", {}))
297
  }
298
- if True:
299
- ctx = torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext()
300
- with ctx:
301
  latents = self.pipeline(**first_pass_kwargs).images
302
- pixel_tensor = vae_manager_singleton.decode(latents, decode_timestep=float(self.config.get("decode_timestep", 0.05)))
 
303
  latents_cpu = latents.detach().to("cpu")
304
  tensor_path = os.path.join(results_dir, f"latents_low_res_{used_seed}.pt")
305
  torch.save(latents_cpu, tensor_path)
306
- del latents; gc.collect(); torch.cuda.empty_cache()
307
- pixel_tensor_cpu = pixel_tensor.detach().to("cpu")
308
- video_path = self._save_and_log_video(pixel_tensor_cpu, "low_res_video", FPS, temp_dir, results_dir, used_seed)
309
- del pixel_tensor; gc.collect(); torch.cuda.empty_cache()
310
-
311
- return latents_cpu
312
- #except Exception as e:
313
- # traceback.print_exc()
314
- # return None
315
- #finally:
316
- # torch.cuda.empty_cache()
317
- # torch.cuda.ipc_collect()
318
- # self.finalize(keep_paths=[])
319
-
320
  # ==============================================================================
321
  # --- FUNÇÕES DE GERAÇÃO ATUALIZADAS E MODULARES ---
322
  # ==============================================================================
@@ -363,7 +364,7 @@ class VideoService:
363
  #ltx_configs_override["conditioning_items"] = current_conditions
364
 
365
 
366
- tensor_path = self._generate_low(
367
  prompt, negative_prompt, height,
368
  width, duration, seed,
369
  conditions_itens, ltx_configs_override,
@@ -393,16 +394,17 @@ class VideoService:
393
  #tensor_path = os.path.join(results_dir, f"latents_narrative_{used_seed}.pt")
394
  #torch.save(final_latents_cpu, tensor_path)
395
 
396
- if True:
397
  ctx = torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext()
398
  with ctx:
399
- final_latents_video = torch.cat(latentes_chunk_video_list, dim=2)
400
  pixel_tensor = vae_manager_singleton.decode(final_latents_video, decode_timestep=float(self.config.get("decode_timestep", 0.05)))
401
  video_path = self._save_and_log_video(pixel_tensor, "narrative_video", FPS, temp_dir, results_dir, used_seed)
402
  tensor_path = os.path.join(results_dir, f"latents_narrative_{used_seed}.pt")
403
  torch.save(final_latents_video, tensor_path)
404
  del final_latents_video; gc.collect(); torch.cuda.empty_cache()
405
  del pixel_tensor; gc.collect(); torch.cuda.empty_cache()
 
406
  return video_path, tensor_path, used_seed
407
  #except Exception as e:
408
  # print("-" * 20 + f" ERRO: generate_narrative_low {e} " + "-"*20)
 
257
  # --- FUNÇÕES MODULARES COM A LÓGICA DE CHUNKING SIMPLIFICADA ---
258
  # ==============================================================================
259
 
260
+ def prepare_condition_items(
261
+ self, items_list: List, height: int,
262
+ width: int, num_frames: int,
263
+ ):
264
  if not items_list: return []
265
  height_padded = ((height - 1) // 8 + 1) * 8
266
  width_padded = ((width - 1) // 8 + 1) * 8
 
272
  conditioning_items.append(ConditioningItem(tensor, safe_frame, float(weight)))
273
  return conditioning_items
274
 
275
+ def generate_low(
276
  self, prompt, negative_prompt,
277
  height, width, duration, seed,
278
+ conditioning_items=None,
279
+ conditions_itens=None,
280
+ ltx_configs_override: dict = None,
281
  ):
282
+ guidance_scale=4
283
  used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
284
  seed_everething(used_seed)
285
  FPS = 24.0
 
297
  first_pass_kwargs = {
298
  "prompt": prompt, "negative_prompt": negative_prompt, "height": downscaled_height, "width": downscaled_width,
299
  "num_frames": actual_num_frames, "frame_rate": int(FPS), "generator": torch.Generator(device=self.device).manual_seed(used_seed),
300
+ "output_type": "latent", "conditioning_items": conditioning_items,
301
+ "guidance_scale": float(guidance_scale),
302
  **(self.config.get("first_pass", {}))
303
  }
304
+ try:
305
+ with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device == 'cuda'):
 
306
  latents = self.pipeline(**first_pass_kwargs).images
307
+ pixel_tensor = vae_manager_singleton.decode(latents.clone(), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
308
+ video_path = self._save_and_log_video(pixel_tensor, "low_res_video", FPS, temp_dir, results_dir, used_seed)
309
  latents_cpu = latents.detach().to("cpu")
310
  tensor_path = os.path.join(results_dir, f"latents_low_res_{used_seed}.pt")
311
  torch.save(latents_cpu, tensor_path)
312
+ return video_path, tensor_path, used_seed
313
+
314
+ except Exception as e:
315
+ pass
316
+ finally:
317
+ torch.cuda.empty_cache()
318
+ torch.cuda.ipc_collect()
319
+ self.finalize(keep_paths=[])
320
+
 
 
 
 
 
321
  # ==============================================================================
322
  # --- FUNÇÕES DE GERAÇÃO ATUALIZADAS E MODULARES ---
323
  # ==============================================================================
 
364
  #ltx_configs_override["conditioning_items"] = current_conditions
365
 
366
 
367
+ video_path, tensor_path, used_seed = self._generate_low(
368
  prompt, negative_prompt, height,
369
  width, duration, seed,
370
  conditions_itens, ltx_configs_override,
 
394
  #tensor_path = os.path.join(results_dir, f"latents_narrative_{used_seed}.pt")
395
  #torch.save(final_latents_cpu, tensor_path)
396
 
397
+ if false:
398
  ctx = torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype) if self.device == "cuda" else contextlib.nullcontext()
399
  with ctx:
400
+ #final_latents_video = torch.cat(latentes_chunk_video_list, dim=2)
401
  pixel_tensor = vae_manager_singleton.decode(final_latents_video, decode_timestep=float(self.config.get("decode_timestep", 0.05)))
402
  video_path = self._save_and_log_video(pixel_tensor, "narrative_video", FPS, temp_dir, results_dir, used_seed)
403
  tensor_path = os.path.join(results_dir, f"latents_narrative_{used_seed}.pt")
404
  torch.save(final_latents_video, tensor_path)
405
  del final_latents_video; gc.collect(); torch.cuda.empty_cache()
406
  del pixel_tensor; gc.collect(); torch.cuda.empty_cache()
407
+
408
  return video_path, tensor_path, used_seed
409
  #except Exception as e:
410
  # print("-" * 20 + f" ERRO: generate_narrative_low {e} " + "-"*20)