Eueuiaa committed on
Commit
7788eec
·
verified ·
1 Parent(s): e9abd79

Upload pipeline_ltx_video.py

Browse files
LTX-Video/ltx_video/pipelines/pipeline_ltx_video.py CHANGED
@@ -1206,6 +1206,11 @@ class LTXVideoPipeline(DiffusionPipeline):
1206
  orig_conditioning_mask,
1207
  generator,
1208
  )
 
 
 
 
 
1209
 
1210
  latent_model_input = (
1211
  torch.cat([latents] * num_conds) if num_conds > 1 else latents
@@ -1213,6 +1218,11 @@ class LTXVideoPipeline(DiffusionPipeline):
1213
  latent_model_input = self.scheduler.scale_model_input(
1214
  latent_model_input, t
1215
  )
 
 
 
 
 
1216
 
1217
  current_timestep = t
1218
  if not torch.is_tensor(current_timestep):
@@ -1328,6 +1338,11 @@ class LTXVideoPipeline(DiffusionPipeline):
1328
  extra_step_kwargs,
1329
  stochastic_sampling=stochastic_sampling,
1330
  )
 
 
 
 
 
1331
 
1332
  # call the callback, if provided
1333
  if i == len(timesteps) - 1 or (
@@ -1341,7 +1356,7 @@ class LTXVideoPipeline(DiffusionPipeline):
1341
 
1342
 
1343
  try:
1344
- print(f"[LTX6]LATENTS {latents.shape}")
1345
  except Exception:
1346
  pass
1347
 
@@ -1352,6 +1367,12 @@ class LTXVideoPipeline(DiffusionPipeline):
1352
 
1353
  # Remove the added conditioning latents
1354
  latents = latents[:, num_cond_latents:]
 
 
 
 
 
 
1355
 
1356
  latents = self.patchifier.unpatchify(
1357
  latents=latents,
@@ -1379,7 +1400,8 @@ class LTXVideoPipeline(DiffusionPipeline):
1379
  )
1380
  else:
1381
  decode_timestep = None
1382
- latents = self.tone_map_latents(latents, tone_map_compression_ratio)
 
1383
  image = vae_decode(
1384
  latents,
1385
  self.vae,
@@ -1387,6 +1409,11 @@ class LTXVideoPipeline(DiffusionPipeline):
1387
  vae_per_channel_normalize=kwargs["vae_per_channel_normalize"],
1388
  timestep=decode_timestep,
1389
  )
 
 
 
 
 
1390
 
1391
  image = self.image_processor.postprocess(image, output_type=output_type)
1392
 
 
1206
  orig_conditioning_mask,
1207
  generator,
1208
  )
1209
+
1210
+ try:
1211
+ print(f"[LTX6]LATENTS {latents.shape}")
1212
+ except Exception:
1213
+ pass
1214
 
1215
  latent_model_input = (
1216
  torch.cat([latents] * num_conds) if num_conds > 1 else latents
 
1218
  latent_model_input = self.scheduler.scale_model_input(
1219
  latent_model_input, t
1220
  )
1221
+
1222
+ try:
1223
+ print(f"[LTX7]LATENTS {latent_model_input.shape}")
1224
+ except Exception:
1225
+ pass
1226
 
1227
  current_timestep = t
1228
  if not torch.is_tensor(current_timestep):
 
1338
  extra_step_kwargs,
1339
  stochastic_sampling=stochastic_sampling,
1340
  )
1341
+
1342
+ try:
1343
+ print(f"[LTX8]LATENTS {latents.shape}")
1344
+ except Exception:
1345
+ pass
1346
 
1347
  # call the callback, if provided
1348
  if i == len(timesteps) - 1 or (
 
1356
 
1357
 
1358
  try:
1359
+ print(f"[LTX9]LATENTS {latents.shape}")
1360
  except Exception:
1361
  pass
1362
 
 
1367
 
1368
  # Remove the added conditioning latents
1369
  latents = latents[:, num_cond_latents:]
1370
+
1371
+
1372
+ try:
1373
+ print(f"[LTX10]LATENTS {latents.shape}")
1374
+ except Exception:
1375
+ pass
1376
 
1377
  latents = self.patchifier.unpatchify(
1378
  latents=latents,
 
1400
  )
1401
  else:
1402
  decode_timestep = None
1403
+ latents = self.tone_map_latents(latents, tone_map_compression_ratio)
1405
  image = vae_decode(
1406
  latents,
1407
  self.vae,
 
1409
  vae_per_channel_normalize=kwargs["vae_per_channel_normalize"],
1410
  timestep=decode_timestep,
1411
  )
1412
+
1413
+ try:
1414
+ print(f"[LTX11]LATENTS {latents.shape}")
1415
+ except Exception:
1416
+ pass
1417
 
1418
  image = self.image_processor.postprocess(image, output_type=output_type)
1419