multimodalart (HF Staff) committed on
Commit 5a3e453 · verified · 1 Parent(s): 644e377

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -112,7 +112,7 @@ pathchsageattentionkj = NODE_CLASS_MAPPINGS["PathchSageAttentionKJ"]()
 MODELS_AND_NODES["clip"] = cliploader.load_clip(clip_name="umt5_xxl_fp8_e4m3fn_scaled.safetensors", type="wan")
 unet_low_noise = unetloader.load_unet(unet_name="wan2.2_i2v_low_noise_14B_fp8_scaled.safetensors", weight_dtype="default")
 unet_high_noise = unetloader.load_unet(unet_name="wan2.2_i2v_high_noise_14B_fp8_scaled.safetensors", weight_dtype="default")
-MODELS_AND_NODES["vae"] = vaeloader.load_vae(vae_name="wan_2.1_vae.safetensors", device="cpu")
+MODELS_AND_NODES["vae"] = vaeloader.load_vae(vae_name="wan_2.1_vae.safetensors")
 MODELS_AND_NODES["clip_vision"] = clipvisionloader.load_clip(clip_name="clip_vision_h.safetensors")
 
 # Chain all patching operations together for the final models
@@ -146,7 +146,7 @@ MODELS_AND_NODES["SaveVideo"] = NODE_CLASS_MAPPINGS["SaveVideo"]()
 print("Moving final models to GPU...")
 model_loaders_final = [
     MODELS_AND_NODES["clip"],
-    MODELS_AND_NODES["vae"],
+    # MODELS_AND_NODES["vae"],
     MODELS_AND_NODES["model_low_noise"],
     MODELS_AND_NODES["model_high_noise"],
     MODELS_AND_NODES["clip_vision"],
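For context, a minimal sketch of how the touched lines read after this commit. It assumes ComfyUI's standard NODE_CLASS_MAPPINGS registry is importable and the listed .safetensors files sit in the usual model folders; the UNet loading and patching between the two hunks is elided, so only the loaders the diff touches appear:

# Hedged sketch of the post-commit loading flow in app.py, not the full file.
# Assumes ComfyUI's `nodes` module is on the path; node names follow stock ComfyUI.
from nodes import NODE_CLASS_MAPPINGS

MODELS_AND_NODES = {}

cliploader = NODE_CLASS_MAPPINGS["CLIPLoader"]()
vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
clipvisionloader = NODE_CLASS_MAPPINGS["CLIPVisionLoader"]()

MODELS_AND_NODES["clip"] = cliploader.load_clip(
    clip_name="umt5_xxl_fp8_e4m3fn_scaled.safetensors", type="wan"
)
# Post-commit: the explicit device="cpu" kwarg is gone, so the VAE loader
# uses its default placement.
MODELS_AND_NODES["vae"] = vaeloader.load_vae(vae_name="wan_2.1_vae.safetensors")
MODELS_AND_NODES["clip_vision"] = clipvisionloader.load_clip(
    clip_name="clip_vision_h.safetensors"
)

# ... UNet loading and patching elided (unchanged by this commit) ...

print("Moving final models to GPU...")
model_loaders_final = [
    MODELS_AND_NODES["clip"],
    # MODELS_AND_NODES["vae"],  # post-commit: VAE is no longer force-moved
    MODELS_AND_NODES["clip_vision"],
]

Net effect of the two hunks: the VAE is loaded without a device override and is excluded from the list of models moved to the GPU, while the CLIP, UNet, and CLIP Vision handling is unchanged.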