alexander00001 committed on
Commit
c47d675
·
verified ·
1 Parent(s): 1aa6f43

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +53 -50
app.py CHANGED
@@ -43,10 +43,25 @@ STYLE_PRESETS = {
43
  "Artistic": "artistic style, creative composition, unique visual style, expressive animation, stylized rendering"
44
  }
45
 
46
- # 固定模型配置 - 使用您的完整私人仓库(添加配置文件后)
47
- PRIVATE_MODEL = "alexander00001/NSFW_Wan_14b"
48
- # 备用官方模型
49
- FALLBACK_MODEL = "THUDM/CogVideoX-5b"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
  # 质量增强提示词 - 适配视频
52
  QUALITY_ENHANCERS = [
@@ -116,69 +131,57 @@ def initialize_model():
116
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
117
  print(f"🖥️ Using device: {device}")
118
 
119
- print(f"Loading Private NSFW Model: {PRIVATE_MODEL}")
120
- print(f"Fallback Model: {FALLBACK_MODEL}")
121
 
122
- # 首先尝试加载您的私人NSFW模型(完整仓库结构)
123
  try:
124
- # 使用WanPipeline而不是CogVideoXPipeline
125
- from diffusers import WanPipeline
126
- pipeline = WanPipeline.from_pretrained(
127
- PRIVATE_MODEL,
128
  torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
129
- use_safetensors=True,
130
- trust_remote_code=True,
131
- # 更激进的内存优化
132
- text_encoder_dtype=torch.float32,
133
- device_map="balanced",
134
- load_in_8bit=True, # 8bit量化
135
- low_cpu_mem_usage=True # 低CPU内存使用
136
  )
137
- print("Successfully loaded private NSFW Wan model with memory optimization!")
 
 
 
 
 
 
 
 
 
 
 
 
138
 
139
- except Exception as private_error:
140
- print(f"Private Wan model loading failed: {private_error}")
141
- print(f"Falling back to official model: {FALLBACK_MODEL}")
 
 
 
142
 
143
- # 备选:尝试官方Wan2.2-Diffusers版本
144
- try:
145
- pipeline = WanPipeline.from_pretrained(
146
- "Wan-AI/Wan2.2-T2V-A14B-Diffusers",
147
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
148
- use_safetensors=True,
149
- text_encoder_dtype=torch.float32,
150
- device_map="balanced"
151
- )
152
- print("Loaded official Wan2.2-Diffusers model")
153
- except Exception as wan_error:
154
- print(f"Official Wan loading failed: {wan_error}")
155
- # 最后备选:CogVideoX
156
- from diffusers import CogVideoXPipeline
157
- pipeline = CogVideoXPipeline.from_pretrained(
158
- FALLBACK_MODEL,
159
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
160
- use_safetensors=True
161
- )
162
- print("Loaded CogVideoX as final fallback")
163
 
164
  pipeline = pipeline.to(device)
165
 
166
- # GPU优化 - Wan模型专用内存管理
167
  if torch.cuda.is_available():
168
  try:
169
- # Wan模型特有的优化方法
170
  if hasattr(pipeline, 'enable_model_cpu_offload'):
171
- pipeline.enable_model_cpu_offload() # 将部分组件移至CPU
172
  if hasattr(pipeline, 'enable_vae_tiling'):
173
- pipeline.enable_vae_tiling() # VAE分块处理
174
- if hasattr(pipeline, 'enable_sequential_cpu_offload'):
175
- pipeline.enable_sequential_cpu_offload() # 顺序CPU卸载
176
- # 通用内存优化
177
  try:
178
  pipeline.enable_xformers_memory_efficient_attention()
179
  except:
180
  pass
181
- print("Wan model memory optimizations applied")
182
  except Exception as mem_error:
183
  print(f"Memory optimization warning: {mem_error}")
184
 
 
43
  "Artistic": "artistic style, creative composition, unique visual style, expressive animation, stylized rendering"
44
  }
45
 
46
+ # 固定模型配置 - 使用CogVideoX + LoRA架构
47
+ BASE_MODEL = "THUDM/CogVideoX-5b" # 稳定的官方base model
48
+ # 实际可用的LoRA适配器列表
49
+ LORA_CONFIGS = [
50
+ {
51
+ "repo_id": "hashu786/CogVideoX-LoRA-CineCam",
52
+ "filename": "pytorch_lora_weights.safetensors",
53
+ "adapter_name": "cinematic_camera",
54
+ "scale": 0.6
55
+ },
56
+ {
57
+ "repo_id": "alibaba-pai/CogVideoX-Fun-V1.1-Reward-LoRAs",
58
+ "filename": "pytorch_lora_weights.safetensors",
59
+ "adapter_name": "quality_reward",
60
+ "scale": 0.8
61
+ }
62
+ # 注意:由于是NSFW内容,暂时使用增强质量的LoRA
63
+ # 您可以later添加专门的NSFW LoRA
64
+ ]
65
 
66
  # 质量增强提示词 - 适配视频
67
  QUALITY_ENHANCERS = [
 
131
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
132
  print(f"🖥️ Using device: {device}")
133
 
134
+ print(f"Loading CogVideoX base model: {BASE_MODEL}")
135
+ print(f"LoRA configurations: {len(LORA_CONFIGS)} adapters")
136
 
137
+ # 加载基础CogVideoX模型
138
  try:
139
+ from diffusers import CogVideoXPipeline
140
+ pipeline = CogVideoXPipeline.from_pretrained(
141
+ BASE_MODEL,
 
142
  torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
143
+ use_safetensors=True
 
 
 
 
 
 
144
  )
145
+ print("Successfully loaded CogVideoX base model!")
146
+
147
+ # 加载LoRA适配器
148
+ for lora_config in LORA_CONFIGS:
149
+ try:
150
+ pipeline.load_lora_weights(
151
+ lora_config["repo_id"],
152
+ weight_name=lora_config["filename"],
153
+ adapter_name=lora_config["adapter_name"]
154
+ )
155
+ print(f"✓ Loaded LoRA: {lora_config['adapter_name']}")
156
+ except Exception as lora_error:
157
+ print(f"⚠ LoRA loading failed ({lora_config['adapter_name']}): {lora_error}")
158
 
159
+ # 设置LoRA权重
160
+ adapter_names = [config["adapter_name"] for config in LORA_CONFIGS]
161
+ adapter_weights = [config["scale"] for config in LORA_CONFIGS]
162
+ if adapter_names:
163
+ pipeline.set_adapters(adapter_names, adapter_weights)
164
+ print(f"✓ Applied LoRA adapters with weights: {adapter_weights}")
165
 
166
+ except Exception as base_error:
167
+ print(f"Base model loading failed: {base_error}")
168
+ print("This should not happen with official CogVideoX model")
169
+ return False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
170
 
171
  pipeline = pipeline.to(device)
172
 
173
+ # GPU优化 - CogVideoX优化(更简单可靠)
174
  if torch.cuda.is_available():
175
  try:
 
176
  if hasattr(pipeline, 'enable_model_cpu_offload'):
177
+ pipeline.enable_model_cpu_offload()
178
  if hasattr(pipeline, 'enable_vae_tiling'):
179
+ pipeline.enable_vae_tiling()
 
 
 
180
  try:
181
  pipeline.enable_xformers_memory_efficient_attention()
182
  except:
183
  pass
184
+ print("CogVideoX memory optimizations applied")
185
  except Exception as mem_error:
186
  print(f"Memory optimization warning: {mem_error}")
187