Upload 2 files
- app.py +39 -4
- requirements.txt +2 -1
app.py
CHANGED
@@ -54,7 +54,7 @@ def encode_video(video_path, max_num_frames=32):
     return []
 
 def load_model():
-    """Load MiniCPM-o 2.6 model and tokenizer"""
+    """Load MiniCPM-o 2.6 model and tokenizer with fallback attention implementation"""
     global model, tokenizer
 
     if model is not None and tokenizer is not None:
@@ -69,11 +69,20 @@ def load_model():
 
     print(f"Loading on device: {device} with dtype: {torch_dtype}")
 
+    # Try to determine best attention implementation
+    attn_implementation = 'sdpa'  # Default fallback
+    try:
+        import flash_attn
+        attn_implementation = 'flash_attention_2'
+        print("✅ Using Flash Attention 2 for optimal performance")
+    except ImportError:
+        print("⚠️ Flash Attention not available, using SDPA (still good performance)")
+
     # Load model with memory optimization for Spaces
     model = AutoModel.from_pretrained(
         model_path,
         trust_remote_code=True,
-        attn_implementation=
+        attn_implementation=attn_implementation,
         torch_dtype=torch_dtype,
         device_map="auto",
         offload_buffers=True, # Enable buffer offloading for GPU memory
@@ -89,8 +98,34 @@ def load_model():
         return model, tokenizer
 
     except Exception as e:
-        print(f"❌ Error loading model: {e}")
-        raise e
+        # If flash_attn fails, try with SDPA as fallback
+        if "flash_attn" in str(e):
+            print("⚠️ Flash Attention failed, retrying with SDPA...")
+            try:
+                model = AutoModel.from_pretrained(
+                    model_path,
+                    trust_remote_code=True,
+                    attn_implementation='sdpa',
+                    torch_dtype=torch_dtype,
+                    device_map="auto",
+                    offload_buffers=True,
+                    low_cpu_mem_usage=True
+                )
+
+                tokenizer = AutoTokenizer.from_pretrained(
+                    model_path,
+                    trust_remote_code=True
+                )
+
+                print("✅ Model loaded successfully with SDPA fallback!")
+                return model, tokenizer
+
+            except Exception as fallback_e:
+                print(f"❌ Error loading model with fallback: {fallback_e}")
+                raise fallback_e
+        else:
+            print(f"❌ Error loading model: {e}")
+            raise e
 
 def analyze_video(video_file, prompt, max_frames):
     """Analyze video using MiniCPM-o 2.6"""
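The pattern this commit adds is a capability probe: import the optional flash_attn package to decide which attn_implementation string to pass to from_pretrained, then retry with 'sdpa' if the flash path still fails at load time. A minimal standalone sketch of that probe, outside the diff context (the helper name pick_attn_implementation is hypothetical, not part of the commit):

def pick_attn_implementation() -> str:
    """Prefer Flash Attention 2 when the flash-attn package is importable,
    else fall back to PyTorch's scaled-dot-product attention (SDPA)."""
    try:
        import flash_attn  # noqa: F401 -- presence check only
        return 'flash_attention_2'
    except ImportError:
        return 'sdpa'

Note that a successful import does not guarantee the kernels actually run on the current hardware, which is why the commit also keeps the except-branch that retries from_pretrained with attn_implementation='sdpa'.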
requirements.txt
CHANGED
@@ -18,4 +18,5 @@ numpy>=1.24.0
 huggingface_hub>=0.33.2
 protobuf==3.20.3
 einops>=0.8.0
-einx>=0.3.0
+einx>=0.3.0
+flash-attn==2.8.0.post2
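flash-attn ships compiled CUDA extensions, so this pin generally installs only on CUDA-capable hardware with torch already present; on CPU-only Spaces the install or import can fail, which the probe in app.py tolerates. A small runtime check mirroring that probe (illustrative, not part of the commit):

try:
    import flash_attn
    print("flash-attn available:", getattr(flash_attn, "__version__", "unknown"))
except ImportError:
    print("flash-attn not installed; load_model() will fall back to SDPA")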
|