Update app.py
app.py CHANGED
@@ -6,7 +6,6 @@ import torch.nn as nn
 import torch.optim as optim
 from facenet_pytorch import InceptionResnetV1, MTCNN
 import mediapipe as mp
-from fer import FER
 from sklearn.cluster import KMeans
 from sklearn.preprocessing import StandardScaler, MinMaxScaler
 from sklearn.metrics import silhouette_score
@@ -25,7 +24,6 @@ mtcnn = MTCNN(keep_all=False, device=device, thresholds=[0.999, 0.999, 0.999], m
 model = InceptionResnetV1(pretrained='vggface2').eval().to(device)
 mp_face_mesh = mp.solutions.face_mesh
 face_mesh = mp_face_mesh.FaceMesh(static_image_mode=False, max_num_faces=1, min_detection_confidence=0.5)
-emotion_detector = FER(mtcnn=False)
 
 def frame_to_timecode(frame_num, original_fps, desired_fps):
     total_seconds = frame_num / original_fps
@@ -42,12 +40,9 @@ def get_face_embedding_and_emotion(face_img):
     with torch.no_grad():
         embedding = model(face_tensor)
 
-
-
-
-    else:
-        emotion_dict = {e: 0 for e in ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']}
-
+    # Placeholder for emotion detection
+    emotion_dict = {e: np.random.random() for e in ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']}
+
     return embedding.cpu().numpy().flatten(), emotion_dict
 
 def alignFace(img):
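After this change, get_face_embedding_and_emotion no longer calls FER at all: the emotion scores it returns are random placeholders drawn each time the function runs, while the embedding path is untouched. Below is a minimal, self-contained sketch of that placeholder behaviour only; the names fake_emotion_scores and EMOTION_LABELS are ours for illustration and do not appear in app.py.

import numpy as np

EMOTION_LABELS = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']

def fake_emotion_scores():
    # Mirrors the placeholder introduced by this commit: one independent
    # uniform draw from [0, 1) per label. The values do not sum to 1,
    # so they are not a probability distribution over emotions.
    return {e: np.random.random() for e in EMOTION_LABELS}

scores = fake_emotion_scores()
print(scores)                       # e.g. {'angry': 0.12, 'disgust': 0.87, ...}
print(max(scores, key=scores.get))  # the "dominant" emotion is effectively random

If these values feed any downstream feature vector (for example the KMeans clustering that app.py imports), those emotion dimensions are pure noise until real detection is restored.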
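For reference, the code removed here wired up the fer package (emotion_detector = FER(mtcnn=False)) and fell back to all-zero scores when no face was detected. The body of the removed detection code is not visible in the diff above, so the following is only a sketch of the usual fer workflow under that assumption, not a verbatim restoration; face_img is assumed to be a uint8 NumPy image array of a cropped face.

from fer import FER  # the dependency this commit drops

emotion_detector = FER(mtcnn=False)
EMOTION_LABELS = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']

def detect_emotions_or_zeros(face_img):
    # FER.detect_emotions returns a list with one entry per detected face,
    # each carrying an 'emotions' mapping over the seven labels.
    results = emotion_detector.detect_emotions(face_img)
    if results:
        return results[0]['emotions']
    # Zero-filled fallback, matching the removed else-branch.
    return {e: 0 for e in EMOTION_LABELS}

Restoring this path would also mean keeping fer (and its TensorFlow dependency) installed in the Space, which is exactly the dependency this commit drops.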