Update app.py
app.py CHANGED
@@ -30,10 +30,10 @@ matplotlib.rcParams['savefig.dpi'] = 400
 # Initialize models and other global variables
 device = 'cuda'
 
-mtcnn = MTCNN(keep_all=False, device=device, thresholds=[0.
+mtcnn = MTCNN(keep_all=False, device=device, thresholds=[0.985, 0.985, 0.985], min_face_size=80)
 model = InceptionResnetV1(pretrained='vggface2').eval().to(device)
 mp_face_mesh = mp.solutions.face_mesh
-face_mesh = mp_face_mesh.FaceMesh(static_image_mode=False, max_num_faces=1, min_detection_confidence=0.
+face_mesh = mp_face_mesh.FaceMesh(static_image_mode=False, max_num_faces=1, min_detection_confidence=0.8)
 emotion_detector = FER(mtcnn=False)
 
 
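For context, a minimal sketch of how these detector objects are typically used together per frame with facenet-pytorch: stricter MTCNN thresholds (0.985) and a larger min_face_size (80) mean more frames return no face at all, which downstream code has to tolerate. The helper name get_frame_embedding and the frame_rgb input are illustrative assumptions, not code from this commit.

import numpy as np
import torch
from PIL import Image

def get_frame_embedding(frame_rgb: np.ndarray):
    """Return a 512-d FaceNet embedding for the best face in a frame, or None."""
    img = Image.fromarray(frame_rgb)
    face = mtcnn(img)                  # cropped, aligned face tensor, or None if no detection
    if face is None:                   # more likely now with thresholds=0.985 and min_face_size=80
        return None
    with torch.no_grad():
        emb = model(face.unsqueeze(0).to(device))   # InceptionResnetV1 -> shape (1, 512)
    return emb.squeeze(0).cpu().numpy()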
@@ -277,7 +277,7 @@ class LSTMAutoencoder(nn.Module):
         return out
 
 
-def lstm_anomaly_detection(X, feature_columns, raw_embedding_columns, epochs=100, batch_size=
+def lstm_anomaly_detection(X, feature_columns, raw_embedding_columns, epochs=100, batch_size=8):
     device = 'cuda'
     X = torch.FloatTensor(X).to(device)
     if X.dim() == 2:
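The `if X.dim() == 2:` check visible above suggests the elided body promotes a single (seq_len, n_features) matrix to the batched (1, seq_len, n_features) layout an LSTM with batch_first=True expects. A small standalone sketch of that shape handling (the array sizes are made up):

import numpy as np
import torch

features = np.random.rand(300, 4).astype(np.float32)   # e.g. 300 frames x 4 per-frame features
X = torch.FloatTensor(features)
if X.dim() == 2:
    X = X.unsqueeze(0)    # -> (1, 300, 4): one batch holding the whole sequence
print(X.shape)            # torch.Size([1, 300, 4])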
@@ -322,7 +322,7 @@ def lstm_anomaly_detection(X, feature_columns, raw_embedding_columns, epochs=100
 
     return mse_all, mse_comp, mse_raw
 
-def embedding_anomaly_detection(embeddings, epochs=100, batch_size=64):
+def embedding_anomaly_detection(embeddings, epochs=100, batch_size=8):
     device = 'cuda'
     X = torch.FloatTensor(embeddings).to(device)
     if X.dim() == 2:
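Both detection functions now default to batch_size=8 (embedding_anomaly_detection previously used 64, per the hunk context above), presumably to reduce GPU memory pressure on the Space. The commit doesn't show the training loop, so the sketch below is only an assumed shape of it: a stand-in TinyLSTMAutoencoder trained on mini-batches of sequence windows, not the app's LSTMAutoencoder.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

class TinyLSTMAutoencoder(nn.Module):
    def __init__(self, n_features, hidden=64):
        super().__init__()
        self.encoder = nn.LSTM(n_features, hidden, batch_first=True)
        self.decoder = nn.LSTM(hidden, n_features, batch_first=True)

    def forward(self, x):
        z, _ = self.encoder(x)       # (batch, seq, hidden)
        out, _ = self.decoder(z)     # (batch, seq, n_features)
        return out

def train_autoencoder(X, epochs=100, batch_size=8, device='cpu'):
    # X: (n_windows, seq_len, n_features) float tensor of per-frame features
    model = TinyLSTMAutoencoder(X.shape[-1]).to(device)
    opt = torch.optim.Adam(model.parameters(), lr=1e-3)
    loader = DataLoader(TensorDataset(X), batch_size=batch_size, shuffle=True)
    loss_fn = nn.MSELoss()
    for _ in range(epochs):
        for (batch,) in loader:
            batch = batch.to(device)
            opt.zero_grad()
            loss = loss_fn(model(batch), batch)   # reconstruction error drives training
            loss.backward()
            opt.step()
    return model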
@@ -349,7 +349,7 @@ def embedding_anomaly_detection(embeddings, epochs=100, batch_size=64):
     mse = np.mean(np.power(X.squeeze(0).cpu().numpy() - reconstructed, 2), axis=1)
     return mse
 
-def determine_anomalies(mse_values, threshold=
+def determine_anomalies(mse_values, threshold=5):
     mean = np.mean(mse_values)
     std = np.std(mse_values)
     anomalies = mse_values > (mean + threshold * std)
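The flagging rule itself is visible above: a frame is anomalous when its reconstruction error exceeds the mean by more than `threshold` standard deviations, and the default is now a fairly strict 5. A quick worked example with synthetic MSE values (the numbers are illustrative only):

import numpy as np

mse_values = np.concatenate([np.random.normal(0.10, 0.01, 200), [3.0]])  # 200 normal frames + one spike
mean = np.mean(mse_values)
std = np.std(mse_values)
anomalies = mse_values > (mean + 5 * std)      # same rule as determine_anomalies with threshold=5
print(anomalies.sum(), np.flatnonzero(anomalies))   # only the injected spike (index 200) should be flagged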
@@ -558,7 +558,7 @@ iface = gr.Interface(
     inputs=[
         gr.Video(),
         gr.Slider(minimum=1, maximum=20, step=1, value=10, label="Desired FPS"),
-        gr.Slider(minimum=1, maximum=32, step=1, value=
+        gr.Slider(minimum=1, maximum=32, step=1, value=8, label="Batch Size")
     ],
     outputs=[
         gr.Textbox(label="Anomaly Detection Results"),
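Gradio passes the input components to the wrapped function positionally, so the "Batch Size" slider (default now 8) arrives as the third argument alongside the video path and FPS values. The commit doesn't show the wrapped function; process_video below is an assumed name and signature, sketched only to show how the slider value would reach the detection code:

def process_video(video_path, desired_fps, batch_size):
    # frame extraction and embedding steps elided; hypothetical calls, not shown in this commit
    # mse = embedding_anomaly_detection(embeddings, batch_size=batch_size)
    # anomaly_flags = determine_anomalies(mse, threshold=5)
    return f"Processed {video_path} at {desired_fps} FPS with batch_size={batch_size}"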