Update app.py
app.py CHANGED
@@ -24,8 +24,8 @@ import tensorflow as tf
 print(torch.__version__)
 print(torch.version.cuda)

-matplotlib.rcParams['figure.dpi'] =
-matplotlib.rcParams['savefig.dpi'] =
+matplotlib.rcParams['figure.dpi'] = 500
+matplotlib.rcParams['savefig.dpi'] = 500

 # Initialize models and other global variables
 device = 'cuda'
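Aside on the DPI change (the old values are clipped in this capture, so only the new setting of 500 is certain): in matplotlib, figure.dpi controls the resolution figures are rendered at, while savefig.dpi controls the resolution of written files and defaults to 'figure'. A minimal sketch of the effect, assuming nothing beyond stock matplotlib; an explicit dpi= argument, like the dpi=300 passed to plt.figure inside plot_mse below, still overrides the rcParam for that one figure.

import matplotlib
import matplotlib.pyplot as plt

# At 500 dpi, a 16x8-inch figure renders and saves at 8000x4000 pixels.
matplotlib.rcParams['figure.dpi'] = 500
matplotlib.rcParams['savefig.dpi'] = 500

fig, ax = plt.subplots(figsize=(16, 8))  # picks up figure.dpi = 500
ax.plot([0, 1], [0, 1])
fig.savefig('example.png')               # hypothetical path; written at savefig.dpi = 500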
@@ -356,7 +356,7 @@ def determine_anomalies(mse_values, threshold=5):
     return anomalies


-def plot_mse(df, mse_values, title, color='blue', time_threshold=1, hide_first_n=
+def plot_mse(df, mse_values, title, color='blue', time_threshold=1, hide_first_n=2):
     plt.figure(figsize=(16, 8), dpi=300)
     fig, ax = plt.subplots(figsize=(16, 8))

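The hunk only shows the new hide_first_n=2 default, not how plot_mse applies it. A hypothetical sketch of the usual pattern, assuming the parameter simply blanks the first N MSE values so unstable warm-up frames do not stretch the y-axis (plot_mse_sketch and its arguments are illustrative, not code from this repo):

import numpy as np
import matplotlib.pyplot as plt

def plot_mse_sketch(mse_values, title, color='blue', hide_first_n=2):
    # Hypothetical: mask the first N scores; matplotlib skips NaNs when plotting.
    mse = np.asarray(mse_values, dtype=float).copy()
    mse[:hide_first_n] = np.nan
    fig, ax = plt.subplots(figsize=(16, 8))
    ax.plot(mse, color=color)
    ax.set_title(title)
    ax.set_xlabel('frame index')
    ax.set_ylabel('MSE')
    return fig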
@@ -514,9 +514,9 @@ def process_video(video_path, desired_fps, batch_size, progress=gr.Progress()):
         X, feature_columns, raw_embedding_columns, batch_size=batch_size)

     progress(0.95, "Generating plots")
-    mse_plot_all = plot_mse(df, mse_all, "Facial Features + Emotions", color='blue', hide_first_n=
-    mse_plot_comp = plot_mse(df, mse_comp, "Facial Features", color='deepskyblue', hide_first_n=
-    mse_plot_raw = plot_mse(df, mse_raw, "Facial Embeddings", color='steelblue', hide_first_n=
+    mse_plot_all = plot_mse(df, mse_all, "Facial Features + Emotions", color='blue', hide_first_n=2)
+    mse_plot_comp = plot_mse(df, mse_comp, "Facial Features", color='deepskyblue', hide_first_n=2)
+    mse_plot_raw = plot_mse(df, mse_raw, "Facial Embeddings", color='steelblue', hide_first_n=2)

     emotion_plots = [
         plot_mse(df, embedding_anomaly_detection(df[emotion].values.reshape(-1, 1)),
@@ -548,8 +548,8 @@ def process_video(video_path, desired_fps, batch_size, progress=gr.Progress()):

 # Define gallery outputs
 gallery_outputs = [
-    gr.Gallery(label="Most Frequent Person Random Samples", columns=
-    gr.Gallery(label="Other Persons Random Samples", columns=
+    gr.Gallery(label="Most Frequent Person Random Samples", columns=10, rows=2, height="auto"),
+    gr.Gallery(label="Other Persons Random Samples", columns=10, rows=1, height="auto")
 ]

 # Update the Gradio interface
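The gallery hunk only touches component construction. For reference, a gr.Gallery laid out like this is populated by returning a list of images, either file paths, NumPy arrays, or (image, caption) tuples, from the wrapped function. A minimal, hedged sketch; show_samples and the frame paths are made up for illustration:

import gradio as gr

def show_samples(video):
    # Hypothetical: pretend these frame paths were extracted from the video.
    most_frequent = [f"frames/person_a_{i}.jpg" for i in range(20)]
    others = [f"frames/person_b_{i}.jpg" for i in range(10)]
    return most_frequent, others

demo = gr.Interface(
    fn=show_samples,
    inputs=gr.Video(),
    outputs=[
        gr.Gallery(label="Most Frequent Person Random Samples", columns=10, rows=2, height="auto"),
        gr.Gallery(label="Other Persons Random Samples", columns=10, rows=1, height="auto"),
    ],
)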
@@ -558,7 +558,7 @@ iface = gr.Interface(
     inputs=[
         gr.Video(),
         gr.Slider(minimum=1, maximum=20, step=1, value=10, label="Desired FPS"),
-        gr.Slider(minimum=1, maximum=32, step=1, value=
+        gr.Slider(minimum=1, maximum=32, step=1, value=10, label="Batch Size")
     ],
     outputs=[
         gr.Textbox(label="Anomaly Detection Results"),
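Last, a hedged note on the slider change: gr.Interface passes its inputs list positionally to the wrapped function, so the new slider's value arrives as the batch_size argument of process_video (whose signature is visible in the hunk headers above). A stripped-down sketch of that wiring, with a stub body standing in for the real pipeline:

import gradio as gr

def process_video(video_path, desired_fps, batch_size, progress=gr.Progress()):
    # Stub: the real app extracts frames, embeds faces, and scores anomalies.
    return f"fps={desired_fps}, batch_size={batch_size}"

iface = gr.Interface(
    fn=process_video,
    inputs=[
        gr.Video(),
        gr.Slider(minimum=1, maximum=20, step=1, value=10, label="Desired FPS"),
        gr.Slider(minimum=1, maximum=32, step=1, value=10, label="Batch Size"),
    ],
    outputs=gr.Textbox(label="Anomaly Detection Results"),
)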