Update app.py

app.py CHANGED
@@ -19,8 +19,11 @@ import gradio as gr
 import tempfile
 import shutil
 import subprocess
-import tensorflow as tf
+import fractions
+
+# Suppress TensorFlow warnings
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+import tensorflow as tf
 tf.get_logger().setLevel('ERROR')
 
 # Initialize models and other global variables
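Note: the reordering in this hunk is the actual fix. TF_CPP_MIN_LOG_LEVEL is read by TensorFlow's native backend when the module is first imported, so setting it after the import silences nothing. A minimal sketch of the intended pattern:

import os

# Must be set before TensorFlow is imported; the native logger reads it at import time.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # 2 = suppress INFO and WARNING

import tensorflow as tf
tf.get_logger().setLevel('ERROR')  # also quiet the Python-side logger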
@@ -39,7 +42,7 @@ def frame_to_timecode(frame_num, original_fps, desired_fps):
     seconds = int(total_seconds % 60)
     milliseconds = int((total_seconds - int(total_seconds)) * 1000)
     return f"{hours:02d}:{minutes:02d}:{seconds:02d}.{milliseconds:03d}"
-
+
 def get_face_embedding_and_emotion(face_img):
     face_tensor = torch.tensor(face_img).permute(2, 0, 1).unsqueeze(0).float() / 255
     face_tensor = (face_tensor - 0.5) / 0.5
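Note: only the tail of frame_to_timecode is visible in this hunk. A self-contained sketch of the whole conversion, where the leading lines are an assumption (frame indices sampled at desired_fps), not taken from the commit:

def frame_to_timecode(frame_num, original_fps, desired_fps):
    # Assumed reconstruction: frames were extracted at desired_fps, so
    # frame_num / desired_fps gives elapsed seconds. The lines below the
    # seconds/milliseconds computation match the diff context above.
    total_seconds = frame_num / desired_fps
    hours = int(total_seconds // 3600)
    minutes = int((total_seconds % 3600) // 60)
    seconds = int(total_seconds % 60)
    milliseconds = int((total_seconds - int(total_seconds)) * 1000)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d}.{milliseconds:03d}"

print(frame_to_timecode(150, 29.97, 10))  # -> '00:00:15.000'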
@@ -98,16 +101,12 @@ def extract_frames(video_path, output_folder, fps):
         print(f"FFmpeg stderr: {e.stderr}")
         raise
 
-import fractions
-
 def extract_and_align_faces_from_video(video_path, aligned_faces_folder, desired_fps):
     print(f"Processing video: {video_path}")
 
-    # Extract frames using FFmpeg
     frames_folder = os.path.join(os.path.dirname(aligned_faces_folder), 'extracted_frames')
     extract_frames(video_path, frames_folder, desired_fps)
 
-    # Get video info
     ffprobe_command = [
         'ffprobe',
         '-v', 'error',
@@ -119,7 +118,7 @@ def extract_and_align_faces_from_video(video_path, aligned_faces_folder, desired
     ]
     try:
         ffprobe_output = subprocess.check_output(ffprobe_command, universal_newlines=True).strip().split(',')
-        print(f"FFprobe output: {ffprobe_output}")
+        print(f"FFprobe output: {ffprobe_output}")
 
         if len(ffprobe_output) != 2:
             raise ValueError(f"Unexpected FFprobe output format: {ffprobe_output}")
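Note: subprocess.check_output raises CalledProcessError on a non-zero exit, and universal_newlines=True is the pre-Python-3.7 spelling of text=True (decode stdout to str). A sketch of the failure modes the surrounding try block has to cover, reusing the ffprobe_command list from the diff; the RuntimeError wrapper is illustrative, not from the commit:

import subprocess

try:
    output = subprocess.check_output(ffprobe_command, universal_newlines=True)
except subprocess.CalledProcessError as e:
    # Non-zero ffprobe exit; e.output holds whatever stdout was captured.
    raise RuntimeError(f"ffprobe failed with code {e.returncode}") from e

fields = output.strip().split(',')
if len(fields) != 2:  # expect exactly 'frame_count,frame_rate'
    raise ValueError(f"Unexpected FFprobe output format: {fields}")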
@@ -127,23 +126,20 @@ def extract_and_align_faces_from_video(video_path, aligned_faces_folder, desired
         frame_count = ffprobe_output[0]
         frame_rate = ffprobe_output[1]
 
-        print(f"Frame count (raw): {frame_count}")
-        print(f"Frame rate (raw): {frame_rate}")
+        print(f"Frame count (raw): {frame_count}")
+        print(f"Frame rate (raw): {frame_rate}")
 
-        # Convert frame count to int
         try:
             frame_count = int(frame_count)
         except ValueError:
             print(f"Warning: Could not convert frame count '{frame_count}' to int. Using fallback method.")
             frame_count = len([f for f in os.listdir(frames_folder) if f.endswith('.jpg')])
 
-        # Convert fractional frame rate to float
         try:
             frac = fractions.Fraction(frame_rate)
             original_fps = float(frac.numerator) / float(frac.denominator)
         except (ValueError, ZeroDivisionError):
             print(f"Warning: Could not convert frame rate '{frame_rate}' to float. Using fallback method.")
-            # Fallback: Count frames and divide by video duration
             frame_count = len([f for f in os.listdir(frames_folder) if f.endswith('.jpg')])
             duration_command = ['ffprobe', '-v', 'error', '-show_entries', 'format=duration', '-of', 'default=noprint_wrappers=1:nokey=1', video_path]
             duration = float(subprocess.check_output(duration_command, universal_newlines=True).strip())
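Note: this is why import fractions moved to module level. FFprobe reports rates as exact rationals like '30000/1001' (NTSC 29.97), which float() cannot parse directly. A quick check of the conversion path:

import fractions

for raw in ['30000/1001', '25/1', '24']:
    frac = fractions.Fraction(raw)  # accepts 'num/den' strings and plain integers
    fps = float(frac.numerator) / float(frac.denominator)
    print(raw, '->', round(fps, 3))
# 30000/1001 -> 29.97
# 25/1 -> 25.0
# 24 -> 24.0

Fraction('0/0') raises ZeroDivisionError, which is why the except clause catches it alongside ValueError.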
@@ -303,17 +299,27 @@ def lstm_anomaly_detection(X, feature_columns, num_anomalies=10, epochs=100, bat
     with torch.no_grad():
         reconstructed = model(X.unsqueeze(0)).squeeze(0).cpu().numpy()
 
-
+    # Compute anomalies for all features
+    mse_all = np.mean(np.power(X.cpu().numpy() - reconstructed, 2), axis=1)
+    top_indices_all = mse_all.argsort()[-num_anomalies:][::-1]
+    anomalies_all = np.zeros(len(mse_all), dtype=bool)
+    anomalies_all[top_indices_all] = True
 
-
-
-
+    # Compute anomalies for components only
+    component_columns = [col for col in feature_columns if col.startswith('Comp')]
+    component_indices = [feature_columns.index(col) for col in component_columns]
+    mse_comp = np.mean(np.power(X.cpu().numpy()[:, component_indices] - reconstructed[:, component_indices], 2), axis=1)
+    top_indices_comp = mse_comp.argsort()[-num_anomalies:][::-1]
+    anomalies_comp = np.zeros(len(mse_comp), dtype=bool)
+    anomalies_comp[top_indices_comp] = True
 
-    return
+    return (anomalies_all, mse_all, top_indices_all,
+            anomalies_comp, mse_comp, top_indices_comp,
+            model)
 
 def plot_anomaly_scores(df, anomaly_scores, top_indices, title):
     fig, ax = plt.subplots(figsize=(16, 8))
-    bars = ax.bar(range(len(df)), anomaly_scores, width=0.8)
+    bars = ax.bar(range(len(df)), anomaly_scores, width=0.8, color='skyblue')
     for i in top_indices:
         bars[i].set_color('red')
     ax.set_xlabel('Timecode')
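Note: the new block is the standard autoencoder anomaly recipe: score each frame by its mean squared reconstruction error, then flag the num_anomalies largest. A standalone sketch of the same ranking logic on dummy data:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 8))                            # stand-in feature matrix
reconstructed = X + rng.normal(scale=0.1, size=X.shape)  # stand-in model output
num_anomalies = 10

mse = np.mean(np.power(X - reconstructed, 2), axis=1)  # one score per frame
top_indices = mse.argsort()[-num_anomalies:][::-1]     # largest errors, descending
anomalies = np.zeros(len(mse), dtype=bool)
anomalies[top_indices] = True
assert anomalies.sum() == num_anomalies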
@@ -325,23 +331,34 @@ def plot_anomaly_scores(df, anomaly_scores, top_indices, title):
     plt.tight_layout()
     return fig
 
-def plot_emotion(df, emotion):
+def plot_emotion(df, emotion, num_anomalies):
     fig, ax = plt.subplots(figsize=(16, 8))
     values = df[emotion].values
-    bars = ax.bar(range(len(df)), values, width=0.8)
-
-    for i
-
-        bar.set_color('red')
+    bars = ax.bar(range(len(df)), values, width=0.8, color='lightgreen')
+    top_indices = np.argsort(values)[-num_anomalies:][::-1]
+    for i in top_indices:
+        bars[i].set_color('red')
     ax.set_xlabel('Timecode')
     ax.set_ylabel(f'{emotion.capitalize()} Score')
-    ax.set_title(f'{emotion.capitalize()} Scores Over Time')
+    ax.set_title(f'{emotion.capitalize()} Scores Over Time (Top {num_anomalies} in Red)')
     ax.xaxis.set_major_locator(MaxNLocator(nbins=100))
     ticks = ax.get_xticks()
     ax.set_xticklabels([df['Timecode'].iloc[int(tick)] if tick >= 0 and tick < len(df) else '' for tick in ticks], rotation=90, ha='right')
     plt.tight_layout()
     return fig
 
+def plot_components(df):
+    fig, ax = plt.subplots(figsize=(16, 8))
+    component_columns = [col for col in df.columns if col.startswith('Comp')]
+    for col in component_columns:
+        ax.plot(df['Time (Minutes)'], df[col], label=col)
+    ax.set_xlabel('Time (Minutes)')
+    ax.set_ylabel('Component Value')
+    ax.set_title('UMAP Components Over Time')
+    ax.legend()
+    plt.tight_layout()
+    return fig
+
 def process_video(video_path, num_anomalies, num_components, desired_fps, batch_size, progress=gr.Progress()):
     with tempfile.TemporaryDirectory() as temp_dir:
         aligned_faces_folder = os.path.join(temp_dir, 'aligned_faces')
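Note: plot_emotion now computes its own highlights instead of receiving top_indices as an argument. The slice pattern relies on np.argsort sorting ascending:

import numpy as np

values = np.array([0.2, 0.9, 0.1, 0.7, 0.4])
top = np.argsort(values)[-3:][::-1]  # indices of the 3 largest, largest first
print(top)                           # [1 3 4]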
@@ -353,10 +370,10 @@ def process_video(video_path, num_anomalies, num_components, desired_fps, batch_
         try:
             embeddings_by_frame, emotions_by_frame, _, original_fps = extract_and_align_faces_from_video(video_path, aligned_faces_folder, desired_fps)
         except Exception as e:
-            return f"Error extracting faces: {str(e)}", None, None, None, None
+            return f"Error extracting faces: {str(e)}", None, None, None, None, None, None
 
         if not embeddings_by_frame:
-            return "No faces were extracted from the video.", None, None, None, None
+            return "No faces were extracted from the video.", None, None, None, None, None, None
 
         progress(0.3, "Clustering embeddings")
         embeddings = list(embeddings_by_frame.values())
@@ -371,25 +388,37 @@ def process_video(video_path, num_anomalies, num_components, desired_fps, batch_
         progress(0.6, "Performing anomaly detection")
         feature_columns = [col for col in df.columns if col not in ['Frame', 'Timecode', 'Time (Minutes)', 'Embedding_Index']]
         try:
-            anomalies_all, anomaly_scores_all, top_indices_all, _ = lstm_anomaly_detection(df[feature_columns].values, feature_columns, num_anomalies=num_anomalies, batch_size=batch_size)
+            anomalies_all, anomaly_scores_all, top_indices_all, anomalies_comp, anomaly_scores_comp, top_indices_comp, _ = lstm_anomaly_detection(df[feature_columns].values, feature_columns, num_anomalies=num_anomalies, batch_size=batch_size)
         except Exception as e:
-            return f"Error in anomaly detection: {str(e)}", None, None, None, None
+            return f"Error in anomaly detection: {str(e)}", None, None, None, None, None, None
 
         progress(0.8, "Generating plots")
         try:
-
-
+            anomaly_plot_all = plot_anomaly_scores(df, anomaly_scores_all, top_indices_all, "All Features")
+            anomaly_plot_comp = plot_anomaly_scores(df, anomaly_scores_comp, top_indices_comp, "Components Only")
+            components_plot = plot_components(df)
+            emotion_plots = [plot_emotion(df, emotion, num_anomalies) for emotion in ['fear', 'sad', 'angry']]
         except Exception as e:
-            return f"Error generating plots: {str(e)}", None, None, None, None
+            return f"Error generating plots: {str(e)}", None, None, None, None, None, None
 
         progress(0.9, "Preparing results")
         results = f"Top {num_anomalies} anomalies (All Features):\n"
         results += "\n".join([f"{score:.4f} at {timecode}" for score, timecode in
                               zip(anomaly_scores_all[top_indices_all], df['Timecode'].iloc[top_indices_all].values)])
+        results += f"\n\nTop {num_anomalies} anomalies (Components Only):\n"
+        results += "\n".join([f"{score:.4f} at {timecode}" for score, timecode in
+                              zip(anomaly_scores_comp[top_indices_comp], df['Timecode'].iloc[top_indices_comp].values)])
+
+        # Add top emotion scores to results
+        for emotion in ['fear', 'sad', 'angry']:
+            top_indices = np.argsort(df[emotion].values)[-num_anomalies:][::-1]
+            results += f"\n\nTop {num_anomalies} {emotion.capitalize()} Scores:\n"
+            results += "\n".join([f"{df[emotion].iloc[i]:.4f} at {df['Timecode'].iloc[i]}" for i in top_indices])
 
         progress(1.0, "Complete")
-        return results,
-
+        return results, anomaly_plot_all, anomaly_plot_comp, components_plot, *emotion_plots
+
+# Gradio interface
 iface = gr.Interface(
     fn=process_video,
     inputs=[
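Note: the widened None tuples in this hunk track the Gradio contract that fn returns exactly one value per declared output component. With seven outputs below (one Textbox plus six Plots), every return path must yield seven values; the starred unpacking expands the three emotion plots in place. A toy check, with stand-in values for illustration only:

def fn_like_process_video():
    results = 'summary text'
    plots = ['all', 'comp', 'umap', 'fear', 'sad', 'angry']  # stand-ins for the six figures
    return (results, *plots)  # 1 Textbox value + 6 Plot values

assert len(fn_like_process_video()) == 7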
@@ -401,7 +430,9 @@ iface = gr.Interface(
     ],
     outputs=[
         gr.Textbox(label="Anomaly Detection Results"),
-        gr.Plot(label="Anomaly Scores"),
+        gr.Plot(label="Anomaly Scores (All Features)"),
+        gr.Plot(label="Anomaly Scores (Components Only)"),
+        gr.Plot(label="UMAP Components"),
         gr.Plot(label="Fear Scores"),
         gr.Plot(label="Sad Scores"),
         gr.Plot(label="Angry Scores")
@@ -412,9 +443,9 @@ iface = gr.Interface(
     It focuses on the most frequently appearing person in the video for analysis.
 
     Adjust the parameters as needed:
-    - Number of Anomalies: How many top anomalies
-    - Number of Components: Complexity of the facial expression model
-    - Desired FPS: Frames per second to analyze (lower for faster processing
+    - Number of Anomalies: How many top anomalies or high intensities to highlight
+    - Number of Components: Complexity of the facial expression model
+    - Desired FPS: Frames per second to analyze (lower for faster processing)
     - Batch Size: Affects processing speed and memory usage
 
     """