Spaces: Runtime error

Update video_processing.py

video_processing.py  CHANGED  (+2 -22)
@@ -10,8 +10,7 @@ from face_analysis import get_face_embedding, cluster_faces, organize_faces_by_p
 from pose_analysis import calculate_posture_score, draw_pose_landmarks
 from anomaly_detection import anomaly_detection
 from visualization import plot_mse, plot_mse_histogram, plot_mse_heatmap
-from utils import frame_to_timecode
-from transcribe import transcribe
+from utils import frame_to_timecode
 import pandas as pd
 from facenet_pytorch import MTCNN
 import torch
@@ -183,23 +182,6 @@ def process_video(video_path, anomaly_threshold, desired_fps, progress=None):
         traceback.print_exc()
         return (f"Error in video processing: {str(e)}",) + (None,) * 14

-    # Add transcription
-    progress(0.96, "Transcribing video")
-    transcription_output = transcribe(video_path, transcribe_to_text=True, transcribe_to_srt=False,
-                                      target_language='en')
-
-    # Parse the transcription output to get sentences and their timecodes
-    sentences_with_timecodes = parse_transcription(transcription_output, video_duration)
-
-    # Get anomaly timecodes
-    anomaly_timecodes_features = [df[df['Frame'] == frame]['Timecode'].iloc[0] for frame in
-                                  anomaly_frames_embeddings]
-    anomaly_timecodes_posture = [df[df['Frame'] == frame]['Timecode'].iloc[0] for frame in anomaly_frames_posture]
-
-    anomaly_sentences_features = get_sentences_before_anomalies(sentences_with_timecodes,
-                                                                anomaly_timecodes_features)
-    anomaly_sentences_posture = get_sentences_before_anomalies(sentences_with_timecodes,
-                                                               anomaly_timecodes_posture)
     progress(1.0, "Preparing results")
     results = f"Number of persons detected: {num_clusters}\n\n"
     results += "Breakdown:\n"
@@ -259,9 +241,7 @@ def process_video(video_path, anomaly_threshold, desired_fps, progress=None):
         anomaly_faces_embeddings,
         anomaly_frames_posture_images,
         aligned_faces_folder,
-        frames_folder
-        anomaly_sentences_features,
-        anomaly_sentences_posture
+        frames_folder
     )

 def is_frontal_face(landmarks, threshold=40):
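For reference, the removed block looked up the timecode of each anomalous frame in the per-frame results DataFrame with df[df['Frame'] == frame]['Timecode'].iloc[0]. Below is a minimal, self-contained sketch of that lookup pattern; the column names Frame and Timecode come from the diff, while the sample rows and anomaly indices are invented for illustration.

import pandas as pd

# Hypothetical per-frame results table with the columns referenced in the diff.
df = pd.DataFrame({
    'Frame': [0, 25, 50, 75],
    'Timecode': ['00:00:00', '00:00:01', '00:00:02', '00:00:03'],
})

# Frame indices flagged as anomalous (stand-ins for anomaly_frames_embeddings).
anomaly_frames = [25, 75]

# Same lookup pattern as the removed code: filter by frame number, take the first match.
anomaly_timecodes = [df[df['Frame'] == frame]['Timecode'].iloc[0] for frame in anomaly_frames]
print(anomaly_timecodes)  # ['00:00:01', '00:00:03']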
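The commit keeps the from utils import frame_to_timecode import, but utils.py is not part of this change, so the helper's actual signature and behavior are not shown. A hypothetical sketch of the kind of frame-index-to-timecode conversion that name suggests, assuming the frame index and frame rate are the inputs:

def frame_to_timecode(frame_idx, fps):
    """Convert a frame index at a given frame rate into an HH:MM:SS.mmm string."""
    total_seconds = frame_idx / fps
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return f"{int(hours):02d}:{int(minutes):02d}:{seconds:06.3f}"

print(frame_to_timecode(750, 25.0))  # 00:00:30.000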