Update video_processing.py
video_processing.py  CHANGED  +68 -0
@@ -24,6 +24,74 @@ mtcnn = MTCNN(keep_all=False, device=device, thresholds=[0.9, 0.9, 0.9], min_fac
 mp_face_mesh = mp.solutions.face_mesh
 face_mesh = mp_face_mesh.FaceMesh(static_image_mode=False, max_num_faces=1, min_detection_confidence=0.8)
 
+
+def extract_frames(video_path, output_folder, desired_fps, progress_callback=None):
+    # Sample the clip at roughly desired_fps and write the frames to disk as JPEGs.
+    os.makedirs(output_folder, exist_ok=True)
+    clip = VideoFileClip(video_path)
+    original_fps = clip.fps
+    duration = clip.duration
+    total_frames = int(duration * original_fps)
+    step = max(1, original_fps / desired_fps)
+    total_frames_to_extract = int(total_frames / step)
+
+    frame_count = 0
+    for t in np.arange(0, duration, step / original_fps):
+        frame = clip.get_frame(t)
+        cv2.imwrite(os.path.join(output_folder, f"frame_{frame_count:04d}.jpg"),
+                    cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
+        frame_count += 1
+        if progress_callback:
+            progress = min(100, (frame_count / total_frames_to_extract) * 100)
+            progress_callback(progress, "Extracting frames")
+        if frame_count >= total_frames_to_extract:
+            break
+    clip.close()
+    return frame_count, original_fps
+
+
+def process_frames(frames_folder, aligned_faces_folder, frame_count, progress):
+    # Per-frame outputs: face embeddings, posture scores/landmarks, FaceMesh
+    # landmarks, and the paths of the cropped frontal faces written to disk.
+    embeddings_by_frame = {}
+    posture_scores_by_frame = {}
+    posture_landmarks_by_frame = {}
+    facial_landmarks_by_frame = {}
+    aligned_face_paths = []
+    frame_files = sorted([f for f in os.listdir(frames_folder) if f.endswith('.jpg')])
+
+    for i, frame_file in enumerate(frame_files):
+        frame_num = int(frame_file.split('_')[1].split('.')[0])
+        frame_path = os.path.join(frames_folder, frame_file)
+        frame = cv2.imread(frame_path)
+
+        if frame is not None:
+            posture_score, posture_landmarks = calculate_posture_score(frame)
+            posture_scores_by_frame[frame_num] = posture_score
+            posture_landmarks_by_frame[frame_num] = posture_landmarks
+
+            boxes, probs = mtcnn.detect(frame)
+
+            if boxes is not None and len(boxes) > 0 and probs[0] >= 0.99:
+                x1, y1, x2, y2 = [int(b) for b in boxes[0]]
+                face = frame[y1:y2, x1:x2]
+                if face.size > 0:
+                    face_rgb = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
+                    results = face_mesh.process(face_rgb)
+                    aligned_face = None  # initialize so the check below never sees an unbound name
+                    if results.multi_face_landmarks:
+                        facial_landmarks_by_frame[frame_num] = results.multi_face_landmarks[0]
+                        if is_frontal_face(results.multi_face_landmarks[0].landmark):
+                            aligned_face = face
+
+                    if aligned_face is not None:
+                        aligned_face_resized = cv2.resize(aligned_face, (160, 160))
+                        output_path = os.path.join(aligned_faces_folder, f"frame_{frame_num}_face.jpg")
+                        cv2.imwrite(output_path, aligned_face_resized)
+                        aligned_face_paths.append(output_path)
+                        embedding = get_face_embedding(aligned_face_resized)
+                        embeddings_by_frame[frame_num] = embedding
+
+        progress((i + 1) / len(frame_files), f"Processing frame {i + 1} of {len(frame_files)}")
+
+    return embeddings_by_frame, posture_scores_by_frame, posture_landmarks_by_frame, aligned_face_paths, facial_landmarks_by_frame
+
+
 def process_video(video_path, anomaly_threshold, desired_fps, progress=None):
     start_time = time.time()
     output_folder = "output"
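
For orientation, a minimal sketch of how the two new helpers could be wired together inside process_video. It is not part of the commit: the run_pipeline_sketch wrapper and the frames/aligned_faces folder names are hypothetical, and it assumes the helpers already present in the module (calculate_posture_score, is_frontal_face, get_face_embedding, mtcnn, face_mesh). Note that extract_frames reports progress on a 0-100 scale while process_frames reports a 0-1 fraction, so a shared callback has to tolerate both.

# Hypothetical wiring sketch, not part of the commit.
import os

def run_pipeline_sketch(video_path, desired_fps, progress=lambda value, message: None):
    output_folder = "output"                                    # matches process_video above
    frames_folder = os.path.join(output_folder, "frames")       # assumed layout
    aligned_faces_folder = os.path.join(output_folder, "aligned_faces")
    os.makedirs(frames_folder, exist_ok=True)
    os.makedirs(aligned_faces_folder, exist_ok=True)

    # Step 1: downsample the video to roughly desired_fps and dump JPEG frames.
    frame_count, original_fps = extract_frames(
        video_path, frames_folder, desired_fps, progress_callback=progress
    )

    # Step 2: posture scoring + MTCNN detection + FaceMesh frontal-face filter,
    # producing per-frame embeddings and 160x160 cropped face images.
    (embeddings_by_frame,
     posture_scores_by_frame,
     posture_landmarks_by_frame,
     aligned_face_paths,
     facial_landmarks_by_frame) = process_frames(
        frames_folder, aligned_faces_folder, frame_count, progress
    )

    return embeddings_by_frame, posture_scores_by_frame, aligned_face_paths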