Update utils.py
utils.py CHANGED
@@ -26,6 +26,7 @@ def add_timecode_to_image(image, timecode):
     font = ImageFont.truetype("arial.ttf", 15)
     draw.text((10, 10), timecode, (255, 0, 0), font=font)
     return np.array(img_pil)
+
 
 def add_timecode_to_image_body(image, timecode):
     from PIL import Image, ImageDraw, ImageFont
@@ -37,6 +38,39 @@ def add_timecode_to_image_body(image, timecode):
     draw.text((10, 10), timecode, (255, 0, 0), font=font)
     return np.array(img_pil)
 
-def 
-
-
+def create_annotated_video(video_path, df, mse_embeddings, largest_cluster, output_path):
+    video = cv2.VideoCapture(video_path)
+    fps = video.get(cv2.CAP_PROP_FPS)
+    width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+    frame_number = 0
+    while True:
+        ret, frame = video.read()
+        if not ret:
+            break
+        # Detect face and draw bounding box
+        boxes, _ = mtcnn.detect(frame)
+        if boxes is not None and len(boxes) > 0:
+            box = boxes[0]
+            cv2.rectangle(frame, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 0), 2)
+        # Draw facial landmarks
+        face_mesh_results = face_mesh.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+        if face_mesh_results.multi_face_landmarks:
+            for face_landmarks in face_mesh_results.multi_face_landmarks:
+                mp_drawing.draw_landmarks(
+                    image=frame,
+                    landmark_list=face_landmarks,
+                    connections=mp_face_mesh.FACEMESH_TESSELATION,
+                    landmark_drawing_spec=None,
+                    connection_drawing_spec=mp_drawing_styles.get_default_face_mesh_tesselation_style()
+                )
+        # Add MSE annotation
+        if frame_number in df['Frame'].values:
+            mse = mse_embeddings[df['Frame'] == frame_number].iloc[0]
+            cv2.putText(frame, f"MSE: {mse:.4f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+        out.write(frame)
+        frame_number += 1
+    video.release()
+    out.release()
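
Note: the new create_annotated_video function relies on several module-level objects that are not defined anywhere in this diff (cv2, np, mtcnn, face_mesh, mp_face_mesh, mp_drawing, mp_drawing_styles). If utils.py does not set them up elsewhere, calling the function will raise a NameError. The sketch below shows the kind of setup the added code appears to assume, with facenet-pytorch providing the bounding-box detector and MediaPipe Face Mesh providing the landmark overlay; the constructor arguments are illustrative guesses, not values taken from this repo.

import cv2
import numpy as np
import mediapipe as mp
from facenet_pytorch import MTCNN

# Face detector used for the bounding box (assumed: facenet-pytorch MTCNN).
mtcnn = MTCNN(keep_all=False)

# MediaPipe Face Mesh objects used for the landmark overlay.
mp_face_mesh = mp.solutions.face_mesh
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
face_mesh = mp_face_mesh.FaceMesh(static_image_mode=False, max_num_faces=1)

With that setup in place, a call would look roughly like create_annotated_video("input.mp4", df, mse_embeddings, largest_cluster, "annotated.mp4"), where df carries a 'Frame' column and mse_embeddings holds one MSE value per analysed frame. Two things to keep in mind about the code as committed: largest_cluster is accepted but never used inside the function body shown here, and the lookup mse_embeddings[df['Frame'] == frame_number].iloc[0] only works if mse_embeddings is a pandas Series aligned with df, since a plain NumPy array has no .iloc attribute.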