Update app.py
Browse files
app.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
import subprocess
|
|
|
|
| 2 |
|
| 3 |
# Define the command to be executed
|
| 4 |
command = ["python", "setup.py", "build_ext", "--inplace"]
|
|
@@ -233,7 +234,7 @@ def load_model(checkpoint):
|
|
| 233 |
|
| 234 |
|
| 235 |
|
| 236 |
-
def sam_process(input_first_frame_image, checkpoint, tracking_points, trackings_input_label, video_frames_dir, scanned_frames, progress=gr.Progress(track_tqdm=True)):
|
| 237 |
# 1. We need to preprocess the video and store frames in the right directory
|
| 238 |
# — Penser à utiliser un ID unique pour le dossier
|
| 239 |
|
|
@@ -254,7 +255,16 @@ def sam_process(input_first_frame_image, checkpoint, tracking_points, trackings_
|
|
| 254 |
# predictor.reset_state(inference_state) # if any previous tracking, reset
|
| 255 |
|
| 256 |
# Add new point
|
| 257 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 258 |
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
|
| 259 |
|
| 260 |
# Let's add a positive click at (x, y) = (210, 350) to get started
|
|
@@ -360,6 +370,15 @@ def update_ui(vis_frame_type):
|
|
| 360 |
elif vis_frame_type == "render":
|
| 361 |
return gr.update(visible=False), gr.update(visible=True)
|
| 362 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 363 |
|
| 364 |
with gr.Blocks() as demo:
|
| 365 |
first_frame_path = gr.State()
|
|
@@ -404,7 +423,7 @@ with gr.Blocks() as demo:
|
|
| 404 |
video_in = gr.Video(label="Video IN")
|
| 405 |
|
| 406 |
with gr.Column():
|
| 407 |
-
working_frame = gr.Dropdown(label="working frame ID", choices=[""], value=None, visible=
|
| 408 |
output_result = gr.Image(label="current working mask ref")
|
| 409 |
with gr.Row():
|
| 410 |
vis_frame_type = gr.Radio(label="Propagation level", choices=["check", "render"], value="check", scale=2)
|
|
@@ -434,6 +453,13 @@ with gr.Blocks() as demo:
|
|
| 434 |
queue = False
|
| 435 |
)
|
| 436 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 437 |
submit_btn.click(
|
| 438 |
fn = sam_process,
|
| 439 |
inputs = [input_first_frame_image, checkpoint, tracking_points, trackings_input_label, video_frames_dir, scanned_frames],
|
|
|
|
| 1 |
import subprocess
|
| 2 |
+
import re
|
| 3 |
|
| 4 |
# Define the command to be executed
|
| 5 |
command = ["python", "setup.py", "build_ext", "--inplace"]
|
|
|
|
| 234 |
|
| 235 |
|
| 236 |
|
| 237 |
+
def sam_process(input_first_frame_image, checkpoint, tracking_points, trackings_input_label, video_frames_dir, scanned_frames, working_frame, progress=gr.Progress(track_tqdm=True)):
|
| 238 |
# 1. We need to preprocess the video and store frames in the right directory
|
| 239 |
# — Penser à utiliser un ID unique pour le dossier
|
| 240 |
|
|
|
|
| 255 |
# predictor.reset_state(inference_state) # if any previous tracking, reset
|
| 256 |
|
| 257 |
# Add new point
|
| 258 |
+
if working_frame is None:
|
| 259 |
+
ann_frame_idx = 0 # the frame index we interact with
|
| 260 |
+
else:
|
| 261 |
+
# Use a regular expression to find the integer
|
| 262 |
+
match = re.search(r'frame_(\d+)', working_frame)
|
| 263 |
+
if match:
|
| 264 |
+
# Extract the integer from the match
|
| 265 |
+
frame_number = int(match.group(1))
|
| 266 |
+
ann_frame_idx = frame_number
|
| 267 |
+
|
| 268 |
ann_obj_id = 1 # give a unique id to each object we interact with (it can be any integers)
|
| 269 |
|
| 270 |
# Let's add a positive click at (x, y) = (210, 350) to get started
|
|
|
|
| 370 |
elif vis_frame_type == "render":
|
| 371 |
return gr.update(visible=False), gr.update(visible=True)
|
| 372 |
|
| 373 |
+
def switch_working_frame(working_frame, scanned_frames, video_frames_dir):
|
| 374 |
+
# Use a regular expression to find the integer
|
| 375 |
+
match = re.search(r'frame_(\d+)', working_frame)
|
| 376 |
+
if match:
|
| 377 |
+
# Extract the integer from the match
|
| 378 |
+
frame_number = int(match.group(1))
|
| 379 |
+
ann_frame_idx = frame_number
|
| 380 |
+
new_working_frame = os.path.join(video_frames_dir, scanned_frames[ann_frame_idx])
|
| 381 |
+
return new_working_frame, gr.State([]), gr.State([]), new_working_frame, new_working_frame
|
| 382 |
|
| 383 |
with gr.Blocks() as demo:
|
| 384 |
first_frame_path = gr.State()
|
|
|
|
| 423 |
video_in = gr.Video(label="Video IN")
|
| 424 |
|
| 425 |
with gr.Column():
|
| 426 |
+
working_frame = gr.Dropdown(label="working frame ID", choices=[""], value=None, visible=False, allow_custom_value=False, interactive=True)
|
| 427 |
output_result = gr.Image(label="current working mask ref")
|
| 428 |
with gr.Row():
|
| 429 |
vis_frame_type = gr.Radio(label="Propagation level", choices=["check", "render"], value="check", scale=2)
|
|
|
|
| 453 |
queue = False
|
| 454 |
)
|
| 455 |
|
| 456 |
+
working_frame.change(
|
| 457 |
+
fn = switch_working_frame,
|
| 458 |
+
inputs = [working_frame, scanned_frames, video_frames_dir],
|
| 459 |
+
outputs = [first_frame_path, tracking_points, trackings_input_label, input_first_frame_image, points_map],
|
| 460 |
+
queue=False
|
| 461 |
+
)
|
| 462 |
+
|
| 463 |
submit_btn.click(
|
| 464 |
fn = sam_process,
|
| 465 |
inputs = [input_first_frame_image, checkpoint, tracking_points, trackings_input_label, video_frames_dir, scanned_frames],
|