{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import av\n", "import torch\n", "import numpy as np\n", "from transformers import SiglipImageProcessor\n", "from torch.utils.data import Dataset, DataLoader\n", "import pandas as pd\n", "import os\n", "from transformers import SiglipModel\n", "from tqdm import tqdm\n", "\n", "import json\n", "\n", "DEVICE= torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def read_video_pyav(container, indices):\n", " '''\n", " Decode the video with PyAV decoder.\n", " Args:\n", " container (`av.container.input.InputContainer`): PyAV container.\n", " indices (`List[int]`): List of frame indices to decode.\n", " Returns:\n", " result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).\n", " '''\n", " frames = []\n", " container.seek(0)\n", " start_index = indices[0]\n", " end_index = indices[-1]\n", " for i, frame in enumerate(container.decode(video=0)):\n", " if i > end_index:\n", " break\n", " if i >= start_index and i in indices:\n", " frames.append(frame)\n", " return np.stack([x.to_ndarray(format=\"rgb24\") for x in frames])\n", "\n", "\n", "def sample_frame_indices(clip_len, frame_sample_rate, seg_len, num_segment, chunk_nb):\n", " '''\n", " Sample a given number of frame indices from the video.\n", " Args:\n", " clip_len (`int`): Total number of frames to sample.\n", " frame_sample_rate (`int`): Sample every n-th frame.\n", " seg_len (`int`): Maximum allowed index of sample's last frame.\n", " Returns:\n", " indices (`List[int]`): List of sampled frame indices\n", " '''\n", " \n", " end_idx = seg_len - 1\n", " start_idx = 0\n", " indices = np.linspace(start_idx, end_idx, num=num_segment * clip_len)[chunk_nb*clip_len:(chunk_nb+1)*clip_len]\n", " indices = np.clip(indices, 0, seg_len - 1).astype(np.int64)\n", " return indices" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class VideoClsDataset(Dataset):\n", " def __init__(self, anno_path, prefix, processor, num_segment) -> None:\n", " super().__init__()\n", " cleaned = pd.read_csv(anno_path, header=None, delimiter=' ')\n", " self.samples = list(cleaned.values[:, 0])\n", " self.labels = list(cleaned.values[:, 1])\n", " self.num_segment = num_segment\n", " \n", " self.label_array = list()\n", " self.dataset_samples = list()\n", " for i in range(self.num_segment):\n", " for sample, label in zip(self.samples, self.labels):\n", " self.label_array.append((i, label))\n", " self.dataset_samples.append(sample)\n", " \n", " self.prefix = prefix\n", " self.processor = processor\n", " def __len__(self):\n", " return len(self.label_array)\n", " def __getitem__(self, index):\n", " video_file = os.path.join(self.prefix, self.dataset_samples[index])\n", " chunk_nb, label = self.label_array[index]\n", " container = av.open(video_file)\n", " indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames, num_segment = self.num_segment, chunk_nb=chunk_nb)\n", " video = read_video_pyav(container, indices)\n", " inputs = self.processor.preprocess(list(video), return_tensors='pt')\n", " inputs.update(labels=label)\n", " return inputs" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "model = SiglipModel.from_pretrained(\"google/siglip-so400m-patch14-384\") #google/siglip-so400m-patch14-384\n", "model.to(DEVICE)\n", "preprocess = SiglipImageProcessor.from_pretrained(\"google/siglip-so400m-patch14-384\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "@torch.no_grad()\n", "def extract_feature(data_loader, model):\n", " model.eval()\n", " final_result = dict()\n", " for batch in tqdm(data_loader):\n", " for k in batch.keys():\n", " batch[k] = batch[k].to(DEVICE, non_blocking=True)\n", " B, T, C, H, W = batch['pixel_values'].shape\n", " batch['pixel_values'] = batch['pixel_values'].flatten(0,1)\n", " target = batch.pop('labels')\n", " output = model.get_image_features(**batch)\n", " output = output.reshape(B, T, output.shape[-1])\n", " for id, feat in zip(target, output):\n", " id = id.item()\n", " feat = feat.cpu().numpy()\n", " if final_result.get(id, None) is None:\n", " final_result[id] = []\n", " final_result[id].append(feat)\n", " break\n", " for k, v in final_result.items():\n", " final_result[k] = np.concatenate(final_result[k], axis=0)\n", " #np.save(file + \"_embedding\", np.array(list(final_result.values())))\n", " #np.save(file + \"_labels\", np.array(list(final_result.keys())))\n", " return np.array(list(final_result.values())), np.array(list(final_result.keys()))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# FOR DVSC23" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "DATASET_NAME = \"DVSC23\"\n", "DATA_PATH_QUERY=f'annotations/{DATASET_NAME}/queries/test.csv'\n", "DATA_PATH_DATABASE=f'annotations/{DATASET_NAME}/database/test.csv'\n", "\n", "QUERY_PREFIX=f'/path/to/prefix_path_of_query'\n", "DATABASE_PREFIX=f'/path/to/prefix_path_of_database'\n", "\n", "QUERY_MAP_PATH = f\"annotations/{DATASET_NAME}/queries/class_map.json\"\n", "DATABASE_MAP_PATH = f\"annotations/{DATASET_NAME}/database/class_map.json\"\n", "\n", "OUTPUT_DIR_QUERY=f'{DATASET_NAME}/queries'\n", "OUTPUT_DIR_DATABASE=f'{DATASET_NAME}/database'\n", "os.makedirs(OUTPUT_DIR_QUERY, exist_ok=True)\n", "os.makedirs(OUTPUT_DIR_DATABASE, exist_ok=True)\n", "\n", "NUM_SEGMENTS = 5\n", "BATCH_SIZE = 4" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def normalize(embeddings):\n", " #embeddings = (embeddings - embeddings.mean(0, keepdims=True)) / (embeddings.std(0, keepdims=True) + 1e-6)\n", " embeddings /= np.linalg.norm(embeddings, axis=-1, keepdims=True) + 1e-6\n", " embeddings = (embeddings - embeddings.mean(0, keepdims=True)) / (embeddings.std(0, keepdims=True) + 1e-6)\n", " return embeddings\n", "\n", "def save_features(embeddings, ids, save_path, video_id_map):\n", " \n", " def flatten(data, ids):\n", " T = data.shape[1]\n", " return data.reshape(-1, data.shape[-1]), np.tile(ids[:, None],(1, T)).flatten()\n", " embeddings, ids = flatten(embeddings, ids)\n", "\n", "\n", " video_ids = [video_id_map[str(id)] for id in ids] # Can also be str: \"Q20000\", ...\n", " sort_ids = np.argsort(video_ids).tolist()\n", " np.savez(\n", " save_path,\n", " video_ids=np.sort(video_ids).tolist(),\n", " timestamps=[[0, 1]]*len(video_ids),\n", " features=embeddings[sort_ids].astype(np.float32),\n", " )" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#extract feature for query in DVSC23\n", "query_dataset = VideoClsDataset(DATA_PATH_QUERY, QUERY_PREFIX, preprocess, NUM_SEGMENTS)\n", "dataloader = DataLoader(query_dataset, num_workers=4, batch_size=BATCH_SIZE)\n", "query_embeddings, query_ids = extract_feature(dataloader, model)\n", "query_embeddings = normalize(query_embeddings)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "with open(QUERY_MAP_PATH, 'r') as file:\n", " query_map = json.load(file)\n", "save_features(query_embeddings, query_ids, f'{OUTPUT_DIR_QUERY}/descriptors.npz', query_map)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#extract feature for reference in DVSC23\n", "ref_dataset = VideoClsDataset(DATA_PATH_DATABASE, DATABASE_PREFIX, preprocess, NUM_SEGMENTS)\n", "dataloader = DataLoader(ref_dataset, num_workers=4, batch_size=BATCH_SIZE)\n", "ref_embeddings, ref_ids = extract_feature(dataloader, model)\n", "ref_embeddings = normalize(ref_embeddings)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "with open(DATABASE_MAP_PATH, 'r') as file:\n", " database_map = json.load(file)\n", "save_features(ref_embeddings, ref_ids, f'{OUTPUT_DIR_DATABASE}/descriptors.npz', database_map)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "%cd DVSC23\n", "!python descriptor_eval.py \\\n", " --query_features '{OUTPUT_DIR_QUERY}/descriptors.npz' \\\n", " --ref_features '{OUTPUT_DIR_DATABASE}/descriptors.npz' \\\n", " --ground_truth ../annotations/DVSC23/test_ground_truth.csv\n", "%cd .." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# FOR FIVR-5K" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "DATA_PATH_QUERY='annotations/FIVR-5K/queries/test.csv'\n", "DATA_PATH_DATABASE='annotations/FIVR-5K/database/test.csv'\n", "\n", "QUERY_PREFIX=f'/path/to/prefix_path_of_query'\n", "DATABASE_PREFIX=f'/path/to/prefix_path_of_database'\n", "\n", "\n", "QUERY_MAP_PATH = \"annotations/FIVR-5K/queries/class_map.json\"\n", "DATABASE_MAP_PATH = \"annotations/FIVR-5K/database/class_map.json\"\n", "\n", "\n", "OUTPUT_DIR='FIVR-5K'\n", "os.makedirs(OUTPUT_DIR, exist_ok=True)\n", "\n", "NUM_SEGMENTS = 1\n", "BATCH_SIZE = 4" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def calculate_similarity(test_feature, train_feature):\n", " test_feature /= np.linalg.norm(test_feature, axis=-1, keepdims=True) + 1e-6\n", " train_feature /= np.linalg.norm(train_feature, axis=-1, keepdims=True) + 1e-6\n", " return test_feature @ (train_feature.T)\n", "\n", "\n", "def save_predictions(logits, save_path, query_map, database_map):\n", " prediction = dict()\n", " for query_id, query_prob in enumerate(logits):\n", " prob_per_query = dict()\n", " for ref_id, prob in enumerate(query_prob):\n", " if prob.item() > 0 :\n", " prob_per_query[str(database_map[str(ref_id)]).split('.')[0]] = prob.item()\n", " prediction[query_map[str(query_id)].split('.')[0]] = prob_per_query\n", " with open(save_path, 'w') as f:\n", " json.dump(prediction, f)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#extract feature for query in FIVR-5K\n", "query_dataset = VideoClsDataset(DATA_PATH_QUERY, QUERY_PREFIX, preprocess, NUM_SEGMENTS)\n", "dataloader = DataLoader(query_dataset, num_workers=4, batch_size=BATCH_SIZE)\n", "query_embeddings, query_ids = extract_feature(dataloader, model)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#extract feature for reference in FIVR-5K\n", "ref_dataset = VideoClsDataset(DATA_PATH_DATABASE, DATABASE_PREFIX, preprocess, NUM_SEGMENTS)\n", "dataloader = DataLoader(ref_dataset, num_workers=4, batch_size=BATCH_SIZE)\n", "ref_embeddings, ref_ids = extract_feature(dataloader, model)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "with open(QUERY_MAP_PATH, 'r') as file:\n", " query_map = json.load(file)\n", "\n", "with open(DATABASE_MAP_PATH, 'r') as file:\n", " database_map = json.load(file)\n", "\n", "similarity = calculate_similarity(query_embeddings.mean(1), ref_embeddings.mean(1))\n", "save_predictions(similarity, f\"{OUTPUT_DIR}/prediction.json\", query_map, database_map)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "%cd FIVR-200K\n", "!python evaluation.py --result_file \"../{OUTPUT_DIR}/prediction.json\" \\\n", " --relevant_labels ND,DS \\\n", " --dataset_ids ../annotations/FIVR-5K/used_videos.txt \\\n", " --annotations_file ../annotations/FIVR-5K/annotation.json\n", "%cd .." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "%cd FIVR-200K\n", "!python evaluation.py --result_file \"../{OUTPUT_DIR}/prediction.json\" \\\n", " --relevant_labels ND,DS,CS \\\n", " --dataset_ids ../annotations/FIVR-5K/used_videos.txt \\\n", " --annotations_file ../annotations/FIVR-5K/annotation.json\n", "%cd .." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "%cd FIVR-200K\n", "!python evaluation.py --result_file \"../{OUTPUT_DIR}/prediction.json\" \\\n", " --relevant_labels ND,DS,CS,IS \\\n", " --dataset_ids ../annotations/FIVR-5K/used_videos.txt \\\n", " --annotations_file ../annotations/FIVR-5K/annotation.json\n", "%cd .." ] } ], "metadata": { "kernelspec": { "display_name": "base", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.19" } }, "nbformat": 4, "nbformat_minor": 2 }