Spaces: Build error

Commit: edits

app.py CHANGED
@@ -3,20 +3,18 @@ import gradio as gr
 import cv2
 
 import torch
+import torch.utils.data as data
+from torchvision import transforms
 
 import matplotlib.pyplot as plt
 from matplotlib import cm
 from matplotlib import colors
 from mpl_toolkits.axes_grid1 import ImageGrid
 
-from torchvision import transforms
-
 import fire_network
 
 import numpy as np
 
-
-
 from PIL import Image
 
 # Possible Scales for multiscale inference
@@ -30,11 +28,34 @@ state['net_params']['pretrained'] = None # no need for imagenet pretrained model
 net = fire_network.init_network(**state['net_params']).to(device)
 net.load_state_dict(state['state_dict'])
 
-
-
-
-
-
+
+# ---------------------------------------
+# transform = transforms.Compose([
+#     transforms.Resize(1024),
+#     transforms.ToTensor(),
+#     transforms.Normalize(**dict(zip(["mean", "std"], net.runtime['mean_std'])))
+# ])
+# ---------------------------------------
+
+class ImgDataset(data.Dataset):
+
+    def __init__(self, images, imsize):
+        self.images = images
+        self.imsize = imsize
+        self.transform = transforms.Compose([transforms.ToTensor(), \
+            transforms.Normalize(**dict(zip(["mean", "std"], net.runtime['mean_std'])))])
+
+
+    def __getitem__(self, index):
+        img = self.images[index]
+        return self.transform(img.thumbnail((self.imsize, self.imsize), Image.ANTIALIAS))
+
+
+    def __len__(self):
+        return len(self.images)
+
+
+
 
 
 # sf_idx_ = [55, 14, 5, 4, 52, 57, 40, 9]
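A note on the new ImgDataset: PIL's Image.thumbnail() resizes in place and returns None, so the committed __getitem__ hands None to the transform and will raise a TypeError the first time the loader is iterated; Image.ANTIALIAS is also removed in Pillow 10 (Image.LANCZOS is the replacement). Below is a minimal sketch of a working variant that keeps the diff's structure; the name ImgDatasetFixed and the explicit mean_std parameter are stand-ins for the original's closure over net.runtime['mean_std']:

import torch.utils.data as data
from PIL import Image
from torchvision import transforms

class ImgDatasetFixed(data.Dataset):
    """Like the diff's ImgDataset, but with thumbnail() used correctly."""

    def __init__(self, images, imsize, mean_std):
        self.images = images
        self.imsize = imsize
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(**dict(zip(["mean", "std"], mean_std))),
        ])

    def __getitem__(self, index):
        # thumbnail() mutates the image and returns None, so copy, resize, then transform
        img = self.images[index].copy()
        img.thumbnail((self.imsize, self.imsize), Image.LANCZOS)
        return self.transform(img)

    def __len__(self):
        return len(self.images)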
@@ -51,25 +72,38 @@ def generate_matching_superfeatures(im1, im2, scale_id=6, threshold=50, sf_ids='
         sf_idx_ = np.random.randint(256, size=n_sf_ids)
     elif sf_ids != '':
         sf_idx_ = map(int, sf_ids.strip().split(','))
-
-
-
+
+
+    dataset_ = ImgDataset(images=[im1, im2], imsize=1024)
+    loader = torch.utils.data.DataLoader(dataset_, shuffle=False, pin_memory=True)
+
+
+    # im1_tensor = transform(im1).unsqueeze(0)
+    # im2_tensor = transform(im2).unsqueeze(0)
 
     im1_cv = np.array(im1)[:, :, ::-1].copy()
     im2_cv = np.array(im2)[:, :, ::-1].copy()
 
     # extract features
     with torch.no_grad():
-        output1 = net.get_superfeatures(im1_tensor.to(device), scales=[scales[scale_id]])
-        feats1 = output1[0][0]
-        attns1 = output1[1][0]
-        strenghts1 = output1[2][0]
-
-        output2 = net.get_superfeatures(im2_tensor.to(device), scales=[scales[scale_id]])
-        feats2 = output2[0][0]
-        attns2 = output2[1][0]
-        strenghts2 = output2[2][0]
-
+        # output1 = net.get_superfeatures(im1_tensor.to(device), scales=[scales[scale_id]])
+        # feats1 = output1[0][0]
+        # attns1 = output1[1][0]
+        # strenghts1 = output1[2][0]
+
+        # output2 = net.get_superfeatures(im2_tensor.to(device), scales=[scales[scale_id]])
+        # feats2 = output2[0][0]
+        # attns2 = output2[1][0]
+        # strenghts2 = output2[2][0]
+        outputs = []
+        for im_tensor in loader:
+            outputs.append(net.get_superfeatures(im_tensor.to(device), scales=[scales[scale_id]]))
+        feats1 = outputs[0][0][0]
+        attns1 = outputs[0][1][0]
+        strenghts1 = outputs[0][2][0]
+        feats2 = outputs[1][0][0]
+        attns2 = outputs[1][1][0]
+        strenghts2 = outputs[1][2][0]
     print(feats1.shape, feats2.shape)
     print(attns1.shape, attns2.shape)
    print(strenghts1.shape, strenghts2.shape)
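Why the loader works as a drop-in for the commented-out unsqueeze(0) calls: torch.utils.data.DataLoader defaults to batch_size=1, so each im_tensor already carries the leading batch dimension, and since the two thumbnails generally differ in height and width, the default collation could not stack them anyway, which makes the one-image-per-step loop the right shape here. A small self-contained check, using dummy images and the ImgDatasetFixed sketch above:

import torch
from PIL import Image

imgs = [Image.new("RGB", (640, 480)), Image.new("RGB", (800, 600))]
dataset_ = ImgDatasetFixed(images=imgs, imsize=1024, mean_std=([0.5] * 3, [0.5] * 3))
loader = torch.utils.data.DataLoader(dataset_, shuffle=False, pin_memory=True)
for im_tensor in loader:
    # batch_size defaults to 1, so each tensor is [1, 3, H, W],
    # ready to pass to net.get_superfeatures without unsqueeze(0)
    print(im_tensor.shape)  # torch.Size([1, 3, 480, 640]), then torch.Size([1, 3, 600, 800])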
@@ -195,7 +229,7 @@ iface = gr.Interface(
     examples=[
         ["chateau_1.png", "chateau_2.png", 2, 100, '55,14,5,4,52,57,40,9'],
         ["anafi1.jpeg", "anafi2.jpeg", 4, 50, '99,100,142,213,236'],
-        ["areopoli1.jpeg", "areopoli2.jpeg", 4, 50, '
+        ["areopoli1.jpeg", "areopoli2.jpeg", 4, 50, '72,44,142,213,236'],
     ]
 )
 iface.launch(enable_queue=True)
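The examples change is the likely fix for the Build error badge at the top: in the old revision the areopoli row breaks off after the opening quote of its last field, and if the line was committed that way (rather than truncated in display), the unterminated string literal would raise a SyntaxError on import and stop the Space from building. The new revision completes the row with a superfeature id list in the same format as the other examples.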