RahmaDev committed on
Commit 75f95bb · verified
1 Parent(s): 84ca8b9

Upload app.py

Files changed (1)
  1. app.py +247 -0
app.py ADDED
@@ -0,0 +1,247 @@
import gdown
import os
import torch
import requests
import numpy as np
import numpy.matlib
import copy
import cv2
from PIL import Image
from typing import List
import timm
import gradio as gr
import torchvision.transforms as transforms
import zipfile

from pim_module import PluginMoodel  # make sure this file is present

# === Automatic extraction of the imgs folder ===
if not os.path.exists("imgs") and os.path.exists("imgs.zip"):
    print("Extracting the imgs folder...")
    with zipfile.ZipFile("imgs.zip", 'r') as zip_ref:
        zip_ref.extractall(".")
    print("Extraction finished!")

# === Automatic download of the weights from Google Drive ===
if not os.path.exists("weights.pt"):
    print("Downloading the weights from Google Drive with gdown...")
    file_id = "17RxaEfYeQVKKXThOwqDWM6mHdII5tMpY"
    url = f"https://drive.google.com/uc?id={file_id}"
    gdown.download(url, "weights.pt", quiet=False)


# === Classes
# Full class names; these match the subfolder names under imgs/ used for the example galleries.
classes_list = [
    "Ferrage_Et_Accessoires_Anti_Fausse_Manoeuvre",
    "Ferrage_Et_Accessoires_Busettes",
    "Ferrage_Et_Accessoires_Butees",
    "Ferrage_Et_Accessoires_Chariots",
    "Ferrage_Et_Accessoires_Charniere",
    "Ferrage_Et_Accessoires_Compas_limiteur",
    "Ferrage_Et_Accessoires_Cylindres",
    "Ferrage_Et_Accessoires_Gaches",
    "Ferrage_Et_Accessoires_Renvois_D_Angle",
    "Joints_Et_Consommables_Equerres_Aluminium_Moulees",
    "Joints_Et_Consommables_Visserie_Inox_Alu",
    "Poignee_Carre_7_mm",
    "Poignee_Carre_8_mm",
    "Poignee_Cremone",
    "Poignee_Cuvette",
    "Poignee_De_Tirage",
    "Poignee_Pour_Levant_Coulissant",
    "Serrure_Cremone_Multipoints",
    "Serrure_Cuvette",
    "Serrure_Gaches",
    "Serrure_Loqueteau",
    "Serrure_Pene_Crochet",
    "Serrure_Pour_Porte",
    "Serrure_Tringles"
]

# Short display names (not referenced elsewhere in this file).
short_classes_list = [
    "Anti-fausse-manoeuvre",
    "Busettes",
    "Butées",
    "Chariots",
    "Charnière",
    "Compas-limiteur",
    "Cylindres",
    "Gaches",
    "Renvois d'angle",
    "Equerres aluminium moulées",
    "Visserie inox alu",
    "Poignée carré 7 mm",
    "Poignée carré 8 mm",
    "Poignée crémone",
    "Poignée cuvette",
    "Poignée de tirage",
    "Poignée pour levant coulissant",
    "Serrure crémone multipoints",
    "Serrure cuvette",
    "Serrure gaches",
    "Loqueteau",
    "Serrure pene crochet",
    "Serrure pour porte",
    "Serrure tringles",
]

data_size = 384
fpn_size = 1536
num_classes = 24
num_selects = {'layer1': 256, 'layer2': 128, 'layer3': 64, 'layer4': 32}
features, grads, module_id_mapper = {}, {}, {}

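# The two hooks below record the inputs/outputs (and gradients) of selected
# modules into the global `features` / `grads` dicts, keyed by the order in
# which the modules run during the forward pass. They are registered in
# build_model() on the Swin backbone stages and the FPN projection layers;
# in this app the captured tensors are not consumed anywhere (predict_image
# only resets the dicts), so they presumably exist for debugging or future
# visualisation.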
def forward_hook(module, inp_hs, out_hs):
    layer_id = len(features) + 1
    module_id_mapper[module] = layer_id
    features[layer_id] = {"in": inp_hs, "out": out_hs}

def backward_hook(module, inp_grad, out_grad):
    layer_id = module_id_mapper[module]
    grads[layer_id] = {"in": inp_grad, "out": out_grad}

def build_model(path: str):
    backbone = timm.create_model('swin_large_patch4_window12_384_in22k', pretrained=True)
    model = PluginMoodel(
        backbone=backbone,
        return_nodes=None,
        img_size=data_size,
        use_fpn=True,
        fpn_size=fpn_size,
        proj_type="Linear",
        upsample_type="Conv",
        use_selection=True,
        num_classes=num_classes,
        num_selects=num_selects,
        use_combiner=True,
        comb_proj_size=None
    )
    ckpt = torch.load(path, map_location="cpu", weights_only=False)
    model.load_state_dict(ckpt["model"], strict=False)
    model.eval()

    for layer in [0, 1, 2, 3]:
        model.backbone.layers[layer].register_forward_hook(forward_hook)
        model.backbone.layers[layer].register_full_backward_hook(backward_hook)

    for i in range(1, 5):
        getattr(model.fpn_down, f'Proj_layer{i}').register_forward_hook(forward_hook)
        getattr(model.fpn_down, f'Proj_layer{i}').register_full_backward_hook(backward_hook)
        getattr(model.fpn_up, f'Proj_layer{i}').register_forward_hook(forward_hook)
        getattr(model.fpn_up, f'Proj_layer{i}').register_full_backward_hook(backward_hook)

    return model

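# ImgLoader reproduces the evaluation preprocessing: resize to 510x510,
# centre-crop to 384x384 (the Swin-L/384 input size) and normalise with the
# standard ImageNet statistics.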
class ImgLoader:
    def __init__(self, img_size):
        self.transform = transforms.Compose([
            transforms.Resize((510, 510), Image.BILINEAR),
            transforms.CenterCrop((img_size, img_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

    def load(self, input_img):
        if isinstance(input_img, str):
            ori_img = cv2.imread(input_img)
            img = Image.fromarray(cv2.cvtColor(ori_img, cv2.COLOR_BGR2RGB))
        elif isinstance(input_img, Image.Image):
            img = input_img
        else:
            raise ValueError("Invalid image")

        if img.mode != "RGB":
            img = img.convert("RGB")

        return self.transform(img).unsqueeze(0)

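# cal_backward() fuses the per-layer predictions returned by the model: each
# selected output is softmaxed and summed, then softmaxed again into a single
# score vector over the 24 classes. The SVD extracts the right-singular vector
# associated with the smallest singular value of A = repmat(smax)^T - I (an
# approximate null vector of A); that vector is log-transformed, shifted and
# normalised to sum to 1, and the five highest-scoring classes are returned as
# the dict expected by gr.Label.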
def cal_backward(out) -> dict:
    target_layer_names = ['layer1', 'layer2', 'layer3', 'layer4',
                          'FPN1_layer1', 'FPN1_layer2', 'FPN1_layer3', 'FPN1_layer4', 'comb_outs']

    sum_out = None
    for name in target_layer_names:
        tmp_out = out[name].mean(1) if name != "comb_outs" else out[name]
        tmp_out = torch.softmax(tmp_out, dim=-1)
        sum_out = tmp_out if sum_out is None else sum_out + tmp_out

    with torch.no_grad():
        smax = torch.softmax(sum_out, dim=-1)
        A = np.transpose(np.matlib.repmat(smax[0], num_classes, 1)) - np.eye(num_classes)
        _, _, V = np.linalg.svd(A, full_matrices=True)
        V = V[num_classes - 1, :]
        if V[0] < 0:
            V = -V
        V = np.log(V)
        V = V - min(V)
        V = V / sum(V)

    top5_indices = np.argsort(-V)[:5]
    top5_scores = -np.sort(-V)[:5]

    # Build the dictionary expected by gr.Label
    top5_dict = {classes_list[int(idx)]: float(f"{score:.4f}") for idx, score in zip(top5_indices, top5_scores)}
    return top5_dict

# === Model loading
model = build_model("weights.pt")
img_loader = ImgLoader(data_size)


def predict_image(image: Image.Image):
    global features, grads, module_id_mapper
    features, grads, module_id_mapper = {}, {}, {}

    if image is None:
        # Return one value per output component (label + 5 galleries).
        return {}, None, None, None, None, None
        # raise ValueError("No image received. Check the input.")

    if image.mode != "RGB":
        image = image.convert("RGB")

    image_path = "temp.jpg"
    image.save(image_path)

    img_tensor = img_loader.load(image_path)
    out = model(img_tensor)
    top5_dict = cal_backward(out)  # {class: score}

    gallery_outputs = []
    for class_name in top5_dict:
        images = [
            (f"imgs/{class_name}/{class_name}_0001.jpg", f"Exemple {class_name} 1"),
            (f"imgs/{class_name}/{class_name}_0002.jpg", f"Exemple {class_name} 2"),
            (f"imgs/{class_name}/{class_name}_0003.jpg", f"Exemple {class_name} 3"),
        ]
        gallery_outputs.append(images)

    return top5_dict, *gallery_outputs

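# The interface below exposes two input tabs (file upload and webcam); both
# feed the same predict_image function, whose six return values map onto the
# label component and the five example galleries.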
# === Gradio interface
with gr.Blocks(css="""
.gr-image-upload { display: none !important }
.gallery-container .gr-box { height: auto !important; padding: 0 !important; }
""") as demo:
    with gr.Row():
        with gr.Column(scale=1):
            with gr.Tab("Téléversement"):
                image_input_upload = gr.Image(type="pil", label="Image à classer (upload)", sources=["upload"])
            with gr.Tab("Webcam"):
                image_input_webcam = gr.Image(type="pil", label="Image à classer (webcam)", sources=["webcam"])

        with gr.Column(scale=1.5):
            label_output = gr.Label(label="Prédictions")
            gallery_outputs = [
                gr.Gallery(label="", columns=3, height=300, container=True, elem_classes=["gallery-container"])
                for _ in range(5)
            ]

    image_input_upload.change(fn=predict_image, inputs=image_input_upload, outputs=[label_output] + gallery_outputs)
    image_input_webcam.change(fn=predict_image, inputs=image_input_webcam, outputs=[label_output] + gallery_outputs)

if __name__ == "__main__":
    demo.launch()
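For reference, a minimal sketch of how the prediction path can be exercised outside the Gradio UI, assuming `weights.pt` and the `imgs/` folder are available in the working directory; `sample.jpg` is an illustrative path, and note that importing app.py builds the model (and downloads the assets) at import time:

from PIL import Image

import app  # builds the model and fetches weights.pt / imgs.zip on import

# predict_image returns the top-5 score dict followed by five example galleries.
outputs = app.predict_image(Image.open("sample.jpg"))
top5, galleries = outputs[0], outputs[1:]

for class_name, score in top5.items():
    print(f"{class_name}: {score:.4f}")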