Update app.py
app.py CHANGED
@@ -1,191 +1,365 @@
-import
 import numpy as np
-import
 import cv2
 import tensorflow as tf
-
-import

 }

     if progress is None:
         return
     try:
-        val = float(
-        val
     except Exception:
-        val =
     try:
     except Exception:

-    _update_progress(progress, 10, desc="Préparation...")
-    grad_model = tf.keras.models.Model(
-        [model.inputs], [model.get_layer(layer_name).output, model.output]
-    )
-
-    with tf.GradientTape() as tape:
-        conv_outputs, predictions = grad_model(img_array)
-        pred_index = tf.argmax(predictions[0])
-        loss = predictions[:, pred_index]
-
-    _update_progress(progress, 40, desc="Calcul des gradients...")
-
-    grads = tape.gradient(loss, conv_outputs)
-    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
-    conv_outputs = conv_outputs[0]
-
-    _update_progress(progress, 70, desc="Génération de la heatmap...")
-
-    heatmap = tf.reduce_mean(tf.multiply(pooled_grads, conv_outputs), axis=-1)
-    heatmap = np.maximum(heatmap, 0)
-    heatmap /= np.max(heatmap) if np.max(heatmap) != 0 else 1
-
-    heatmap = cv2.resize(heatmap, (img_array.shape[2], img_array.shape[1]))
-    heatmap = np.uint8(255 * heatmap)
-    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
-
-    _update_progress(progress, 90, desc="Application overlay...")
-
-    superimposed_img = heatmap * 0.4 + img_array[0] * 255
-    _update_progress(progress, 100, desc="✅ Grad-CAM terminé !")
-
-    return np.uint8(superimposed_img)
-
-# -----------------------
-# UI: quick prediction
-# -----------------------
-def quick_predict_ui(image, model_names, progress=gr.Progress()):
-    global current_preds
-    current_preds = {}
-    if image is None:
-        return "⚠️ Pas d'image", None, None
-
-    _update_progress(progress, 5, desc="Prétraitement...")
-    img_array = preprocess_image(image)
-
-    fig, ax = plt.subplots()
-    ax.set_title("Prédictions modèles")
-    bar_labels, bar_values = [], []
-
-    step = 30
-    for idx, name in enumerate(model_names):
-        _update_progress(progress, 5 + step * (idx+1) / len(model_names),
-                         desc=f"Prédiction avec {name}...")
-
-        if name not in loaded_models:
-            loaded_models[name] = load_model(AVAILABLE_MODELS[name])
-        model = loaded_models[name]
-
-        preds = model.predict(img_array, verbose=0)[0]
-        current_preds[name] = preds
-        bar_labels.append(name)
-        bar_values.append(float(np.max(preds)))
-
-    _update_progress(progress, 70, desc="Agrégation...")
-
-    if len(current_preds) > 1:
-        avg = np.mean(list(current_preds.values()), axis=0)
-        current_preds["ensemble"] = avg
-        idx = np.argmax(avg)
-        label = f"Ensemble : {idx} ({avg[idx]:.2%})"
     else:

-    cam_img = make_gradcam(model, img_array, layer_name, progress=progress)
-
-    _update_progress(progress, 100, desc="✅ Grad-CAM généré !")
-
-    return cam_img, "✅ Grad-CAM généré"
-
-# -----------------------
-# Gradio interface
-# -----------------------
-with gr.Blocks() as demo:
-    gr.Markdown("## 🧠 Diagnostic IA avec Grad-CAM")

     with gr.Row():
-        with gr.Column():

-            outputs=[gradcam_out, status_out])
-
-demo.launch()
+import os
 import numpy as np
+import gradio as gr
 import cv2
 import tensorflow as tf
+import keras
+from keras.models import Model
+from keras.preprocessing import image
+from huggingface_hub import hf_hub_download
+import pandas as pd
+from PIL import Image
+import plotly.express as px
+import time
+
+# Disable the GPU and quiet TensorFlow logs
+os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
+os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+tf.config.set_visible_devices([], 'GPU')
+
+# ---- Configuration ----
+CLASS_NAMES = ['akiec', 'bcc', 'bkl', 'df', 'nv', 'vasc', 'mel']
+label_to_index = {name: i for i, name in enumerate(CLASS_NAMES)}
+diagnosis_map = {
+    'akiec': 'Bénin', 'bcc': 'Malin', 'bkl': 'Bénin', 'df': 'Bénin',
+    'nv': 'Bénin', 'vasc': 'Bénin', 'mel': 'Malin'
 }
+
+# ---- Model loading ----
+def load_models_safely():
+    models = {}
+    try:
+        print("📥 Téléchargement ResNet50...")
+        resnet_path = hf_hub_download(repo_id="ericjedha/resnet50", filename="Resnet50.keras")
+        models['resnet50'] = keras.saving.load_model(resnet_path, compile=False)
+        print("✅ ResNet50 chargé")
+    except Exception as e:
+        models['resnet50'] = None
+
+    try:
+        print("📥 Téléchargement DenseNet201...")
+        densenet_path = hf_hub_download(repo_id="ericjedha/densenet201", filename="Densenet201.keras")
+        models['densenet201'] = keras.saving.load_model(densenet_path, compile=False)
+        print("✅ DenseNet201 chargé")
+    except Exception as e:
+        models['densenet201'] = None
+
+    try:
+        print("📥 Chargement Xception local...")
+        if os.path.exists("Xception.keras"):
+            models['xception'] = keras.saving.load_model("Xception.keras", compile=False)
+            print("✅ Xception chargé")
+        else:
+            models['xception'] = None
+    except Exception as e:
+        models['xception'] = None
+
+    loaded = {k: v for k, v in models.items() if v is not None}
+    if not loaded:
+        raise Exception("❌ Aucun modèle n'a pu être chargé!")
+
+    print(f"🎯 Modèles chargés: {list(loaded.keys())}")
+    return models
+
+try:
+    models_dict = load_models_safely()
+    model_resnet50 = models_dict.get('resnet50')
+    model_densenet = models_dict.get('densenet201')
+    model_xcept = models_dict.get('xception')
+except Exception as e:
+    print(f"🚨 ERREUR CRITIQUE: {e}")
+    model_resnet50 = model_densenet = model_xcept = None
+
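Each hf_hub_download call above resolves the file in the local Hugging Face cache and returns its path, so later launches reuse the download. A quick way to see where the weights land, outside the app (editor's sketch using the same repo ids; not part of the commit):

    from huggingface_hub import hf_hub_download
    path = hf_hub_download(repo_id="ericjedha/resnet50", filename="Resnet50.keras")
    print(path)  # cached location, typically under ~/.cache/huggingface/hub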
+# ---- Preprocessors ----
+from tensorflow.keras.applications.xception import preprocess_input as preprocess_xception
+from tensorflow.keras.applications.resnet50 import preprocess_input as preprocess_resnet
+from tensorflow.keras.applications.densenet import preprocess_input as preprocess_densenet
+
+# ---- Utils ----
+def _renorm_safe(p: np.ndarray) -> np.ndarray:
+    p = np.clip(p, 0.0, None)  # avoid negative values
+    s = np.sum(p)
+    if s <= 0:
+        return np.ones_like(p, dtype=np.float32) / len(p)
+    normalized = p / s
+    return normalized / np.sum(normalized) if np.sum(normalized) > 1.0001 else normalized
+
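For intuition, a tiny worked example of _renorm_safe (editor's sketch, assuming the definitions above are in scope): negative mass is clipped first, then the vector is rescaled to sum to 1.

    p = np.array([0.2, -0.1, 0.4], dtype=np.float32)
    print(_renorm_safe(p))  # clip -> [0.2, 0.0, 0.4], renormalise -> ≈ [0.3333, 0.0, 0.6667]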
+def get_primary_input_name(model):
+    if isinstance(model.inputs, list) and len(model.inputs) > 0:
+        return model.inputs[0].name.split(':')[0]
+    return "input_1"
+
+# Robust progress helper
+def _update_progress(progress, value, desc=None):
+    """
+    Robust wrapper that accepts a value in 0-1 or 0-100
+    and animates the progression instead of jumping.
+    """
     if progress is None:
         return
     try:
+        val = float(value)
+        if val > 1.0:  # convert 0..100 → 0..1
+            val = val / 100.0
     except Exception:
+        val = value
+
+    # smooth interpolation: advance in small steps
     try:
+        current = getattr(progress, "_last_val", 0.0)
+        steps = 10
+        for step in np.linspace(current, val, steps):
+            if desc is None:
+                progress(float(step))
+            else:
+                progress(float(step), desc=desc)
+            time.sleep(0.02)  # smoothing speed
+        progress._last_val = val
     except Exception:
+        try:
+            if desc is None:
+                progress(val)
+            else:
+                progress(val, desc)
+        except Exception:
+            pass
+
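Outside Gradio, any callable accepting (value, desc=...) can stand in for gr.Progress(), which makes the easing easy to exercise; a minimal sketch (the stub is illustrative, not part of the commit):

    def fake_progress(v, desc=None):
        print(f"{v:.2f} {desc or ''}")

    _update_progress(fake_progress, 40, desc="Calcul...")  # 40 is rescaled to 0.40, eased over 10 steps
    _update_progress(fake_progress, 0.9)                   # already in 0..1, used as-is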
+# ---- PREDICT SINGLE ----
+def predict_single(img_input, weights=(0.45, 0.25, 0.30), normalize=True):
+    if isinstance(img_input, str):
+        pil_img = Image.open(img_input).convert("RGB")
+    elif isinstance(img_input, Image.Image):
+        pil_img = img_input.convert("RGB")
     else:
+        raise ValueError("img_input doit être un chemin (str) ou une image PIL")
+
+    preds = {}
+    if model_xcept is not None:
+        img_x = np.expand_dims(preprocess_xception(np.array(pil_img.resize((299, 299), resample=Image.BILINEAR))), axis=0)
+        preds['xception'] = model_xcept.predict(img_x, verbose=0)[0]
+    if model_resnet50 is not None:
+        img_r = np.expand_dims(preprocess_resnet(np.array(pil_img.resize((224, 224), resample=Image.BILINEAR))), axis=0)
+        preds['resnet50'] = model_resnet50.predict(img_r, verbose=0)[0]
+    if model_densenet is not None:
+        img_d = np.expand_dims(preprocess_densenet(np.array(pil_img.resize((224, 224), resample=Image.BILINEAR))), axis=0)
+        preds['densenet201'] = model_densenet.predict(img_d, verbose=0)[0]
+
+    ensemble = np.zeros(len(CLASS_NAMES), dtype=np.float32)
+    if 'xception' in preds: ensemble += weights[0] * preds['xception']
+    if 'resnet50' in preds: ensemble += weights[1] * preds['resnet50']
+    if 'densenet201' in preds: ensemble += weights[2] * preds['densenet201']
+    if 'densenet201' in preds:
+        mel_idx = label_to_index['mel']
+        ensemble[mel_idx] = 0.5 * ensemble[mel_idx] + 0.5 * preds['densenet201'][mel_idx]
+
+    if normalize:
+        ensemble = _renorm_safe(ensemble)
+
+    preds['ensemble'] = ensemble
+    return preds
+
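Called directly, predict_single returns one probability vector per loaded model plus the weighted "ensemble" entry; a usage sketch (assuming at least one model loaded; the filename is one of the commit's example images):

    img = Image.open("ISIC_0024627.jpg")
    out = predict_single(img)                 # a path string works too
    top = int(np.argmax(out["ensemble"]))
    print(CLASS_NAMES[top], f"{out['ensemble'][top]:.1%}")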
+# ---- Grad-CAM helpers ----
+LAST_CONV_LAYERS = {
+    "xception": "block14_sepconv2_act",
+    "resnet50": "conv5_block3_out",
+    "densenet201": "conv5_block32_concat"
 }

+def _guess_backbone_name(model):
+    name = (getattr(model, "name", "") or "").lower()
+    if "xception" in name: return "xception"
+    if "resnet" in name: return "resnet50"
+    if "densenet" in name: return "densenet201"
+    return None
+
+def find_last_dense_layer(model):
+    for layer in reversed(model.layers):
+        if isinstance(layer, keras.layers.Dense):
+            return layer
+    raise ValueError("Aucune couche Dense trouvée dans le modèle.")
+
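LAST_CONV_LAYERS pins the final convolutional feature map of each known backbone by name. For an unlisted backbone, the equivalent layer can usually be found by scanning for the last 4-D output; a hedged sketch (editor's addition, not part of the commit):

    def find_last_conv_layer_name(model):
        # Walk from the output backwards; the first layer producing a
        # (batch, H, W, C) tensor is the last feature map Grad-CAM can use.
        for layer in reversed(model.layers):
            try:
                if len(layer.output.shape) == 4:
                    return layer.name
            except Exception:
                continue
        raise ValueError("No 4-D feature map found")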
+# ---- GRAD-CAM ----
+def make_gradcam(image_pil, model, last_conv_layer_name, class_index, progress=None):
+    if model is None: return np.array(image_pil)
+    start_time = time.time()
+    try:
+        _update_progress(progress, 0, desc="Préparation de l'image...")
+        input_size = model.input_shape[1:3]
+
+        if 'xception' in model.name.lower():
+            preprocessor = preprocess_xception
+        elif 'resnet50' in model.name.lower():
+            preprocessor = preprocess_resnet
+        elif 'densenet' in model.name.lower():
+            preprocessor = preprocess_densenet
+        else:
+            preprocessor = preprocess_densenet
+
+        img_np = np.array(image_pil.convert("RGB"))
+        img_resized = cv2.resize(img_np, input_size)
+        img_array_preprocessed = preprocessor(np.expand_dims(img_resized, axis=0))
+
+        _update_progress(progress, 25, desc="Calcul des gradients...")
+
+        try:
+            conv_layer = model.get_layer(last_conv_layer_name)
+        except ValueError:
+            return img_resized
+
+        dense_layer = find_last_dense_layer(model)
+
+        grad_model = Model(model.inputs, [conv_layer.output, model.output])
+        input_name = get_primary_input_name(model)
+        input_for_model = {input_name: img_array_preprocessed}
+
+        with tf.GradientTape() as tape:
+            last_conv_layer_output, preds = grad_model(input_for_model, training=False)
+            if isinstance(preds, list):
+                preds = preds[0]
+            class_channel = preds[:, int(class_index)]
+
+        grads = tape.gradient(class_channel, last_conv_layer_output)
+
+        if grads is None:
+            return img_resized
+
+        _update_progress(progress, 50, desc="Génération de la heatmap...")
+
+        pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
+        last_conv_layer_output = last_conv_layer_output[0]
+
+        heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
+        heatmap = tf.squeeze(heatmap)
+        heatmap = tf.maximum(heatmap, 0)
+        max_val = tf.math.reduce_max(heatmap)
+
+        if max_val == 0:
+            heatmap = tf.ones_like(heatmap) * 0.5
+        else:
+            heatmap = heatmap / max_val
+
+        heatmap_np = heatmap.numpy()
+
+        _update_progress(progress, 75, desc="Application de la superposition...")
+
+        heatmap_np = np.clip(heatmap_np.astype(np.float32), 0, 1)
+        heatmap_resized = cv2.resize(heatmap_np, (img_resized.shape[1], img_resized.shape[0]))
+        heatmap_uint8 = np.uint8(255 * heatmap_resized)
+        heatmap_colored = cv2.applyColorMap(heatmap_uint8, cv2.COLORMAP_JET)
+
+        img_bgr = cv2.cvtColor(img_resized, cv2.COLOR_RGB2BGR)
+        superimposed_img = cv2.addWeighted(img_bgr, 0.6, heatmap_colored, 0.4, 0)
+
+        _update_progress(progress, 100, desc="✅ Grad-CAM terminé !")
+
+        return cv2.cvtColor(superimposed_img, cv2.COLOR_BGR2RGB)
+    except Exception as e:
+        import traceback; traceback.print_exc()
+        return np.array(image_pil)
+
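End to end, an overlay can be produced without the UI; a usage sketch (assumes model_densenet loaded above; file names are examples):

    img = Image.open("ISIC_0024627.jpg").convert("RGB")
    cls = int(np.argmax(predict_single(img)["ensemble"]))
    overlay = make_gradcam(img, model_densenet, LAST_CONV_LAYERS["densenet201"], cls)
    Image.fromarray(overlay).save("gradcam.png")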
+# ---- ASYNC HANDLING / STATE ----
+current_image = None
+current_predictions = None
+
+# ---- Gradio UI functions ----
+def quick_predict_ui(image_pil):
+    global current_image, current_predictions
+    if image_pil is None:
+        return "Veuillez uploader une image.", None, "❌ Erreur: Aucune image fournie."
+    try:
+        current_image = image_pil
+        all_preds = predict_single(image_pil)
+        current_predictions = all_preds
+        ensemble_probs = all_preds["ensemble"]
+
+        top_class_idx = int(np.argmax(ensemble_probs))
+        top_class_name = CLASS_NAMES[top_class_idx]
+        global_diag = diagnosis_map[top_class_name]
+
+        confidences = {CLASS_NAMES[i]: float(ensemble_probs[i] * 100) for i in range(len(CLASS_NAMES))}
+        df = pd.DataFrame.from_dict(confidences, orient='index', columns=['Probabilité']).reset_index().rename(columns={'index': 'Classe'})
+        df = df.sort_values(by='Probabilité', ascending=False)
+        df['Pourcentage'] = df['Probabilité'].apply(lambda x: f"{x:.1f}%")
+
+        fig = px.bar(df,
+                     x="Classe",
+                     y="Probabilité",
+                     color="Probabilité",
+                     color_continuous_scale=px.colors.sequential.Viridis,
+                     title="Probabilités par classe",
+                     text="Pourcentage")
+
+        text_positions = []
+        for val in df['Probabilité']:
+            if val <= 10:
+                text_positions.append("outside")
+            else:
+                text_positions.append("inside")
+        fig.update_traces(textposition=text_positions)
+        fig.update_layout(xaxis_title="", yaxis_title="Probabilité (%)", height=400)
+
+        return f"{global_diag} ({top_class_name.upper()})", fig, "✅ Analyse terminée. Prêt pour Grad-CAM."
+    except Exception as e:
+        return f"Erreur: {e}", None, "❌ Erreur lors de l'analyse."
+
+def generate_gradcam_ui(progress=gr.Progress()):
+    global current_image, current_predictions
+    if current_image is None or current_predictions is None:
+        return None, "❌ Aucun résultat précédent — lance d'abord l'analyse rapide."
+    try:
+        _update_progress(progress, 0, desc="Début de la génération Grad-CAM...")
+        ensemble_probs = current_predictions["ensemble"]
+        top_class_idx = int(np.argmax(ensemble_probs))
+
+        candidates = []
+        if model_xcept is not None: candidates.append(("xception", model_xcept, current_predictions["xception"][top_class_idx]))
+        if model_resnet50 is not None: candidates.append(("resnet50", model_resnet50, current_predictions["resnet50"][top_class_idx]))
+        if model_densenet is not None: candidates.append(("densenet201", model_densenet, current_predictions["densenet201"][top_class_idx]))
+
+        if not candidates:
+            return None, "❌ Aucun modèle disponible pour Grad-CAM."
+
+        explainer_model_name, explainer_model, conf = max(candidates, key=lambda t: t[2])
+        explainer_layer = LAST_CONV_LAYERS.get(explainer_model_name)
+        _update_progress(progress, 5, desc=f"Génération Grad-CAM avec {explainer_model_name}...")
+
+        gradcam_img = make_gradcam(current_image, explainer_model, explainer_layer, class_index=top_class_idx, progress=progress)
+
+        _update_progress(progress, 100, desc="✅ Grad-CAM généré !")
+        return gradcam_img, f"✅ Grad-CAM généré avec {explainer_model_name} (confiance: {conf:.1%})"
+    except Exception as e:
+        import traceback; traceback.print_exc()
+        return None, f"❌ Erreur: {e}"
+
+# ---- GRADIO INTERFACE ----
+example_paths = ["ISIC_0024627.jpg", "ISIC_0025539.jpg", "ISIC_0031410.jpg"]
+
+with gr.Blocks(theme=gr.themes.Soft(), title="Analyse de lésions") as demo:
+    gr.Markdown("# 🔬 Analyse de lésions cutanées")
+
+    models_status = []
+    if model_resnet50: models_status.append("✅ ResNet50")
+    if model_densenet: models_status.append("✅ DenseNet201")
+    if model_xcept: models_status.append("✅ Xception")
+    gr.Markdown(f"**Modèles chargés:** {', '.join(models_status) if models_status else 'AUCUN'}")
+
     with gr.Row():
+        with gr.Column(scale=1):
+            input_image = gr.Image(type="pil", label="📸 Uploader une image")
+            with gr.Row():
+                quick_btn = gr.Button("⚡ Analyse Rapide", variant="primary")
+                gradcam_btn = gr.Button("🎯 Carte de chaleur", variant="secondary")
+            gr.Examples(examples=example_paths, inputs=input_image)
+        with gr.Column(scale=2):
+            output_label = gr.Label(label="📊 Diagnostic global")
+            output_plot = gr.Plot(label="📈 Probabilités")
+            output_gradcam = gr.Image(label="🔍 Visualisation Grad-CAM")
+            output_status = gr.Textbox(label="Statut", interactive=False)
+
+    quick_btn.click(fn=quick_predict_ui, inputs=input_image, outputs=[output_label, output_plot, output_status])
+    gradcam_btn.click(fn=generate_gradcam_ui, inputs=[], outputs=[output_gradcam, output_status])
+
+if __name__ == "__main__":
+    if all(m is None for m in [model_resnet50, model_densenet, model_xcept]):
+        print("\n\n🚨 ATTENTION: Aucun modèle n'a été chargé. L'application ne fonctionnera pas.\n\n")
+    demo.launch()