ericjedha committed
Commit 1c99721 · verified · 1 parent: 66d521b

Update app.py

Files changed (1):
  1. app.py +56 -136
app.py CHANGED
@@ -20,9 +20,8 @@ diagnosis_map = {
     'vasc': 'Bénin',
     'mel': 'Malin'
 }
-NUM_CLASSES = len(CLASS_NAMES)
 
-# ---- Téléchargement et chargement des modèles .keras ----
+# ---- Téléchargement modèles ----
 resnet_path = hf_hub_download(repo_id="ericjedha/resnet50", filename="Resnet50.keras")
 densenet_path = hf_hub_download(repo_id="ericjedha/densenet201", filename="Densenet201.keras")
 
@@ -39,129 +38,57 @@ def load_image(path, target_size):
     img = image.load_img(path, target_size=target_size)
     return image.img_to_array(img)
 
-# ---- Helpers robustes ----
-def to_numpy(y):
-    # Convertit n'importe quelle sortie (Tensor / list / ndarray) en ndarray 2D (batch, classes)
-    if isinstance(y, (list, tuple)):
-        y = y[0]
-    if isinstance(y, tf.Tensor):
-        y = y.numpy()
-    y = np.array(y)
-    if y.ndim == 1:
-        y = y[None, :]
-    return y
-
-def safe_preds(arr, target_len=NUM_CLASSES):
-    arr = to_numpy(arr)
-    # pad/trim pour garantir 7 classes
-    if arr.shape[1] < target_len:
-        pad = np.zeros((arr.shape[0], target_len - arr.shape[1]), dtype=arr.dtype)
-        arr = np.concatenate([arr, pad], axis=1)
-    elif arr.shape[1] > target_len:
-        arr = arr[:, :target_len]
-    return arr
-
+# ---- Wrapper robuste pour prédictions ----
 def get_primary_input_name(model):
-    # Essaie de retrouver un nom d'input si le modèle l'exige
-    try:
-        if hasattr(model, "input_names") and model.input_names:
-            return model.input_names[0]
-    except Exception:
-        pass
-    try:
-        # ex: 'input_layer:0' -> 'input_layer'
-        name = model.inputs[0].name
-        if ":" in name:
-            name = name.split(":")[0]
-        return name
-    except Exception:
-        return None
+    """Retourne le vrai nom d'input du modèle Keras."""
+    if isinstance(model.inputs, list) and hasattr(model.inputs[0], "name"):
+        return model.inputs[0].name.split(":")[0]
+    return None
 
 def safe_forward(model, x):
-    """
-    Fait passer x dans model en essayant plusieurs formats d'appel,
-    pour contourner les modèles sauvegardés avec un input nommé (ex: 'input_layer').
-    Retourne un np.ndarray (batch, classes).
-    """
-    # On s'assure du dtype float32 pour TF/OpenCV
+    """Forward pass qui évite les crashs liés aux noms d’inputs."""
     if isinstance(x, np.ndarray):
         x = x.astype(np.float32, copy=False)
 
-    # 1) Appel direct en mode fonctionnel
-    try:
-        y = model(x, training=False)
-        return safe_preds(y)
-    except Exception:
-        pass
+    input_name = get_primary_input_name(model)
 
-    # 2) predict standard
     try:
-        y = model.predict(x, verbose=0)
-        return safe_preds(y)
-    except Exception:
-        pass
-
-    # 3) Essayer avec nom d'input
-    input_name = get_primary_input_name(model)
-    if input_name is not None:
-        try:
-            y = model({input_name: x}, training=False)
-            return safe_preds(y)
-        except Exception:
-            pass
-        try:
-            y = model.predict({input_name: x}, verbose=0)
-            return safe_preds(y)
-        except Exception:
-            pass
-
-    # Dernier recours: lever une erreur claire
-    raise RuntimeError("Impossible de faire l'inférence: le modèle exige peut-être un input nommé et incompatible.")
+        if input_name:
+            return model({input_name: x}, training=False).numpy()
+        else:
+            return model(x, training=False).numpy()
+    except Exception as e:
+        print(f"[safe_forward] Erreur avec {model.name}: {e}")
+        return np.zeros((1, len(CLASS_NAMES)))  # fallback
 
 # ---- Prédiction single image ----
 def predict_single(img_path, weights=(0.45, 0.25, 0.30)):
-    bx = preprocess_xception(np.expand_dims(load_image(img_path, (299, 299)), axis=0).astype(np.float32))
-    br = preprocess_resnet(np.expand_dims(load_image(img_path, (224, 224)), axis=0).astype(np.float32))
-    bd = preprocess_densenet(np.expand_dims(load_image(img_path, (224, 224)), axis=0).astype(np.float32))
+    bx = preprocess_xception(np.expand_dims(load_image(img_path, (299, 299)), axis=0))
+    br = preprocess_resnet(np.expand_dims(load_image(img_path, (224, 224)), axis=0))
+    bd = preprocess_densenet(np.expand_dims(load_image(img_path, (224, 224)), axis=0))
 
-    pred_x = safe_forward(model_xcept, bx)
+    pred_x = safe_forward(model_xcept, bx)
     pred_r = safe_forward(model_resnet50, br)
     pred_d = safe_forward(model_densenet, bd)
 
-    preds = weights[0]*pred_x + weights[1]*pred_r + weights[2]*pred_d
+    preds = (weights[0] * pred_x + weights[1] * pred_r + weights[2] * pred_d)
 
-    # boost MEL avec DenseNet si index OK
-    mel_idx = label_to_index.get('mel', None)
-    if mel_idx is not None and mel_idx < preds.shape[1] and mel_idx < pred_d.shape[1]:
-        preds[:, mel_idx] = 0.5*preds[:, mel_idx] + 0.5*pred_d[:, mel_idx]
+    # Boost MEL avec DenseNet
+    mel_idx = label_to_index['mel']
+    preds[:, mel_idx] = (0.5 * preds[:, mel_idx] + 0.5 * pred_d[:, mel_idx])
 
     return preds[0]
 
-# ---- Grad-CAM ultra-robuste ----
-def make_gradcam(img_path, model, last_conv_layer_name=None, class_index=None):
+# ---- Grad-CAM ----
+def make_gradcam(img_path, model, last_conv_layer_name="conv5_block32_concat", class_index=None):
     img = image.load_img(img_path, target_size=(224, 224))
-    img_array = image.img_to_array(img).astype(np.float32)
+    img_array = image.img_to_array(img)
     input_array = np.expand_dims(img_array, axis=0)
-    input_array = preprocess_densenet(input_array).astype(np.float32)
+    input_array = preprocess_densenet(input_array)
 
-    # Classe cible
     if class_index is None:
         preds = safe_forward(model, input_array)
-        class_index = int(np.argmax(preds[0]))
-
-    # Trouver automatiquement la dernière conv si besoin
-    if last_conv_layer_name is None:
-        for layer in reversed(model.layers):
-            try:
-                out = getattr(layer, "output", None)
-                shp = getattr(out, "shape", None)
-                if shp is not None and hasattr(shp, "__len__") and len(shp) == 4 and "conv" in layer.name:
-                    last_conv_layer_name = layer.name
-                    break
-            except Exception:
-                continue
-    if last_conv_layer_name is None:
-        raise ValueError("Impossible de trouver une couche convolutionnelle pour Grad-CAM.")
+        class_index = np.argmax(preds[0])
 
     grad_model = Model(
         inputs=model.inputs,
@@ -169,34 +96,24 @@ def make_gradcam(img_path, model, last_conv_layer_name=None, class_index=None):
     )
 
     with tf.GradientTape() as tape:
-        conv_outputs, predictions = grad_model(input_array)
-        if isinstance(predictions, (list, tuple)):
-            predictions = predictions[0]
-        predictions = tf.convert_to_tensor(predictions)
+        conv_outputs, predictions = grad_model(input_array, training=False)
         loss = predictions[:, class_index]
 
-    grads = tape.gradient(loss, conv_outputs)[0]  # (H, W, C)
-    pooled_grads = tf.reduce_mean(grads, axis=(0, 1))  # (C,)
-    conv_map = conv_outputs[0]  # (H, W, C)
+    grads = tape.gradient(loss, conv_outputs)[0]
+    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
 
-    # Heatmap TF -> normalisation
-    heatmap_tf = tf.reduce_sum(conv_map * pooled_grads, axis=-1)  # (H, W)
-    heatmap_tf = tf.nn.relu(heatmap_tf)
-    max_val = tf.reduce_max(heatmap_tf)
-    heatmap_tf = tf.where(max_val > 0, heatmap_tf / (max_val + 1e-6), tf.ones_like(heatmap_tf))
+    conv_outputs = conv_outputs[0]
+    heatmap = conv_outputs @ pooled_grads[..., tf.newaxis]
+    heatmap = tf.squeeze(heatmap)
+    heatmap = np.maximum(heatmap, 0) / (np.max(heatmap) + 1e-6)
 
-    # En numpy float32 pour OpenCV
-    heatmap = heatmap_tf.numpy().astype(np.float32)
-    if not np.isfinite(heatmap).all():
-        heatmap = np.nan_to_num(heatmap, nan=0.0, posinf=1.0, neginf=0.0)
-
-    heatmap = cv2.resize(heatmap, (224, 224))
-    heatmap_u8 = np.uint8(np.clip(255 * heatmap, 0, 255))
-    heatmap_color = cv2.applyColorMap(heatmap_u8, cv2.COLORMAP_JET)
+    heatmap = cv2.resize(heatmap.numpy(), (224, 224))
+    heatmap = np.uint8(255 * heatmap)
+    heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
 
     superimposed_img = cv2.addWeighted(
         cv2.cvtColor(img_array.astype("uint8"), cv2.COLOR_RGB2BGR),
-        0.6, heatmap_color, 0.4, 0
+        0.6, heatmap, 0.4, 0
    )
     return cv2.cvtColor(superimposed_img, cv2.COLOR_BGR2RGB)
 
@@ -205,29 +122,33 @@ def gradio_predict(image_file):
     try:
         probs = predict_single(image_file)
 
-        # Tri + barplot data
         sorted_idx = np.argsort(-probs)
         sorted_labels = [CLASS_NAMES[i].upper() for i in sorted_idx]
-        sorted_probs = (probs[sorted_idx] * 100).astype(float)
+        sorted_probs = probs[sorted_idx] * 100
 
-        benign_prob = float(sum(probs[i] for i, cls in enumerate(CLASS_NAMES) if diagnosis_map[cls] == "Bénin"))
-        malign_prob = float(sum(probs[i] for i, cls in enumerate(CLASS_NAMES) if diagnosis_map[cls] == "Malin"))
+        benign_prob = sum(probs[i] for i, cls in enumerate(CLASS_NAMES) if diagnosis_map[cls] == "Bénin")
+        malign_prob = sum(probs[i] for i, cls in enumerate(CLASS_NAMES) if diagnosis_map[cls] == "Malin")
         global_diag = "Bénin" if benign_prob >= malign_prob else "Malin"
 
-        bar_plot_data = {"x": sorted_labels, "y": sorted_probs.tolist()}
+        bar_data = {"Classes": sorted_labels, "Probabilité (%)": sorted_probs.tolist()}
 
-        # Grad-CAM (DenseNet)
-        top_class = int(np.argmax(probs))
+        # Grad-CAM sur la meilleure classe
+        top_class = np.argmax(probs)
         gradcam_img = make_gradcam(image_file, model_densenet, class_index=top_class)
 
-        return global_diag, bar_plot_data, gradcam_img
+        return global_diag, gr.BarPlot.update(
+            value=bar_data,
+            x="Classes", y="Probabilité (%)",
+            title="Distribution des classes"
+        ), gradcam_img
 
     except Exception as e:
         print("Erreur dans gradio_predict :", e)
-        # Toujours renvoyer des types valides pour éviter JSONDecodeError côté Gradio
-        return "Erreur", {"x": [], "y": []}, None
 
 # ---- Gradio UI ----
+examples = ["exemple1.jpg", "exemple2.jpg", "exemple3.jpg"]
+
 demo = gr.Interface(
     fn=gradio_predict,
     inputs=gr.Image(type="filepath", label="Uploader une image de lésion"),
@@ -236,11 +157,10 @@ demo = gr.Interface(
         gr.BarPlot(label="Probabilités par classe"),
         gr.Image(label="Visualisation Grad-CAM")
     ],
-    examples=[],  # éviter les erreurs de cache sur Spaces
-    title="Analyse de lésions cutanées (Ensemble + Grad-CAM)",
-    description="Prédiction Bénin/Malin avec explication visuelle."
+    examples=examples,
+    title="Analyse de lésions cutanées (Ensemble de modèles + Grad-CAM)",
+    description="Cet outil propose une prédiction de la nature de la lésion (Bénin/Malin) avec explication visuelle."
 )
 
 if __name__ == "__main__":
-    # Sur Hugging Face Spaces, share=True n'est pas supporté.
     demo.launch()
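As a quick reference alongside the diff: the ensembling that the new predict_single implements is a weighted average of the three model outputs, with the 'mel' score then averaged 50/50 against the DenseNet score. A minimal numpy-only sketch of that arithmetic follows; the class list, its ordering and the probability values are invented purely for illustration (the real CLASS_NAMES and label_to_index are defined near the top of app.py and are not part of this diff).

import numpy as np

# Hypothetical class order and made-up softmax outputs, shape (1, 7).
CLASS_NAMES = ['akiec', 'bcc', 'bkl', 'df', 'nv', 'vasc', 'mel']
label_to_index = {name: i for i, name in enumerate(CLASS_NAMES)}
pred_x = np.array([[0.05, 0.05, 0.10, 0.05, 0.50, 0.05, 0.20]])  # Xception
pred_r = np.array([[0.10, 0.05, 0.05, 0.05, 0.45, 0.05, 0.25]])  # ResNet50
pred_d = np.array([[0.05, 0.05, 0.05, 0.05, 0.40, 0.05, 0.35]])  # DenseNet201

weights = (0.45, 0.25, 0.30)
preds = weights[0] * pred_x + weights[1] * pred_r + weights[2] * pred_d

# 'mel' is averaged 50/50 with the DenseNet score, as in the new predict_single.
mel_idx = label_to_index['mel']
preds[:, mel_idx] = 0.5 * preds[:, mel_idx] + 0.5 * pred_d[:, mel_idx]

print(preds[0])                               # ensembled scores (not re-normalised)
print(CLASS_NAMES[int(np.argmax(preds[0]))])  # top class passed to make_gradcam

For context, the standard Grad-CAM combination step behind this kind of heatmap is: pool the gradients over the spatial axes to get one weight per channel, take the channel-weighted sum of the feature map, apply ReLU, then normalise. Below is a standalone numpy sketch with random stand-in arrays; the shapes are only roughly indicative of DenseNet201's last feature map, and in the app the real conv_outputs and grads come from grad_model and the gradient tape.

import numpy as np

rng = np.random.default_rng(0)
conv_map = rng.random((7, 7, 1920)).astype(np.float32)  # (H, W, C) feature-map stand-in
grads = rng.random((7, 7, 1920)).astype(np.float32)     # d(class score)/d(conv_map) stand-in

pooled_grads = grads.mean(axis=(0, 1))                          # (C,) channel weights
heatmap = np.squeeze(conv_map @ pooled_grads[..., np.newaxis])  # channel-weighted sum -> (H, W)
heatmap = np.maximum(heatmap, 0) / (heatmap.max() + 1e-6)       # ReLU, then normalise to [0, 1]
print(heatmap.shape)  # (7, 7); the app then upsamples to 224x224 with cv2.resize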