edyoshikun committed
Commit e805cf3 · Parent: f875ce0

changing submission button for intuitive usage

Files changed (2):
  1. app.py (+317 −270)
  2. style.css (+19 −0)
app.py CHANGED
@@ -15,18 +15,25 @@ class VSGradio:
         self.model_config = model_config
         self.model_ckpt_path = model_ckpt_path
         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        print(f"Using device: {self.device}")
         self.model = None
         self.load_model()
 
     def load_model(self):
-        # Load the model checkpoint and move it to the correct device (GPU or CPU)
-        self.model = VSUNet.load_from_checkpoint(
-            self.model_ckpt_path,
-            architecture="UNeXt2_2D",
-            model_config=self.model_config,
-        )
-        self.model.to(self.device)  # Move the model to the correct device (GPU/CPU)
-        self.model.eval()
+        try:
+            # Load the model checkpoint and move it to the correct device (GPU or CPU)
+            print(f"Loading model from checkpoint: {self.model_ckpt_path}")
+            self.model = VSUNet.load_from_checkpoint(
+                self.model_ckpt_path,
+                architecture="UNeXt2_2D",
+                model_config=self.model_config,
+            )
+            self.model.to(self.device)  # Move the model to the correct device (GPU/CPU)
+            self.model.eval()
+            print("Model loaded successfully and set to evaluation mode")
+        except Exception as e:
+            print(f"Error loading model: {e}")
+            raise
 
     def normalize_fov(self, input: ArrayLike):
         "Normalizing the fov with zero mean and unit variance"
@@ -47,45 +54,55 @@ class VSGradio:
         return resize(inp, (new_height, new_width), anti_aliasing=True)
 
     def predict(self, inp, scaling_factor: float):
-        # Normalize the input and convert to tensor
-        inp = self.normalize_fov(inp)
-        original_shape = inp.shape
-        # Resize the input image to the expected cell diameter
-        inp = apply_rescale_image(inp, scaling_factor)
-
-        # Convert the input to a tensor
-        inp = torch.from_numpy(np.array(inp).astype(np.float32))
-
-        # Prepare the input dictionary and move input to the correct device (GPU or CPU)
-        test_dict = dict(
-            index=None,
-            source=inp.unsqueeze(0).unsqueeze(0).unsqueeze(0).to(self.device),
-        )
+        try:
+            if inp is None:
+                print("Error: Input image is None")
+                return None, None
+
+            # Normalize the input and convert to tensor
+            inp = self.normalize_fov(inp)
+            original_shape = inp.shape
+            # Resize the input image to the expected cell diameter
+            inp = apply_rescale_image(inp, scaling_factor)
+
+            # Convert the input to a tensor
+            inp = torch.from_numpy(np.array(inp).astype(np.float32))
+
+            # Prepare the input dictionary and move input to the correct device (GPU or CPU)
+            test_dict = dict(
+                index=None,
+                source=inp.unsqueeze(0).unsqueeze(0).unsqueeze(0).to(self.device),
+            )
 
-        # Run model inference
-        with torch.inference_mode():
-            self.model.on_predict_start()  # Necessary preprocessing for the model
-            pred = (
-                self.model.predict_step(test_dict, 0, 0).cpu().numpy()
-            )  # Move output back to CPU for post-processing
+            # Run model inference
+            with torch.inference_mode():
+                self.model.on_predict_start()  # Necessary preprocessing for the model
+                pred = (
+                    self.model.predict_step(test_dict, 0, 0).cpu().numpy()
+                )  # Move output back to CPU for post-processing
 
-        # Post-process the model output and rescale intensity
-        nuc_pred = pred[0, 0, 0]
-        mem_pred = pred[0, 1, 0]
+            # Post-process the model output and rescale intensity
+            nuc_pred = pred[0, 0, 0]
+            mem_pred = pred[0, 1, 0]
 
-        # Resize predictions back to the original image size
-        nuc_pred = resize(nuc_pred, original_shape, anti_aliasing=True)
-        mem_pred = resize(mem_pred, original_shape, anti_aliasing=True)
+            # Resize predictions back to the original image size
+            nuc_pred = resize(nuc_pred, original_shape, anti_aliasing=True)
+            mem_pred = resize(mem_pred, original_shape, anti_aliasing=True)
 
-        # Define colormaps
-        green_colormap = cmap.Colormap("green")  # Nucleus: black to green
-        magenta_colormap = cmap.Colormap("magenta")
+            # Define colormaps
+            green_colormap = cmap.Colormap("green")  # Nucleus: black to green
+            magenta_colormap = cmap.Colormap("magenta")
 
-        # Apply the colormap to the predictions
-        nuc_rgb = apply_colormap(nuc_pred, green_colormap)
-        mem_rgb = apply_colormap(mem_pred, magenta_colormap)
+            # Apply the colormap to the predictions
+            nuc_rgb = apply_colormap(nuc_pred, green_colormap)
+            mem_rgb = apply_colormap(mem_pred, magenta_colormap)
 
-        return nuc_rgb, mem_rgb  # Return both nucleus and membrane images
+            return nuc_rgb, mem_rgb  # Return both nucleus and membrane images
+        except Exception as e:
+            print(f"Error during prediction: {e}")
+            # Return empty images of the right shape and type in case of error
+            empty_img = np.zeros((300, 300, 3), dtype=np.uint8)
+            return empty_img, empty_img
 
 
 def apply_colormap(prediction, colormap: cmap.Colormap):
@@ -146,250 +163,280 @@ def load_css(file_path):
 
 
 if __name__ == "__main__":
-    # Download the model checkpoint from Hugging Face
-    model_ckpt_path = hf_hub_download(
-        repo_id="compmicro-czb/VSCyto2D", filename="epoch=399-step=23200.ckpt"
-    )
-
-    # Model configuration
-    model_config = {
-        "in_channels": 1,
-        "out_channels": 2,
-        "encoder_blocks": [3, 3, 9, 3],
-        "dims": [96, 192, 384, 768],
-        "decoder_conv_blocks": 2,
-        "stem_kernel_size": [1, 2, 2],
-        "in_stack_depth": 1,
-        "pretraining": False,
-    }
-
-    vsgradio = VSGradio(model_config, model_ckpt_path)
-
-    # Initialize the Gradio app using Blocks
-    with gr.Blocks(css=load_css("style.css")) as demo:
-        # Title and description
-        gr.HTML(
-            """
-            <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
-                <a href="https://www.czbiohub.org/sf/" target="_blank">
-                    <img src="https://huggingface.co/spaces/compmicro-czb/VirtualStaining/resolve/main/misc/czb_mark.png" style="width: 100px; height: auto; margin-right: 10px;">
-                </a>
-                <div class='title-block'> Robust virtual staining of landmark organelles with Cytoland </div>
-            </div>
-            """
-        )
-        gr.HTML(
-            """
-            <div class='description-block'>
-                <p><b>Model:</b> VSCyto2D</p>
-                <p><b>Input:</b> label-free image (e.g., QPI or phase contrast).</p>
-                <p><b>Output:</b> Virtual staining of nucleus and membrane.</p>
-                <p><b>Note:</b> The model works well with QPI, and sometimes generalizes to phase contrast and DIC.<br>
-                It was trained primarily on HEK293T, BJ5, and A549 cells imaged at 20x. <br>
-                We continue to diagnose and improve generalization<p>
-                <p>Check out our preprint: <a href='https://www.biorxiv.org/content/10.1101/2024.05.31.596901' target='_blank'><i>Liu et al., Robust virtual staining of landmark organelles</i></a></p>
-                <p> For training your own model and analyzing large amounts of data, use our <a href='https://github.com/mehta-lab/VisCy/tree/main/examples/virtual_staining/dlmbl_exercise' target='_blank'>GitHub repository</a>.</p>
-            </div>
-            """
-        )
-
-        # Layout for input and output images
-        with gr.Row():
-            input_image = gr.Image(type="numpy", image_mode="L", label="Upload Image")
-            adjusted_image = gr.Image(
-                type="numpy",
-                image_mode="L",
-                label="Adjusted Image (Preview)",
-                interactive=False,
-            )
-
-            with gr.Column():
-                output_nucleus = gr.Image(
-                    type="numpy", image_mode="RGB", label="VS Nucleus"
-                )
-                output_membrane = gr.Image(
-                    type="numpy", image_mode="RGB", label="VS Membrane"
-                )
-                merged_image = gr.Image(
-                    type="numpy", image_mode="RGB", label="Merged Image", visible=False
-                )
-
-        # Checkbox for applying invert
-        preprocess_invert = gr.Checkbox(label="Invert Image", value=False)
-
-        # Slider for gamma adjustment
-        gamma_factor = gr.Slider(
-            label="Adjust Gamma", minimum=0.01, maximum=5.0, value=1.0, step=0.1
-        )
-
-        # Input field for the cell diameter in microns
-        scaling_factor = gr.Textbox(
-            label="Rescaling image factor",
-            value="1.0",
-            placeholder="Rescaling factor for the input image",
-        )
-
-        # Checkbox for merging predictions
-        merge_checkbox = gr.Checkbox(
-            label="Merge Predictions into one image", value=True
-        )
-
-        input_image.change(
-            fn=apply_image_adjustments,
-            inputs=[input_image, preprocess_invert, gamma_factor],
-            outputs=adjusted_image,
-        )
-
-        gamma_factor.change(
-            fn=apply_image_adjustments,
-            inputs=[input_image, preprocess_invert, gamma_factor],
-            outputs=adjusted_image,
-        )
-        cell_name = gr.Textbox(
-            label="Cell Name", placeholder="Cell Type", visible=False
-        )
-        imaging_modality = gr.Textbox(
-            label="Imaging Modality", placeholder="Imaging Modality", visible=False
-        )
-        references = gr.Textbox(
-            label="References", placeholder="References", visible=False
-        )
-
-        preprocess_invert.change(
-            fn=apply_image_adjustments,
-            inputs=[input_image, preprocess_invert, gamma_factor],
-            outputs=adjusted_image,
-        )
-
-        # Button to trigger prediction and update the output images
-        submit_button = gr.Button("Submit")
-
-        # Function to handle prediction and merging if needed
-        def submit_and_merge(inp, scaling_factor, merge):
-            nucleus, membrane = vsgradio.predict(inp, scaling_factor)
-            if merge:
-                merged = merge_images(nucleus, membrane)
-                return (
-                    merged,
-                    gr.update(visible=True),
-                    nucleus,
-                    gr.update(visible=False),
-                    membrane,
-                    gr.update(visible=False),
-                )
-            else:
-                return (
-                    None,
-                    gr.update(visible=False),
-                    nucleus,
-                    gr.update(visible=True),
-                    membrane,
-                    gr.update(visible=True),
-                )
-
-        submit_button.click(
-            fn=submit_and_merge,
-            inputs=[adjusted_image, scaling_factor, merge_checkbox],
-            outputs=[
-                merged_image,
-                merged_image,
-                output_nucleus,
-                output_nucleus,
-                output_membrane,
-                output_membrane,
-            ],
-        )
-        # Clear everything when the input image changes
-        input_image.change(
-            fn=clear_outputs,
-            inputs=input_image,
-            outputs=[adjusted_image, output_nucleus, output_membrane],
-        )
-
-        # Function to handle merging the two predictions after they are shown
-        def merge_predictions_fn(nucleus_image, membrane_image, merge):
-            if merge:
-                merged = merge_images(nucleus_image, membrane_image)
-                return (
-                    merged,
-                    gr.update(visible=True),
-                    gr.update(visible=False),
-                    gr.update(visible=False),
-                )
-            else:
-                return (
-                    None,
-                    gr.update(visible=False),
-                    gr.update(visible=True),
-                    gr.update(visible=True),
-                )
-
-        # Toggle between merged and separate views when the checkbox is checked
-        merge_checkbox.change(
-            fn=merge_predictions_fn,
-            inputs=[output_nucleus, output_membrane, merge_checkbox],
-            outputs=[merged_image, merged_image, output_nucleus, output_membrane],
-        )
-
-        # Example images and article
-        examples_component = gr.Examples(
-            examples=[
-                ["examples/a549.png", "A549", "QPI", 1.0, False, "1.0", "1"],
-                ["examples/hek.png", "HEK293T", "QPI", 1.0, False, "1.0", "1"],
-                ["examples/HEK_PhC.png", "HEK293T", "PhC", 1.2, True, "1.0", "1"],
-                ["examples/livecell_A172.png", "A172", "PhC", 1.0, True, "1.0", "2"],
-                ["examples/ctc_HeLa.png", "HeLa", "DIC", 0.7, False, "0.7", "3"],
-                [
-                    "examples/ctc_glioblastoma_astrocytoma_U373.png",
-                    "Glioblastoma",
-                    "PhC",
-                    1.0,
-                    True,
-                    "2.0",
-                    "3",
-                ],
-                ["examples/U2OS_BF.png", "U2OS", "Brightfield", 1.0, False, "0.3", "4"],
-                ["examples/U2OS_QPI.png", "U2OS", "QPI", 1.0, False, "0.3", "4"],
-                [
-                    "examples/neuromast2.png",
-                    "Zebrafish neuromast",
-                    "QPI",
-                    0.6,
-                    False,
-                    "1.2",
-                    "1",
-                ],
-                [
-                    "examples/mousekidney.png",
-                    "Mouse Kidney",
-                    "QPI",
-                    0.8,
-                    False,
-                    "0.6",
-                    "4",
-                ],
-            ],
-            inputs=[
-                input_image,
-                cell_name,
-                imaging_modality,
-                gamma_factor,
-                preprocess_invert,
-                scaling_factor,
-                references,
-            ],
-        )
-        # Article or footer information
-        gr.HTML(
-            """
-            <div class='article-block'>
-                <li>1. <a href='https://www.biorxiv.org/content/10.1101/2024.05.31.596901' target='_blank'>Liu et al., Robust virtual staining of landmark organelles</a></li>
-                <li>2. <a href='https://sartorius-research.github.io/LIVECell/' target='_blank'>Edlund et. al. LIVECEll-A large-scale dataset for label-free live cell segmentation</a></li>
-                <li>3. <a href='https://celltrackingchallenge.net/' target='_blank'>Maska et. al.,The cell tracking challenge: 10 years of objective benchmarking </a></li>
-                <li>4. <a href='https://elifesciences.org/articles/55502' target='_blank'>Guo et. al., Revealing architectural order with quantitative label-free imaging and deep learning</a></li>
-            </div>
-            """
-        )
-
-    # Launch the Gradio app
-    demo.launch(server_name="0.0.0.0", server_port=7860)
+    try:
+        # Download the model checkpoint from Hugging Face
+        print("Downloading model checkpoint...")
+        model_ckpt_path = hf_hub_download(
+            repo_id="compmicro-czb/VSCyto2D", filename="epoch=399-step=23200.ckpt"
+        )
+        print(f"Model downloaded successfully to: {model_ckpt_path}")
+
+        # Model configuration
+        model_config = {
+            "in_channels": 1,
+            "out_channels": 2,
+            "encoder_blocks": [3, 3, 9, 3],
+            "dims": [96, 192, 384, 768],
+            "decoder_conv_blocks": 2,
+            "stem_kernel_size": [1, 2, 2],
+            "in_stack_depth": 1,
+            "pretraining": False,
+        }
+
+        print("Initializing VSGradio...")
+        vsgradio = VSGradio(model_config, model_ckpt_path)
+        print(f"VSGradio initialized successfully! Using device: {vsgradio.device}")
+
+        # Initialize the Gradio app using Blocks
+        with gr.Blocks(css=load_css("style.css")) as demo:
+            # Title and description
+            gr.HTML(
+                """
+                <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
+                    <a href="https://www.czbiohub.org/sf/" target="_blank">
+                        <img src="https://huggingface.co/spaces/compmicro-czb/VirtualStaining/resolve/main/misc/czb_mark.png" style="width: 100px; height: auto; margin-right: 10px;">
+                    </a>
+                    <div class='title-block'> Robust virtual staining of landmark organelles with Cytoland </div>
+                </div>
+                """
+            )
+            gr.HTML(
+                """
+                <div class='description-block'>
+                    <p><b>Model:</b> VSCyto2D</p>
+                    <p><b>Input:</b> label-free image (e.g., QPI or phase contrast).</p>
+                    <p><b>Output:</b> Virtual staining of nucleus and membrane.</p>
+                    <p><b>Note:</b> The model works well with QPI, and sometimes generalizes to phase contrast and DIC.<br>
+                    It was trained primarily on HEK293T, BJ5, and A549 cells imaged at 20x. <br>
+                    We continue to diagnose and improve generalization<p>
+                    <p>Check out our preprint: <a href='https://www.biorxiv.org/content/10.1101/2024.05.31.596901' target='_blank'><i>Liu et al., Robust virtual staining of landmark organelles</i></a></p>
+                    <p> For training your own model and analyzing large amounts of data, use our <a href='https://github.com/mehta-lab/VisCy/tree/main/examples/virtual_staining/dlmbl_exercise' target='_blank'>GitHub repository</a>.</p>
+                </div>
+                """
+            )
+
+            # Layout for input and output images
+            with gr.Row():
+                input_image = gr.Image(
+                    type="numpy", image_mode="L", label="Upload Image"
+                )
+                adjusted_image = gr.Image(
+                    type="numpy",
+                    image_mode="L",
+                    label="Adjusted Image (Preview)",
+                    interactive=False,
+                )
+
+                with gr.Column():
+                    output_nucleus = gr.Image(
+                        type="numpy", image_mode="RGB", label="VS Nucleus"
+                    )
+                    output_membrane = gr.Image(
+                        type="numpy", image_mode="RGB", label="VS Membrane"
+                    )
+                    merged_image = gr.Image(
+                        type="numpy",
+                        image_mode="RGB",
+                        label="Merged Image",
+                        visible=False,
+                    )
+
+            # Checkbox for applying invert
+            preprocess_invert = gr.Checkbox(label="Invert Image", value=False)
+
+            # Slider for gamma adjustment
+            gamma_factor = gr.Slider(
+                label="Adjust Gamma", minimum=0.01, maximum=5.0, value=1.0, step=0.1
+            )
+
+            # Input field for the cell diameter in microns
+            scaling_factor = gr.Textbox(
+                label="Rescaling image factor",
+                value="1.0",
+                placeholder="Rescaling factor for the input image",
+            )
+
+            # Checkbox for merging predictions
+            merge_checkbox = gr.Checkbox(
+                label="Merge Predictions into one image", value=True
+            )
+
+            input_image.change(
+                fn=apply_image_adjustments,
+                inputs=[input_image, preprocess_invert, gamma_factor],
+                outputs=adjusted_image,
+            )
+
+            gamma_factor.change(
+                fn=apply_image_adjustments,
+                inputs=[input_image, preprocess_invert, gamma_factor],
+                outputs=adjusted_image,
+            )
+            cell_name = gr.Textbox(
+                label="Cell Name", placeholder="Cell Type", visible=False
+            )
+            imaging_modality = gr.Textbox(
+                label="Imaging Modality", placeholder="Imaging Modality", visible=False
+            )
+            references = gr.Textbox(
+                label="References", placeholder="References", visible=False
+            )
+
+            preprocess_invert.change(
+                fn=apply_image_adjustments,
+                inputs=[input_image, preprocess_invert, gamma_factor],
+                outputs=adjusted_image,
+            )
+
+            # Button to trigger prediction and update the output images
+            submit_button = gr.Button(
+                "Virtually Stain Image", elem_classes=["submit-button"]
+            )
+
+            # Function to handle prediction and merging if needed
+            def submit_and_merge(inp, scaling_factor, merge):
+                nucleus, membrane = vsgradio.predict(inp, scaling_factor)
+                if merge:
+                    merged = merge_images(nucleus, membrane)
+                    return (
+                        merged,
+                        gr.update(visible=True),
+                        nucleus,
+                        gr.update(visible=False),
+                        membrane,
+                        gr.update(visible=False),
+                    )
+                else:
+                    return (
+                        None,
+                        gr.update(visible=False),
+                        nucleus,
+                        gr.update(visible=True),
+                        membrane,
+                        gr.update(visible=True),
+                    )
+
+            submit_button.click(
+                fn=submit_and_merge,
+                inputs=[adjusted_image, scaling_factor, merge_checkbox],
+                outputs=[
+                    merged_image,
+                    merged_image,
+                    output_nucleus,
+                    output_nucleus,
+                    output_membrane,
+                    output_membrane,
+                ],
+            )
+            # Clear everything when the input image changes
+            input_image.change(
+                fn=clear_outputs,
+                inputs=input_image,
+                outputs=[adjusted_image, output_nucleus, output_membrane],
+            )
+
+            # Function to handle merging the two predictions after they are shown
+            def merge_predictions_fn(nucleus_image, membrane_image, merge):
+                if merge:
+                    merged = merge_images(nucleus_image, membrane_image)
+                    return (
+                        merged,
+                        gr.update(visible=True),
+                        gr.update(visible=False),
+                        gr.update(visible=False),
+                    )
+                else:
+                    return (
+                        None,
+                        gr.update(visible=False),
+                        gr.update(visible=True),
+                        gr.update(visible=True),
+                    )
+
+            # Toggle between merged and separate views when the checkbox is checked
+            merge_checkbox.change(
+                fn=merge_predictions_fn,
+                inputs=[output_nucleus, output_membrane, merge_checkbox],
+                outputs=[merged_image, merged_image, output_nucleus, output_membrane],
+            )
+
+            # Example images and article
+            examples_component = gr.Examples(
+                examples=[
+                    ["examples/a549.png", "A549", "QPI", 1.0, False, "1.0", "1"],
+                    ["examples/hek.png", "HEK293T", "QPI", 1.0, False, "1.0", "1"],
+                    ["examples/HEK_PhC.png", "HEK293T", "PhC", 1.2, True, "1.0", "1"],
+                    [
+                        "examples/livecell_A172.png",
+                        "A172",
+                        "PhC",
+                        1.0,
+                        True,
+                        "1.0",
+                        "2",
+                    ],
+                    ["examples/ctc_HeLa.png", "HeLa", "DIC", 0.7, False, "0.7", "3"],
+                    [
+                        "examples/ctc_glioblastoma_astrocytoma_U373.png",
+                        "Glioblastoma",
+                        "PhC",
+                        1.0,
+                        True,
+                        "2.0",
+                        "3",
+                    ],
+                    [
+                        "examples/U2OS_BF.png",
+                        "U2OS",
+                        "Brightfield",
+                        1.0,
+                        False,
+                        "0.3",
+                        "4",
+                    ],
+                    ["examples/U2OS_QPI.png", "U2OS", "QPI", 1.0, False, "0.3", "4"],
+                    [
+                        "examples/neuromast2.png",
+                        "Zebrafish neuromast",
+                        "QPI",
+                        0.6,
+                        False,
+                        "1.2",
+                        "1",
+                    ],
+                    [
+                        "examples/mousekidney.png",
+                        "Mouse Kidney",
+                        "QPI",
+                        0.8,
+                        False,
+                        "0.6",
+                        "4",
+                    ],
+                ],
+                inputs=[
+                    input_image,
+                    cell_name,
+                    imaging_modality,
+                    gamma_factor,
+                    preprocess_invert,
+                    scaling_factor,
+                    references,
+                ],
+            )
+            # Article or footer information
+            gr.HTML(
+                """
+                <div class='article-block'>
+                    <li>1. <a href='https://www.biorxiv.org/content/10.1101/2024.05.31.596901' target='_blank'>Liu et al., Robust virtual staining of landmark organelles</a></li>
+                    <li>2. <a href='https://sartorius-research.github.io/LIVECell/' target='_blank'>Edlund et. al. LIVECEll-A large-scale dataset for label-free live cell segmentation</a></li>
+                    <li>3. <a href='https://celltrackingchallenge.net/' target='_blank'>Maska et. al.,The cell tracking challenge: 10 years of objective benchmarking </a></li>
+                    <li>4. <a href='https://elifesciences.org/articles/55502' target='_blank'>Guo et. al., Revealing architectural order with quantitative label-free imaging and deep learning</a></li>
+                </div>
+                """
+            )
+
+        # Launch the Gradio app
+        demo.launch()
+    except Exception as e:
+        print(f"Error initializing VSGradio: {e}")
style.css CHANGED
@@ -27,3 +27,22 @@
     margin-top: 30px;
     /* No color or background settings */
 }
+
+/* Prominent Submit Button */
+.submit-button {
+    background-color: #007bff !important;
+    color: white !important;
+    font-size: 18px !important;
+    font-weight: bold !important;
+    padding: 12px 24px !important;
+    border-radius: 8px !important;
+    margin: 15px auto !important;
+    display: block !important;
+    transition: background-color 0.3s ease !important;
+    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1) !important;
+}
+
+.submit-button:hover {
+    background-color: #0056b3 !important;
+    box-shadow: 0 6px 8px rgba(0, 0, 0, 0.15) !important;
+}
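
The !important flags are presumably there so these rules win over the component styles Gradio's theme injects at runtime, which can otherwise override a plain class selector. For comparison, a sketch of the framework's built-in route to a prominent button, variant="primary", which needs no custom CSS:

# Alternative not used in this commit: Gradio's built-in button variants
# give an accent-colored, prominent button without !important overrides.
import gradio as gr

with gr.Blocks() as demo:
    gr.Button("Virtually Stain Image", variant="primary")

if __name__ == "__main__":
    demo.launch()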