AnonRes committed on
Commit e33e270 · verified · 1 Parent(s): 0720126

Include README.md and add citation info for architectures, methods, dataset, and framework into the checkpoint

Files changed (3):
  1. README.md +52 -3
  2. adaptation_plan.json +108 -0
  3. checkpoint_final.pth +3 -0
README.md CHANGED
@@ -1,3 +1,52 @@
- ---
- license: cc-by-4.0
- ---
+ ---
+ license: cc-by-4.0
+ datasets:
+ - AnonRes/OpenMind
+ pipeline_tag: image-feature-extraction
+ tags:
+ - medical
+ ---
+
+ # OpenMind Benchmark 3D SSL Models
+
+ > **Model from the paper**: [An OpenMind for 3D medical vision self-supervised learning](https://arxiv.org/abs/2412.17041)
+ > **Pre-training codebase used to create this checkpoint**: [MIC-DKFZ/nnssl](https://github.com/MIC-DKFZ/nnssl)
+ > **Dataset**: [AnonRes/OpenMind](https://huggingface.co/datasets/AnonRes/OpenMind)
+ > **Downstream (segmentation) fine-tuning**: [TaWald/nnUNet](https://github.com/TaWald/nnUNet)
+
+ ---
+
+ ![OpenMind](https://huggingface.co/datasets/AnonRes/OpenMind/resolve/main/assets/OpenMindDataset.png)
+
+ ## 🔍 Overview
+
+ This repository hosts pre-trained checkpoints from the **OpenMind** benchmark:
+ 📄 **"An OpenMind for 3D medical vision self-supervised learning"**
+ ([arXiv:2412.17041](https://arxiv.org/abs/2412.17041)) — the first extensive benchmark study of **self-supervised learning (SSL)** on **3D medical imaging** data.
+
+ The models were pre-trained using various SSL methods on the [OpenMind Dataset](https://huggingface.co/datasets/AnonRes/OpenMind), a large-scale, standardized collection of public brain MRI datasets.
+
+ **These models are not recommended to be used as-is.** Instead, we recommend using the downstream fine-tuning pipelines for **segmentation** and **classification**, available in the [adaptation repository](https://github.com/TaWald/nnUNet).
+ *While direct download is possible, we recommend using the auto-download provided by the respective fine-tuning repositories.*
+
+ ---
+
+ ## 🧠 Model Variants
+
+ We release SSL checkpoints for two backbone architectures:
+
+ - **ResEnc-L**: A CNN-based encoder [[link1](https://arxiv.org/abs/2410.23132), [link2](https://arxiv.org/abs/2404.09556)]
+ - **Primus-M**: A transformer-based encoder [[Primus paper](https://arxiv.org/abs/2503.01835)]
+
+ Each encoder has been pre-trained using the following SSL techniques:
+
+ | Method | Description |
+ |---------------|-------------|
+ | [Volume Contrastive (VoCo)](https://arxiv.org/abs/2402.17300) | Global contrastive learning in 3D volumes |
+ | [VolumeFusion (VF)](https://arxiv.org/abs/2306.16925) | Spatial fusion-based SSL |
+ | [Models Genesis (MG)](https://www.sciencedirect.com/science/article/pii/S1361841520302048) | Classic 3D self-reconstruction |
+ | [Masked Autoencoders (MAE)](https://openaccess.thecvf.com/content/CVPR2022/html/He_Masked_Autoencoders_Are_Scalable_Vision_Learners_CVPR_2022_paper.html) | Patch masking and reconstruction |
+ | [Spark 3D (S3D)](https://arxiv.org/abs/2410.23132) | 3D adaptation of the SparK framework |
+ | [SimMIM](https://openaccess.thecvf.com/content/CVPR2022/html/Xie_SimMIM_A_Simple_Framework_for_Masked_Image_Modeling_CVPR_2022_paper.html) | Simple masked reconstruction |
+ | [SwinUNETR SSL](https://arxiv.org/abs/2111.14791) | Transformer-based pre-training |
+ | [SimCLR](https://arxiv.org/abs/2002.05709) | Contrastive learning baseline |
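
The README above points users to the fine-tuning repositories' auto-download, but direct download is possible. Below is a minimal, hypothetical sketch of fetching and inspecting this checkpoint with `huggingface_hub` and PyTorch; the `REPO_ID` placeholder and the `weights_only=False` loading step are assumptions, since the checkpoint's internal layout is not documented in this commit.

```python
# Hypothetical direct-download sketch; the auto-download built into the
# fine-tuning repositories remains the recommended path (see README above).
from huggingface_hub import hf_hub_download
import torch

REPO_ID = "AnonRes/<this-model-repo>"  # placeholder: substitute the actual repo id

ckpt_path = hf_hub_download(repo_id=REPO_ID, filename="checkpoint_final.pth")
plan_path = hf_hub_download(repo_id=REPO_ID, filename="adaptation_plan.json")

# Load on CPU for inspection only. The checkpoint may hold pickled metadata
# beyond raw tensors, hence weights_only=False (an assumption).
checkpoint = torch.load(ckpt_path, map_location="cpu", weights_only=False)
print(type(checkpoint))
if isinstance(checkpoint, dict):
    print(sorted(checkpoint.keys()))
```

Loading on CPU keeps the inspection independent of GPU availability; the actual remapping of weights into a downstream network is handled by the fine-tuning pipelines themselves.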
adaptation_plan.json ADDED
@@ -0,0 +1,108 @@
+ {
+     "architecture_plans": {
+         "arch_class_name": "PrimusM",
+         "arch_kwargs": null,
+         "arch_kwargs_requiring_import": null
+     },
+     "pretrain_plan": {
+         "dataset_name": "Dataset745_OpenNeuro_v2",
+         "plans_name": "nnsslPlans",
+         "original_median_spacing_after_transp": [
+             1,
+             1,
+             1
+         ],
+         "image_reader_writer": "SimpleITKIO",
+         "transpose_forward": [
+             0,
+             1,
+             2
+         ],
+         "transpose_backward": [
+             0,
+             1,
+             2
+         ],
+         "configurations": {
+             "onemmiso": {
+                 "data_identifier": "nnsslPlans_3d_fullres",
+                 "preprocessor_name": "DefaultPreprocessor",
+                 "spacing_style": "onemmiso",
+                 "normalization_schemes": [
+                     "ZScoreNormalization"
+                 ],
+                 "use_mask_for_norm": [
+                     false
+                 ],
+                 "resampling_fn_data": "resample_data_or_seg_to_shape",
+                 "resampling_fn_data_kwargs": {
+                     "is_seg": false,
+                     "order": 3,
+                     "order_z": 0,
+                     "force_separate_z": null
+                 },
+                 "resampling_fn_mask": "resample_data_or_seg_to_shape",
+                 "resampling_fn_mask_kwargs": {
+                     "is_seg": true,
+                     "order": 1,
+                     "order_z": 0,
+                     "force_separate_z": null
+                 },
+                 "spacing": [
+                     1,
+                     1,
+                     1
+                 ],
+                 "patch_size": [
+                     160,
+                     160,
+                     160
+                 ]
+             }
+         },
+         "experiment_planner_used": "FixedResEncUNetPlanner"
+     },
+     "pretrain_num_input_channels": 1,
+     "recommended_downstream_patchsize": [
+         160,
+         160,
+         160
+     ],
+     "key_to_encoder": "eva",
+     "key_to_stem": "down_projection",
+     "keys_to_in_proj": [
+         "down_projection.proj"
+     ],
+     "key_to_lpe": "eva.pos_embed",
+     "citations": [
+         {
+             "type": "Architecture",
+             "name": "PrimusM",
+             "bibtex_citations": [
+                 "@article{wald2025primus,\n title={Primus: Enforcing attention usage for 3d medical image segmentation},\n author={Wald, Tassilo and Roy, Saikat and Isensee, Fabian and Ulrich, Constantin and Ziegler, Sebastian and Trofimova, Dasha and Stock, Raphael and Baumgartner, Michael and K{\"o}hler, Gregor and Maier-Hein, Klaus},\n journal={arXiv preprint arXiv:2503.01835},\n year={2025}\n }"
+             ]
+         },
+         {
+             "type": "Pretraining Method",
+             "name": "Masked Auto Encoder",
+             "bibtex_citations": [
+                 "@article{wald2024openmind,\n title={An OpenMind for 3D medical vision self-supervised learning},\n author={Wald, Tassilo and Ulrich, Constantin and Suprijadi, Jonathan and Ziegler, Sebastian and Nohel, Michal and Peretzke, Robin and K{\"o}hler, Gregor and Maier-Hein, Klaus H},\n journal={arXiv preprint arXiv:2412.17041},\n year={2024}\n }\n "
+             ]
+         },
+         {
+             "type": "Pre-Training Dataset",
+             "name": "OpenMind",
+             "bibtex_citations": [
+                 "@article{wald2024openmind,\n title={An OpenMind for 3D medical vision self-supervised learning},\n author={Wald, Tassilo and Ulrich, Constantin and Suprijadi, Jonathan and Ziegler, Sebastian and Nohel, Michal and Peretzke, Robin and K{\"o}hler, Gregor and Maier-Hein, Klaus H},\n journal={arXiv preprint arXiv:2412.17041},\n year={2024}\n }\n "
+             ]
+         },
+         {
+             "type": "Framework",
+             "name": "nnssl",
+             "bibtex_citations": [
+                 "@article{wald2024revisiting,\n title={Revisiting MAE pre-training for 3D medical image segmentation},\n author={Wald, Tassilo and Ulrich, Constantin and Lukyanenko, Stanislav and Goncharov, Andrei and Paderno, Alberto and Maerkisch, Leander and J{\"a}ger, Paul F and Maier-Hein, Klaus},\n journal={arXiv preprint arXiv:2410.23132},\n year={2024}\n}"
+             ]
+         }
+     ],
+     "trainer_name": "BaseEvaMAETrainer_BS8"
+ }
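
The adaptation plan above records the architecture class, the recommended downstream patch size, the state-dict prefixes for the encoder (`key_to_encoder`) and stem (`key_to_stem`), and the BibTeX entries referenced in the commit message. A minimal sketch of reading those fields and filtering a checkpoint by the recorded prefixes, assuming (this is not specified in the commit) that `checkpoint_final.pth` loads to a flat state dict:

```python
# Sketch: read adaptation_plan.json and select encoder/stem weights by prefix.
# The flat state-dict layout of checkpoint_final.pth is an assumption.
import json
import torch

with open("adaptation_plan.json") as f:
    plan = json.load(f)

print(plan["architecture_plans"]["arch_class_name"])  # PrimusM
print(plan["recommended_downstream_patchsize"])       # [160, 160, 160]
print(plan["key_to_encoder"], plan["key_to_stem"])    # eva down_projection

checkpoint = torch.load("checkpoint_final.pth", map_location="cpu", weights_only=False)
state_dict = checkpoint if isinstance(checkpoint, dict) else checkpoint.state_dict()

# Keep only parameters whose names start with the encoder or stem prefix.
prefixes = (plan["key_to_encoder"], plan["key_to_stem"])
encoder_weights = {
    name: tensor
    for name, tensor in state_dict.items()
    if isinstance(tensor, torch.Tensor) and name.startswith(prefixes)
}
print(f"selected {len(encoder_weights)} encoder/stem tensors")
```

The `citations` list makes attribution machine-readable, so downstream tooling can surface the architecture, method, dataset, and framework references without parsing the README.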
checkpoint_final.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8c7c1d4e80dc751e39c96fe7ee3e324a1bdeca3c716e2419dfed340d85cecf0
+ size 788449517
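
`checkpoint_final.pth` is stored via Git LFS, so the blob committed here is only a pointer: the `oid` is the SHA-256 of the real payload and `size` is its byte length. A small sketch for verifying a downloaded copy against this pointer (the local filename is assumed):

```python
# Verify a downloaded checkpoint against the Git LFS pointer above.
import hashlib
import os

EXPECTED_SHA256 = "c8c7c1d4e80dc751e39c96fe7ee3e324a1bdeca3c716e2419dfed340d85cecf0"
EXPECTED_SIZE = 788449517  # bytes, from the pointer's "size" field

def verify(path: str) -> bool:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            digest.update(chunk)
    return (digest.hexdigest() == EXPECTED_SHA256
            and os.path.getsize(path) == EXPECTED_SIZE)

print(verify("checkpoint_final.pth"))  # True if the download is intact
```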