simon123905 committed · verified
Commit 12f4cfc · 1 Parent(s): 51d29c1

Upload folder using huggingface_hub

Files changed (6)
  1. .gitattributes +0 -24
  2. LICENSE.md +66 -0
  3. README.md +477 -0
  4. config.json +32 -0
  5. model.safetensors +3 -0
  6. preprocessor_config.json +31 -0
.gitattributes CHANGED
@@ -8,8 +8,6 @@
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.mds filter=lfs diff=lfs merge=lfs -text
 *.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
@@ -35,25 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
-*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
-# Video files - compressed
-*.mp4 filter=lfs diff=lfs merge=lfs -text
-*.webm filter=lfs diff=lfs merge=lfs -text

LICENSE.md ADDED
@@ -0,0 +1,66 @@
# DINOv3 License

*Last Updated: August 19, 2025*

**“Agreement”** means the terms and conditions for use, reproduction, distribution and modification of the DINO Materials set forth herein.

**“DINO Materials”** means, collectively, Documentation and the models, software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code, and other elements of the foregoing distributed by Meta and made available under this Agreement.

**“Documentation”** means the specifications, manuals and documentation accompanying DINO Materials distributed by Meta.

**“Licensee”** or **“you”** means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity’s behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.

**“Meta”** or **“we”** means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) or Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).

**“Sanctions”** means any economic or trade sanctions or restrictions administered or enforced by the United States (including the Office of Foreign Assets Control of the U.S. Department of the Treasury (“OFAC”), the U.S. Department of State and the U.S. Department of Commerce), the United Nations, the European Union, or the United Kingdom.

**“Trade Controls”** means any of the following: Sanctions and applicable export and import controls.

By clicking “I Accept” below or by using or distributing any portion or element of the DINO Materials, you agree to be bound by this Agreement.

## 1. License Rights and Redistribution.

a. <ins>Grant of Rights</ins>. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta’s intellectual property or other rights owned by Meta embodied in the DINO Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the DINO Materials.

b. <ins>Redistribution and Use</ins>.

i. Distribution of DINO Materials, and any derivative works thereof, are subject to the terms of this Agreement. If you distribute or make the DINO Materials, or any derivative works thereof, available to a third party, you may only do so under the terms of this Agreement and you shall provide a copy of this Agreement with any such DINO Materials.

ii. If you submit for publication the results of research you perform on, using, or otherwise in connection with DINO Materials, you must acknowledge the use of DINO Materials in your publication.

iii. Your use of the DINO Materials must comply with applicable laws and regulations, including Trade Control Laws and applicable privacy and data protection laws.

iv. Your use of the DINO Materials will not involve or encourage others to reverse engineer, decompile or discover the underlying components of the DINO Materials.

v. You are not the target of Trade Controls and your use of DINO Materials must comply with Trade Controls. You agree not to use, or permit others to use, DINO Materials for any activities subject to the International Traffic in Arms Regulations (ITAR) or end uses prohibited by Trade Controls, including those related to military or warfare purposes, nuclear industries or applications, espionage, or the development or use of guns or illegal weapons.

## 2. User Support.

Your use of the DINO Materials is done at your own discretion; Meta does not process any information nor provide any service in relation to such use. Meta is under no obligation to provide any support services for the DINO Materials. Any support provided is “as is”, “with all faults”, and without warranty of any kind.

## 3. Disclaimer of Warranty.

UNLESS REQUIRED BY APPLICABLE LAW, THE DINO MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE DINO MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE DINO MATERIALS AND ANY OUTPUT AND RESULTS.

## 4. Limitation of Liability.

IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY DIRECT OR INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.

## 5. Intellectual Property.

a. Subject to Meta’s ownership of DINO Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the DINO Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.

b. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the DINO Materials, outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the DINO Materials.

## 6. Term and Termination.

The term of this Agreement will commence upon your acceptance of this Agreement or access to the DINO Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the DINO Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement.

## 7. Governing Law and Jurisdiction.

This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement.

## 8. Modifications and Amendments.

Meta may modify this Agreement from time to time; provided that they are similar in spirit to the current version of the Agreement, but may differ in detail to address new problems or concerns. All such changes will be effective immediately. Your continued use of the DINO Materials after any modification to this Agreement constitutes your agreement to such modification. Except as provided in this Agreement, no modification or addition to any provision of this Agreement will be binding unless it is in writing and signed by an authorized representative of both you and Meta.
README.md ADDED
@@ -0,0 +1,477 @@
---
extra_gated_fields:
  First Name: text
  Last Name: text
  Date of birth: date_picker
  Country: country
  Affiliation: text
  Job title:
    type: select
    options:
      - Student
      - Research Graduate
      - AI researcher
      - AI developer/engineer
      - Reporter
      - Other
  geo: ip_location
  By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected, stored, processed and shared in accordance with the Meta Privacy Policy: checkbox
extra_gated_description: >-
  The information you provide will be collected, stored, processed and shared in
  accordance with the [Meta Privacy
  Policy](https://www.facebook.com/privacy/policy/).
extra_gated_button_content: Submit
language:
- en
tags:
- dino
- dinov3
- arxiv:2508.10104
license: other
license_name: dinov3-license
license_link: https://ai.meta.com/resources/models-and-libraries/dinov3-license
base_model: facebook/dinov3-vit7b16-pretrain-lvd1689m
pipeline_tag: image-feature-extraction
library_name: transformers
---

# Model Card for DINOv3

DINOv3 is a family of versatile vision foundation models that outperform the specialized state of the art across a broad range of settings, without fine-tuning. DINOv3 produces high-quality dense features that achieve outstanding performance on various vision tasks, significantly surpassing previous self- and weakly-supervised foundation models.

## Model Details

These are Vision Transformer and ConvNeXt models trained following the method described in the DINOv3 paper. 12 models are provided:

- 10 models pretrained on web data (LVD-1689M dataset):
  - 1 ViT-7B trained from scratch,
  - 5 ViT-S/S+/B/L/H+ models distilled from the ViT-7B,
  - 4 ConvNeXt-{T/S/B/L} models distilled from the ViT-7B,
- 2 models pretrained on satellite data (SAT-493M dataset):
  - 1 ViT-7B trained from scratch,
  - 1 ViT-L distilled from the ViT-7B.

Each Transformer-based model takes an image as input and returns a class token, patch tokens (and register tokens). These models follow a ViT architecture, with a patch size of 16. For a 224x224 image, this results in 1 class token + 4 register tokens + 196 patch tokens = 201 tokens (for DINOv2 with registers, whose patch size was 14, this was 1 + 4 + 256 = 261 tokens).

The models can accept larger images provided the image dimensions are multiples of the patch size (16). If this condition is not met, the model crops the input to the closest smaller multiple of the patch size.

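As a quick illustration of the token arithmetic above (an editorial sketch, not from the original card), the expected sequence length for a given input size can be computed as follows:

```python
# Token count for a DINOv3 ViT input; a sketch following the layout above.
PATCH_SIZE = 16
NUM_REGISTER_TOKENS = 4

def num_tokens(height: int, width: int) -> int:
    # Dimensions that are not multiples of 16 are cropped down first.
    h_patches, w_patches = height // PATCH_SIZE, width // PATCH_SIZE
    return 1 + NUM_REGISTER_TOKENS + h_patches * w_patches  # [CLS] + registers + patches

print(num_tokens(224, 224))  # 1 + 4 + 196 = 201
print(num_tokens(512, 512))  # 1 + 4 + 1024 = 1029
```
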
### Model Description

- **Developed by:** Meta AI
- **Model type:** Vision Transformer, ConvNeXt
- **License:** [DINOv3 License](https://ai.meta.com/resources/models-and-libraries/dinov3-license/)

### Model Sources

- **Repository:** [https://github.com/facebookresearch/dinov3](https://github.com/facebookresearch/dinov3)
- **Paper:** [https://arxiv.org/abs/2508.10104](https://arxiv.org/abs/2508.10104)

## Uses

The models are vision backbones providing multi-purpose features for downstream tasks.

### Direct Use

The models can be used without fine-tuning, with downstream classifiers as simple as linear layers, to obtain competitive results (a minimal probing sketch follows this list):

- on image classification, using k-NN classifiers on the class token
- on image classification, with logistic regression classifiers applied on the class token
- on image classification, with a linear layer applied on the class token and the average of the patch tokens
- on image retrieval, using nearest neighbors
- on geometric and semantic 3D keypoint correspondences
- on depth estimation and semantic segmentation, using linear layers
- on unsupervised object discovery
- on video segmentation tracking
- on video classification, using a small 4-layer attentive probe

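As an illustration of the simplest of these probes (an editorial sketch, not from the original card), a logistic-regression classifier can be fit on frozen class tokens; `train_images` and `train_labels` below are hypothetical placeholders for any labeled dataset:

```python
import torch
from sklearn.linear_model import LogisticRegression
from transformers import AutoImageProcessor, AutoModel

model_name = "facebook/dinov3-vitl16-pretrain-lvd1689m"
processor = AutoImageProcessor.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name).eval()

def embed(images):
    """Return frozen class-token embeddings, one row per image."""
    inputs = processor(images=images, return_tensors="pt")
    with torch.inference_mode():
        outputs = model(**inputs)
    return outputs.pooler_output.numpy()

# train_images / train_labels are placeholders for a labeled dataset.
features = embed(train_images)
classifier = LogisticRegression(max_iter=1000).fit(features, train_labels)
print("train accuracy:", classifier.score(features, train_labels))
```
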
### Downstream Use

While fine-tuning the models can yield some gains, it is recommended to keep this option as a last resort: the frozen features are expected to provide good performance out of the box.

## Bias, Risks, and Limitations

Compared to DINOv2 and SEERv2, DINOv3 delivers fairly consistent performance across income categories on geographical fairness and diversity benchmarks, although with a notable performance drop in the low-income bucket compared to the highest-income bucket.

DINOv3 also achieves relatively good scores across different regions, improving over its predecessor DINOv2. However, a relative difference is still observed between Europe and Africa.

### Recommendations

Fine-tuning is expected to increase the biases in the features produced by the model, as they will be tuned to the fine-tuning labels.

## How to Get Started with the Model

The examples below demonstrate how to obtain an image embedding with the [Pipeline] or the [AutoModel] class.

```python
from transformers import pipeline
from transformers.image_utils import load_image

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
image = load_image(url)

feature_extractor = pipeline(
    model="facebook/dinov3-vitl16-pretrain-lvd1689m",
    task="image-feature-extraction",
)
features = feature_extractor(image)
```

```python
import torch
from transformers import AutoImageProcessor, AutoModel
from transformers.image_utils import load_image

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = load_image(url)

pretrained_model_name = "facebook/dinov3-vitl16-pretrain-lvd1689m"
processor = AutoImageProcessor.from_pretrained(pretrained_model_name)
model = AutoModel.from_pretrained(
    pretrained_model_name,
    device_map="auto",
)

inputs = processor(images=image, return_tensors="pt").to(model.device)
with torch.inference_mode():
    outputs = model(**inputs)

pooled_output = outputs.pooler_output
print("Pooled output shape:", pooled_output.shape)
```
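
For dense tasks, the per-patch features matter more than the pooled output. Continuing the snippet above, a sketch that splits the token sequence according to the layout described in Model Details (1 class token, 4 register tokens, then one token per 16x16 patch):

```python
# Split the token sequence; assumes the [CLS] + registers + patches layout above.
num_register_tokens = 4
hidden = outputs.last_hidden_state          # (batch, 1 + 4 + n_patches, hidden_dim)

cls_token = hidden[:, 0]                                 # global image descriptor
register_tokens = hidden[:, 1 : 1 + num_register_tokens]
patch_tokens = hidden[:, 1 + num_register_tokens :]      # dense per-patch features

print("Patch tokens shape:", patch_tokens.shape)
```
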
## Training Details

### Training Data

- Web dataset (LVD-1689M): a curated dataset of 1,689 million images extracted from a large data pool of 17 billion web images collected from public posts on Instagram
- Satellite dataset (SAT-493M): a dataset of 493 million 512x512 images sampled randomly from Maxar RGB ortho-rectified imagery at 0.6 meter resolution

### Training Procedure

**Training objective:**

- DINO self-distillation loss with multi-crop
- iBOT masked-image modeling loss
- KoLeo regularization on [CLS] tokens (sketched below)
- Gram anchoring

**Training regime:** PyTorch FSDP2 (with bf16 and fp8 matrix multiplications)

**Distillation:**

- Distillation follows the standard DINOv3 pretraining procedure, except that the teacher is a frozen pretrained ViT-7B.

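As a rough sketch of one of these terms (an editorial illustration, not the released training code), the KoLeo regularizer spreads the batch's [CLS] embeddings over the unit sphere by maximizing each embedding's log-distance to its nearest neighbor:

```python
import torch
import torch.nn.functional as F

def koleo_loss(cls_tokens: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    """Kozachenko-Leonenko entropy regularizer (illustrative sketch)."""
    x = F.normalize(cls_tokens, dim=-1)       # project onto the unit sphere
    dists = torch.cdist(x, x)                 # pairwise Euclidean distances
    dists.fill_diagonal_(float("inf"))        # exclude self-distances
    nearest = dists.min(dim=1).values         # distance to the nearest neighbor
    return -torch.log(nearest + eps).mean()   # small gaps are penalized

loss = koleo_loss(torch.randn(32, 1024))      # e.g. a batch of ViT-L [CLS] tokens
```
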
## Evaluation

**Results**

The reader is referred to the associated paper for details on the evaluation protocols.

*Results for ViT backbones pretrained (or distilled) on web (LVD-1689M)*

<table>
  <tr>
    <th></th>
    <th colspan="4">Global Tasks</th>
    <th colspan="5">Dense Tasks</th>
  </tr>
  <tr>
    <th>Model</th>
    <th>IN-ReaL</th><th>IN-R</th><th>Obj.Net</th><th>Ox.-H</th>
    <th>ADE20k</th><th>NYU↓</th><th>DAVIS</th><th>NAVI</th><th>SPair</th>
  </tr>
  <tr>
    <td>DINOv3 ViT-S/16</td>
    <td align="right">87.0</td><td align="right">60.4</td><td align="right">50.9</td><td align="right">49.5</td>
    <td align="right">47.0</td><td align="right">0.403</td><td align="right">72.7</td><td align="right">56.3</td><td align="right">50.4</td>
  </tr>
  <tr>
    <td>DINOv3 ViT-S+/16</td>
    <td align="right">88.0</td><td align="right">68.8</td><td align="right">54.6</td><td align="right">50.0</td>
    <td align="right">48.8</td><td align="right">0.399</td><td align="right">75.5</td><td align="right">57.1</td><td align="right">55.2</td>
  </tr>
  <tr>
    <td>DINOv3 ViT-B/16</td>
    <td align="right">89.3</td><td align="right">76.7</td><td align="right">64.1</td><td align="right">58.5</td>
    <td align="right">51.8</td><td align="right">0.373</td><td align="right">77.2</td><td align="right">58.8</td><td align="right">57.2</td>
  </tr>
  <tr>
    <td>DINOv3 ViT-L/16</td>
    <td align="right">90.2</td><td align="right">88.1</td><td align="right">74.8</td><td align="right">63.1</td>
    <td align="right">54.9</td><td align="right">0.352</td><td align="right">79.9</td><td align="right">62.3</td><td align="right">61.3</td>
  </tr>
  <tr>
    <td>DINOv3 ViT-H+/16</td>
    <td align="right">90.3</td><td align="right">90.0</td><td align="right">78.6</td><td align="right">64.5</td>
    <td align="right">54.8</td><td align="right">0.352</td><td align="right">79.3</td><td align="right">63.3</td><td align="right">56.3</td>
  </tr>
  <tr>
    <td>DINOv3 ViT-7B/16</td>
    <td align="right">90.4</td><td align="right">91.1</td><td align="right">91.1</td><td align="right">72.8</td>
    <td align="right">55.9</td><td align="right">0.309</td><td align="right">79.7</td><td align="right">64.4</td><td align="right">58.7</td>
  </tr>
</table>

*Results for ConvNeXt backbones distilled on web (LVD-1689M)*

<table>
  <tr>
    <th></th>
    <th colspan="6">Global Tasks</th>
    <th colspan="2">Dense Tasks</th>
  </tr>
  <tr>
    <th>Model</th>
    <th colspan="2">IN-ReaL</th>
    <th colspan="2">IN-R</th>
    <th colspan="2">Obj.Net</th>
    <th>ADE20k</th>
    <th>NYU↓</th>
  </tr>
  <tr>
    <td></td>
    <td>@256px</td><td>@512px</td>
    <td>@256px</td><td>@512px</td>
    <td>@256px</td><td>@512px</td>
    <td colspan="2"></td>
  </tr>
  <tr>
    <td>DINOv3 ConvNeXt Tiny</td>
    <td align="right">86.6</td><td align="right">87.7</td>
    <td align="right">73.7</td><td align="right">74.1</td>
    <td align="right">52.6</td><td align="right">58.7</td>
    <td align="right">42.7</td><td align="right">0.448</td>
  </tr>
  <tr>
    <td>DINOv3 ConvNeXt Small</td>
    <td align="right">87.9</td><td align="right">88.7</td>
    <td align="right">73.7</td><td align="right">74.1</td>
    <td align="right">52.6</td><td align="right">58.7</td>
    <td align="right">44.8</td><td align="right">0.432</td>
  </tr>
  <tr>
    <td>DINOv3 ConvNeXt Base</td>
    <td align="right">88.5</td><td align="right">89.2</td>
    <td align="right">77.2</td><td align="right">78.2</td>
    <td align="right">56.2</td><td align="right">61.3</td>
    <td align="right">46.3</td><td align="right">0.420</td>
  </tr>
  <tr>
    <td>DINOv3 ConvNeXt Large</td>
    <td align="right">88.9</td><td align="right">89.4</td>
    <td align="right">81.3</td><td align="right">82.4</td>
    <td align="right">59.3</td><td align="right">65.2</td>
    <td align="right">47.8</td><td align="right">0.403</td>
  </tr>
</table>

*Results for ViT backbones pretrained (or distilled) on satellite (SAT-493M)*

<table>
  <tr>
    <th></th>
    <th colspan="7">(GEO-Bench) Classification</th>
  </tr>
  <tr>
    <th>Model</th>
    <th>m-BEnet</th><th>m-brick-kiln</th><th>m-eurosat</th><th>m-forestnet</th><th>m-pv4ger</th><th>m-so2sat</th><th>mean</th>
  </tr>
  <tr>
    <td>DINOv3 ViT-L/16</td>
    <td>73.0</td><td>96.5</td><td>94.1</td><td>60.6</td><td>96.0</td><td>57.4</td><td>79.6</td>
  </tr>
  <tr>
    <td>DINOv3 ViT-7B/16</td>
    <td>74.0</td><td>97.2</td><td>94.8</td><td>62.3</td><td>96.1</td><td>62.1</td><td>81.1</td>
  </tr>
  <tr>
    <th></th>
    <th colspan="7">(GEO-Bench) Segmentation</th>
  </tr>
  <tr>
    <th>Model</th>
    <th>m-cashew</th><th>m-chesapeake</th><th>m-NeonTree</th><th>m-nz-cattle</th><th>m-pv4ger-seg</th><th>m-SA-crop</th><th>mean</th>
  </tr>
  <tr>
    <td>DINOv3 ViT-L/16</td>
    <td>94.2</td><td>75.6</td><td>61.8</td><td>83.7</td><td>95.2</td><td>36.8</td><td>74.5</td>
  </tr>
  <tr>
    <td>DINOv3 ViT-7B/16</td>
    <td>94.1</td><td>76.6</td><td>62.6</td><td>83.4</td><td>95.5</td><td>37.6</td><td>75.0</td>
  </tr>
</table>

## Environmental Impact

- **Hardware Type:** Nvidia H100
- **Hours used:** 61,440 hours for ViT-7B model training
- **Cloud Provider:** Private infrastructure
- **Compute Region:** USA
- **Carbon Emitted:** 18t CO2eq

## Technical Specifications

### Model Architecture and Objective

Vision Transformer models:

- ViT-S (21M parameters): patch size 16, embedding dimension 384, 4 register tokens, 6 heads, MLP FFN, RoPE
- ViT-S+ (29M parameters): patch size 16, embedding dimension 384, 4 register tokens, 6 heads, SwiGLU FFN, RoPE
- ViT-B (86M parameters): patch size 16, embedding dimension 768, 4 register tokens, 12 heads, MLP FFN, RoPE
- ViT-L (300M parameters): patch size 16, embedding dimension 1024, 4 register tokens, 16 heads, MLP FFN, RoPE
- ViT-H+ (840M parameters): patch size 16, embedding dimension 1280, 4 register tokens, 20 heads, SwiGLU FFN, RoPE
- ViT-7B (6716M parameters): patch size 16, embedding dimension 4096, 4 register tokens, 32 heads, SwiGLU FFN, RoPE

ConvNeXt models:

- ConvNeXt Tiny (29M parameters)
- ConvNeXt Small (50M parameters)
- ConvNeXt Base (89M parameters)
- ConvNeXt Large (198M parameters)

### Compute Infrastructure

#### Hardware

Nvidia H100 GPUs

#### Software

PyTorch 2.7

## More Information

See the [blog post](https://ai.meta.com/blog/dinov3-self-supervised-vision-model/) and the associated [website](https://ai.meta.com/dinov3/).

## Citation

**BibTeX**

```
@misc{simeoni2025dinov3,
  title={{DINOv3}},
  author={Sim{\'e}oni, Oriane and Vo, Huy V. and Seitzer, Maximilian and Baldassarre, Federico and Oquab, Maxime and Jose, Cijo and Khalidov, Vasil and Szafraniec, Marc and Yi, Seungeun and Ramamonjisoa, Micha{\"e}l and Massa, Francisco and Haziza, Daniel and Wehrstedt, Luca and Wang, Jianyuan and Darcet, Timoth{\'e}e and Moutakanni, Th{\'e}o and Sentana, Leonel and Roberts, Claire and Vedaldi, Andrea and Tolan, Jamie and Brandt, John and Couprie, Camille and Mairal, Julien and J{\'e}gou, Herv{\'e} and Labatut, Patrick and Bojanowski, Piotr},
  year={2025},
  eprint={2508.10104},
  archivePrefix={arXiv},
  primaryClass={cs.CV},
  url={https://arxiv.org/abs/2508.10104},
}
```
config.json ADDED
@@ -0,0 +1,32 @@
{
  "architectures": [
    "DINOv3ViTModel"
  ],
  "attention_dropout": 0.0,
  "drop_path_rate": 0.0,
  "hidden_act": "gelu",
  "hidden_size": 1024,
  "image_size": 224,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "key_bias": false,
  "layer_norm_eps": 1e-05,
  "layerscale_value": 1.0,
  "mlp_bias": true,
  "model_type": "dinov3_vit",
  "num_attention_heads": 16,
  "num_channels": 3,
  "num_hidden_layers": 24,
  "num_register_tokens": 4,
  "patch_size": 16,
  "pos_embed_jitter": null,
  "pos_embed_rescale": 2.0,
  "pos_embed_shift": null,
  "proj_bias": true,
  "query_bias": true,
  "rope_theta": 100.0,
  "torch_dtype": "float32",
  "transformers_version": "4.56.0.dev0",
  "use_gated_mlp": false,
  "value_bias": true
}
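
These values (hidden_size 1024, 24 layers, 16 attention heads, patch size 16, 4 register tokens) correspond to the ViT-L/16 architecture listed in the README. A quick sanity check, assuming access to the gated facebook/dinov3-vitl16-pretrain-lvd1689m repo referenced above:

```python
from transformers import AutoConfig

# The config in this repo is assumed to match the referenced ViT-L/16 checkpoint.
config = AutoConfig.from_pretrained("facebook/dinov3-vitl16-pretrain-lvd1689m")

assert config.model_type == "dinov3_vit"
assert (config.hidden_size, config.num_hidden_layers) == (1024, 24)
assert (config.num_attention_heads, config.patch_size) == (16, 16)
```
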
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dcb2e45127cccbf1601e5f42fef165eea275c8e5213197e8dcf3f48822718179
size 1212559808
preprocessor_config.json ADDED
@@ -0,0 +1,31 @@
{
  "crop_size": null,
  "data_format": "channels_first",
  "default_to_square": true,
  "device": null,
  "disable_grouping": null,
  "do_center_crop": null,
  "do_convert_rgb": null,
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [
    0.485,
    0.456,
    0.406
  ],
  "image_processor_type": "DINOv3ViTImageProcessorFast",
  "image_std": [
    0.229,
    0.224,
    0.225
  ],
  "input_data_format": null,
  "resample": 2,
  "rescale_factor": 0.00392156862745098,
  "return_tensors": null,
  "size": {
    "height": 224,
    "width": 224
  }
}
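
For intuition, the same preprocessing expressed with torchvision (an illustrative sketch; the shipped DINOv3ViTImageProcessorFast is the canonical implementation, and resample=2 is PIL's bilinear filter):

```python
from torchvision import transforms

preprocess = transforms.Compose([
    # size {"height": 224, "width": 224} with bilinear resampling (resample=2)
    transforms.Resize((224, 224), interpolation=transforms.InterpolationMode.BILINEAR),
    # do_rescale with rescale_factor 1/255, channels-first layout
    transforms.ToTensor(),
    # do_normalize with the ImageNet mean / std from the config above
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
```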