Felladrin committed
Commit 2595e77 · verified · Parent: 3ab8a33

Upload folder using huggingface_hub

README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ library_name: transformers.js
+ base_model:
+ - facebook/dinov2-large
+ ---
+
+ # dinov2-large (ONNX)
+
+ This is an ONNX version of [facebook/dinov2-large](https://huggingface.co/facebook/dinov2-large). It was automatically converted and uploaded using [this space](https://huggingface.co/spaces/onnx-community/convert-to-onnx).
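Since the model card declares `library_name: transformers.js`, a minimal usage sketch with the `@huggingface/transformers` package might look like the following. The repo id and image URL are placeholders, not confirmed by this commit:

```js
import { AutoModel, AutoProcessor, RawImage } from '@huggingface/transformers';

// Placeholder repo id — replace with this repository's actual id on the Hub.
const model_id = '<namespace>/dinov2-large-ONNX';

// Load the image processor (preprocessor_config.json) and the ONNX encoder (onnx/model.onnx).
const processor = await AutoProcessor.from_pretrained(model_id);
const model = await AutoModel.from_pretrained(model_id);

// Preprocess an image and run the encoder.
const image = await RawImage.read('https://example.com/cat.jpg'); // placeholder URL
const inputs = await processor(image);
const { last_hidden_state } = await model(inputs);

console.log(last_hidden_state.dims); // [1, 257, 1024] for the default 224×224 crop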
config.json ADDED
@@ -0,0 +1,62 @@
+ {
+   "_attn_implementation_autoset": true,
+   "_name_or_path": "facebook/dinov2-large",
+   "apply_layernorm": true,
+   "architectures": [
+     "Dinov2Model"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "drop_path_rate": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 1024,
+   "image_size": 518,
+   "initializer_range": 0.02,
+   "layer_norm_eps": 1e-06,
+   "layerscale_value": 1.0,
+   "mlp_ratio": 4,
+   "model_type": "dinov2",
+   "num_attention_heads": 16,
+   "num_channels": 3,
+   "num_hidden_layers": 24,
+   "out_features": [
+     "stage24"
+   ],
+   "out_indices": [
+     24
+   ],
+   "patch_size": 14,
+   "qkv_bias": true,
+   "reshape_hidden_states": true,
+   "stage_names": [
+     "stem",
+     "stage1",
+     "stage2",
+     "stage3",
+     "stage4",
+     "stage5",
+     "stage6",
+     "stage7",
+     "stage8",
+     "stage9",
+     "stage10",
+     "stage11",
+     "stage12",
+     "stage13",
+     "stage14",
+     "stage15",
+     "stage16",
+     "stage17",
+     "stage18",
+     "stage19",
+     "stage20",
+     "stage21",
+     "stage22",
+     "stage23",
+     "stage24"
+   ],
+   "torch_dtype": "float32",
+   "transformers_version": "4.49.0",
+   "use_mask_token": true,
+   "use_swiglu_ffn": false
+ }
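A quick sanity check on the token geometry these values imply: a 518×518 input split into 14×14 patches gives a 37×37 grid, i.e. 1369 patch tokens plus one CLS token, each of width `hidden_size` = 1024. An illustrative sketch (not part of this repo):

```js
// Token geometry implied by config.json.
const hiddenSize = 1024;
const patchSize = 14;

const seqLen = (side) => (side / patchSize) ** 2 + 1; // patch grid + 1 CLS token

console.log(seqLen(518)); // 1370 — at the config's native image_size
console.log(seqLen(224)); // 257  — at the 224×224 crop used by the preprocessor below
// last_hidden_state shape: [batch, seqLen, hiddenSize]
```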
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2fabcbe2e231b7b2035588329ec6df427c08baa8723eb31546c52738888013ca
+ size 1218043280
onnx/model_bnb4.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4830e2f0bc5b3f9caeec6a0d47b5ed86a21f64863656947c2a20b83dcb729cdd
+ size 179975666
onnx/model_fp16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:764a9f936b0ba2aaeba99c075f6bd9f2659bd64a4c252de6809ac93d65824693
+ size 612112084
onnx/model_int8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68910b3dff166c4853ac9ec87755249dfa5f59ab6e388b1d105ee017f4257d3d
+ size 311541804
onnx/model_q4.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a72dd8657d1910463462db21027d983ff1cfda3b30c5552834091f0e9eaf3cfe
+ size 198848858
onnx/model_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5644399e40b9ed542a1da5c204c3ea8f662070414cb62184e353011126f69f0
+ size 178023070
onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:973653db89a03e48b378b7107c589899de30054e6028203619bac5f83e062124
+ size 311541804
onnx/model_uint8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:973653db89a03e48b378b7107c589899de30054e6028203619bac5f83e062124
+ size 311541804
preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.485,
+     0.456,
+     0.406
+   ],
+   "image_processor_type": "BitImageProcessor",
+   "image_std": [
+     0.229,
+     0.224,
+     0.225
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 256
+   }
+ }
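The pipeline this config describes: resize so the shortest edge is 256 (resample 3 = bicubic), center-crop to 224×224, rescale pixel values by 1/255, then normalize with the ImageNet mean/std above. An illustrative sketch of the rescale + normalize step (not part of this repo):

```js
// Rescale + normalize per RGB channel, as specified by preprocessor_config.json.
const imageMean = [0.485, 0.456, 0.406];
const imageStd = [0.229, 0.224, 0.225];
const rescaleFactor = 1 / 255; // 0.00392156862745098

const rgb = [128, 64, 200]; // an arbitrary 8-bit pixel, after resize + center crop
const normalized = rgb.map((v, c) => (v * rescaleFactor - imageMean[c]) / imageStd[c]);

console.log(normalized); // ≈ [0.074, -0.915, 1.681]
```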
quantize_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "modes": [
+     "fp16",
+     "q8",
+     "int8",
+     "uint8",
+     "q4",
+     "q4f16",
+     "bnb4"
+   ],
+   "per_channel": true,
+   "reduce_range": true,
+   "block_size": null,
+   "is_symmetric": true,
+   "accuracy_level": null,
+   "quant_type": 1,
+   "op_block_list": null
+ }
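The `modes` listed here correspond to the `onnx/model_*.onnx` variants added above (with `q8` stored as `model_quantized.onnx`). In Transformers.js v3 a specific variant can, as far as I'm aware, be selected with the `dtype` option; a hedged sketch with a placeholder repo id:

```js
import { AutoModel } from '@huggingface/transformers';

// Placeholder repo id — replace with this repository's actual id on the Hub.
const model = await AutoModel.from_pretrained('<namespace>/dinov2-large-ONNX', {
  dtype: 'q4f16', // one of: 'fp32', 'fp16', 'q8', 'int8', 'uint8', 'q4', 'q4f16', 'bnb4'
});
```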