twn39 committed on
Commit fdccee2 (verified) · 1 parent: e0ecc45

Upload folder using huggingface_hub
README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ library_name: transformers.js
+ base_model:
+ - wkcn/TinyCLIP-ViT-39M-16-Text-19M-YFCC15M
+ ---
+
+ # TinyCLIP-ViT-39M-16-Text-19M-YFCC15M (ONNX)
+
+ This is an ONNX version of [wkcn/TinyCLIP-ViT-39M-16-Text-19M-YFCC15M](https://huggingface.co/wkcn/TinyCLIP-ViT-39M-16-Text-19M-YFCC15M). It was automatically converted and uploaded using [this space](https://huggingface.co/spaces/onnx-community/convert-to-onnx).
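
A minimal usage sketch with transformers.js, assuming this repository's Hub id (shown below as the placeholder `<this-repo-id>`) and the zero-shot image classification pipeline; the image URL is illustrative only:

```js
import { pipeline } from '@huggingface/transformers';

// Load the ONNX CLIP model through the zero-shot image classification pipeline.
const classifier = await pipeline(
  'zero-shot-image-classification',
  '<this-repo-id>', // placeholder: replace with the actual Hub id of this repo
);

// Score an image against free-form text labels.
const output = await classifier(
  'https://example.com/cat.jpg', // illustrative URL
  ['a photo of a cat', 'a photo of a dog'],
);
console.log(output); // e.g. [{ label: 'a photo of a cat', score: ... }, ...]
```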
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_attn_implementation_autoset": true,
+   "_name_or_path": "wkcn/TinyCLIP-ViT-39M-16-Text-19M-YFCC15M",
+   "architectures": [
+     "CLIPModel"
+   ],
+   "initializer_factor": 1.0,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "clip",
+   "projection_dim": 512,
+   "text_config": {
+     "dropout": 0.0,
+     "hidden_act": "gelu",
+     "model_type": "clip_text_model",
+     "num_hidden_layers": 6,
+     "torch_dtype": "float32"
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.49.0",
+   "vision_config": {
+     "dropout": 0.0,
+     "hidden_act": "gelu",
+     "hidden_size": 512,
+     "intermediate_size": 2048,
+     "model_type": "clip_vision_model",
+     "num_attention_heads": 8,
+     "patch_size": 16,
+     "torch_dtype": "float32"
+   }
+ }
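
The config above describes a CLIP model with a 512-dimensional shared projection space, a 6-layer text tower, and a ViT vision tower with 16×16 patches. A sketch of computing text and image embeddings separately with transformers.js, assuming the same placeholder repo id as above and an illustrative image URL:

```js
import {
  AutoTokenizer, CLIPTextModelWithProjection,
  AutoProcessor, CLIPVisionModelWithProjection, RawImage,
} from '@huggingface/transformers';

const MODEL_ID = '<this-repo-id>'; // placeholder

// Text tower: tokenize and project into the 512-dim embedding space.
const tokenizer = await AutoTokenizer.from_pretrained(MODEL_ID);
const textModel = await CLIPTextModelWithProjection.from_pretrained(MODEL_ID);
const textInputs = tokenizer(
  ['a photo of a cat', 'a photo of a dog'],
  { padding: true, truncation: true },
);
const { text_embeds } = await textModel(textInputs);

// Vision tower: preprocess the image and project into the same space.
const processor = await AutoProcessor.from_pretrained(MODEL_ID);
const visionModel = await CLIPVisionModelWithProjection.from_pretrained(MODEL_ID);
const image = await RawImage.read('https://example.com/cat.jpg'); // illustrative URL
const imageInputs = await processor(image);
const { image_embeds } = await visionModel(imageInputs);

// text_embeds: [2, 512], image_embeds: [1, 512] — compare with cosine similarity.
```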
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:367c2dbbde6d7539a3e647973c3e4f33d2893d129266cc0eae1d060625413c76
+ size 332805969
onnx/model_bnb4.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70d0c10b2e0de10c66e3d9ec87bd0f9d54ec18f8515d74b2cde696df34a104f2
+ size 136379062
onnx/model_fp16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:678ebec2b32fe2030690a755445c87e61ad0abf6bfcc02b61911d21a9ad00815
+ size 166604219
onnx/model_int8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e37ffdc95688b005de45d8c1241697a4f95e325a129ea5e1ab45b2ddaf65c047
+ size 84740686
onnx/model_q4.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f56f8ba81c6645c7a88d1ab8d36095821a7ecb570824a7e180136fb98bc36b84
+ size 139949894
onnx/model_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d160433da9978859147684441e44911fc7f1101cda3c5c754a05e8504c2f5635
+ size 84471217
onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd682e9bd91142c5bc74ac53ac5cb5a87e0ee16a95ca33ffc902b8d4a3e1283a
+ size 84740686
onnx/model_uint8.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd682e9bd91142c5bc74ac53ac5cb5a87e0ee16a95ca33ffc902b8d4a3e1283a
+ size 84740686
preprocessor_config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "crop_size": {
+     "height": 224,
+     "width": 224
+   },
+   "do_center_crop": true,
+   "do_convert_rgb": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.48145466,
+     0.4578275,
+     0.40821073
+   ],
+   "image_processor_type": "CLIPFeatureExtractor",
+   "image_std": [
+     0.26862954,
+     0.26130258,
+     0.27577711
+   ],
+   "resample": 3,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 224
+   }
+ }
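
The preprocessor resizes the shortest edge to 224, center-crops to 224×224, rescales pixel values by 1/255, and normalizes each channel with the CLIP mean and std above. A small sketch of the per-pixel arithmetic (the function name is illustrative, not part of any library):

```js
// Values copied from preprocessor_config.json.
const IMAGE_MEAN = [0.48145466, 0.4578275, 0.40821073];
const IMAGE_STD  = [0.26862954, 0.26130258, 0.27577711];
const RESCALE_FACTOR = 1 / 255; // 0.00392156862745098

// Normalize one 0–255 pixel value for a given channel (0 = R, 1 = G, 2 = B).
function normalizePixel(value, channel) {
  return (value * RESCALE_FACTOR - IMAGE_MEAN[channel]) / IMAGE_STD[channel];
}

// Example: a fully saturated red channel (255) maps to roughly 1.93.
console.log(normalizePixel(255, 0)); // ≈ 1.93
```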
quantize_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "modes": [
+     "fp16",
+     "q8",
+     "int8",
+     "uint8",
+     "q4",
+     "q4f16",
+     "bnb4"
+   ],
+   "per_channel": true,
+   "reduce_range": true,
+   "block_size": null,
+   "is_symmetric": true,
+   "accuracy_level": null,
+   "quant_type": 1,
+   "op_block_list": null
+ }
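
Each mode listed above corresponds to one of the `onnx/model_*.onnx` variants added in this commit. A sketch of selecting a specific variant at load time, assuming transformers.js v3's `dtype` option and the same placeholder repo id as above; the mapping from dtype to file name is an assumption based on the file names in this commit:

```js
import { pipeline } from '@huggingface/transformers';

// Assumption: 'q8' resolves to onnx/model_quantized.onnx, while 'fp16', 'int8',
// 'uint8', 'q4', 'q4f16' and 'bnb4' resolve to the matching model_<dtype>.onnx files.
const classifier = await pipeline(
  'zero-shot-image-classification',
  '<this-repo-id>', // placeholder
  { dtype: 'q8' },
);
```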
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "<|startoftext|>",
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "added_tokens_decoder": {
+     "49406": {
+       "content": "<|startoftext|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "49407": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|startoftext|>",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "extra_special_tokens": {},
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<|endoftext|>",
+   "processor_class": "CLIPProcessor",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": "<|endoftext|>"
+ }
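
The tokenizer is a standard CLIP BPE tokenizer whose sequences are wrapped in the special tokens configured above, `<|startoftext|>` (49406) and `<|endoftext|>` (49407). A small sketch of inspecting the token ids with transformers.js, again under the placeholder repo id:

```js
import { AutoTokenizer } from '@huggingface/transformers';

const tokenizer = await AutoTokenizer.from_pretrained('<this-repo-id>'); // placeholder
const { input_ids } = tokenizer('a photo of a cat');

// The id sequence starts with 49406 (<|startoftext|>) and ends with 49407 (<|endoftext|>).
console.log(input_ids.tolist());
```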
vocab.json ADDED
The diff for this file is too large to render. See raw diff