prince-canuma
committed on
Commit
•
279376d
1
Parent(s):
605bdde
Upload folder using huggingface_hub
Browse files- README.md +7 -4
- chat_template.json +3 -0
- model-00001-of-00002.safetensors +1 -1
- model-00002-of-00002.safetensors +1 -1
- preprocessor_config.json +0 -17
- processor_config.json +6 -0
- tokenizer.json +0 -0
- tokenizer_config.json +2 -0
README.md
CHANGED
@@ -1,17 +1,20 @@
|
|
1 |
---
|
|
|
|
|
2 |
language:
|
3 |
- en
|
|
|
|
|
4 |
tags:
|
|
|
|
|
5 |
- mlx
|
6 |
-
datasets:
|
7 |
-
- liuhaotian/LLaVA-Instruct-150K
|
8 |
-
pipeline_tag: image-to-text
|
9 |
inference: false
|
10 |
arxiv: 2304.08485
|
11 |
---
|
12 |
|
13 |
# mlx-community/llava-1.5-7b-8bit
|
14 |
-
This model was converted to MLX format from [`llava-hf/llava-1.5-7b-hf`]() using mlx-vlm version **0.0
|
15 |
Refer to the [original model card](https://huggingface.co/llava-hf/llava-1.5-7b-hf) for more details on the model.
|
16 |
## Use with mlx
|
17 |
|
|
|
1 |
---
|
2 |
+
datasets:
|
3 |
+
- liuhaotian/LLaVA-Instruct-150K
|
4 |
language:
|
5 |
- en
|
6 |
+
license: llama2
|
7 |
+
pipeline_tag: image-text-to-text
|
8 |
tags:
|
9 |
+
- vision
|
10 |
+
- image-text-to-text
|
11 |
- mlx
|
|
|
|
|
|
|
12 |
inference: false
|
13 |
arxiv: 2304.08485
|
14 |
---
|
15 |
|
16 |
# mlx-community/llava-1.5-7b-8bit
|
17 |
+
This model was converted to MLX format from [`llava-hf/llava-1.5-7b-hf`]() using mlx-vlm version **0.1.0**.
|
18 |
Refer to the [original model card](https://huggingface.co/llava-hf/llava-1.5-7b-hf) for more details on the model.
|
19 |
## Use with mlx
|
20 |
|
chat_template.json
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"chat_template": "{% for message in messages %}{% if message['role'] != 'system' %}{{ message['role'].upper() + ': '}}{% endif %}{# Render all images first #}{% for content in message['content'] | selectattr('type', 'equalto', 'image') %}{{ '<image>\n' }}{% endfor %}{# Render all text next #}{% if message['role'] != 'assistant' %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{{ content['text'] + ' '}}{% endfor %}{% else %}{% for content in message['content'] | selectattr('type', 'equalto', 'text') %}{% generation %}{{ content['text'] + ' '}}{% endgeneration %}{% endfor %}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'ASSISTANT:' }}{% endif %}"
|
3 |
+
}
|
model-00001-of-00002.safetensors
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
size 5361046621
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ae5809d2e73c854a14b3a5a4890bb23bf7c599e5f77b98ec670ad44c334cbade
|
3 |
size 5361046621
|
model-00002-of-00002.safetensors
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
size 2145161125
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:4c19cca1c9c8fcc41a7ad25d425c7bc9cd9d47b3739f001218cebd254c26f8c2
|
3 |
size 2145161125
|
preprocessor_config.json
CHANGED
@@ -1,21 +1,4 @@
|
|
1 |
{
|
2 |
-
"_valid_processor_keys": [
|
3 |
-
"images",
|
4 |
-
"do_resize",
|
5 |
-
"size",
|
6 |
-
"resample",
|
7 |
-
"do_center_crop",
|
8 |
-
"crop_size",
|
9 |
-
"do_rescale",
|
10 |
-
"rescale_factor",
|
11 |
-
"do_normalize",
|
12 |
-
"image_mean",
|
13 |
-
"image_std",
|
14 |
-
"do_convert_rgb",
|
15 |
-
"return_tensors",
|
16 |
-
"data_format",
|
17 |
-
"input_data_format"
|
18 |
-
],
|
19 |
"crop_size": {
|
20 |
"height": 336,
|
21 |
"width": 336
|
|
|
1 |
{
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
"crop_size": {
|
3 |
"height": 336,
|
4 |
"width": 336
|
processor_config.json
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"image_token": "<image>",
|
3 |
+
"patch_size": null,
|
4 |
+
"processor_class": "LlavaProcessor",
|
5 |
+
"vision_feature_select_strategy": null
|
6 |
+
}
|
tokenizer.json
CHANGED
The diff for this file is too large to render.
See raw diff
|
|
tokenizer_config.json
CHANGED
@@ -1,6 +1,7 @@
|
|
1 |
{
|
2 |
"add_bos_token": true,
|
3 |
"add_eos_token": false,
|
|
|
4 |
"added_tokens_decoder": {
|
5 |
"0": {
|
6 |
"content": "<unk>",
|
@@ -46,6 +47,7 @@
|
|
46 |
"bos_token": "<s>",
|
47 |
"clean_up_tokenization_spaces": false,
|
48 |
"eos_token": "</s>",
|
|
|
49 |
"model_max_length": 1000000000000000019884624838656,
|
50 |
"pad_token": "<pad>",
|
51 |
"padding_side": "left",
|
|
|
1 |
{
|
2 |
"add_bos_token": true,
|
3 |
"add_eos_token": false,
|
4 |
+
"add_prefix_space": null,
|
5 |
"added_tokens_decoder": {
|
6 |
"0": {
|
7 |
"content": "<unk>",
|
|
|
47 |
"bos_token": "<s>",
|
48 |
"clean_up_tokenization_spaces": false,
|
49 |
"eos_token": "</s>",
|
50 |
+
"legacy": false,
|
51 |
"model_max_length": 1000000000000000019884624838656,
|
52 |
"pad_token": "<pad>",
|
53 |
"padding_side": "left",
|