Commit 378fd45 (verified) by aiden200 · 1 Parent(s): f6f4af3

Model save
README.md CHANGED
@@ -1,3 +1,59 @@
- ---
- license: mit
- ---
+ ---
+ license: apache-2.0
+ base_model: lmms-lab/llava-onevision-qwen2-7b-ov
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: aha
+   results: []
+ library_name: peft
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # aha
+
+ This model is a fine-tuned version of [lmms-lab/llava-onevision-qwen2-7b-ov](https://huggingface.co/lmms-lab/llava-onevision-qwen2-7b-ov) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 2e-05
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 2
+ - total_train_batch_size: 2
+ - total_eval_batch_size: 2
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.05
+ - num_epochs: 1.0
+
+ ### Training results
+
+ ### Framework versions
+
+ - PEFT 0.4.0
+ - Transformers 4.40.0.dev0
+ - Pytorch 2.5.1+cu124
+ - Datasets 2.16.1
+ - Tokenizers 0.15.2
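
For reference, a minimal sketch of how the hyperparameters above map onto the `transformers` `TrainingArguments` API. The argument names are the standard ones; the output directory and any accelerate/DeepSpeed launcher plumbing for the 2-GPU run are assumptions, not taken from this commit.

```python
from transformers import TrainingArguments

# Sketch of the run configuration reported in the card.
# Per-device batch size 1 on 2 GPUs yields the reported total batch size of 2.
args = TrainingArguments(
    output_dir="aha",                  # assumed; the card only gives the model name
    learning_rate=2e-5,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    seed=42,
    lr_scheduler_type="cosine",
    warmup_ratio=0.05,
    num_train_epochs=1.0,
    adam_beta1=0.9,                    # Adam betas=(0.9, 0.999) from the card
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```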
adapter_config.json ADDED
@@ -0,0 +1,25 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "lmms-lab/llava-onevision-qwen2-7b-ov",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "modules_to_save": [
+     "connector",
+     "mm_projector",
+     "lm_head",
+     "informative_head",
+     "relevance_head",
+     "uncertainty_head"
+   ],
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": "model\\.layers.*(q_proj|k_proj|v_proj|o_proj|gate_proj|up_proj|down_proj)$",
+   "task_type": "CAUSAL_LM"
+ }
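
The config above describes a LoRA adapter (r=16, alpha=32, dropout 0.05) over the attention and MLP projections, with several modules (the multimodal connector/projector, `lm_head`, and three extra prediction heads) saved in full rather than as low-rank deltas. A minimal sketch of loading it with PEFT, assuming the repo id is `aiden200/aha`; the base LLaVA-OneVision checkpoint loads through its own code base, so the base-model construction is left abstract here.

```python
from peft import PeftConfig, PeftModel

ADAPTER_ID = "aiden200/aha"  # assumed from the commit author and model name

# PeftConfig dispatches on peft_type ("LORA") and returns a LoraConfig.
config = PeftConfig.from_pretrained(ADAPTER_ID)
print(config.base_model_name_or_path)  # lmms-lab/llava-onevision-qwen2-7b-ov
print(config.target_modules)           # regex selecting q/k/v/o and MLP projections

# base_model must first be built with the LLaVA-OneVision code path;
# PeftModel then attaches the LoRA layers and restores the fully saved
# modules listed under modules_to_save.
# model = PeftModel.from_pretrained(base_model, ADAPTER_ID)
```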
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f97060c47ed48e5e64aa43e746949a6fac33f5a78ac80248269322979f04a310
+ size 1204788040
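
The weights themselves live in Git LFS, so the diff only records the pointer: a SHA-256 oid and a size of roughly 1.2 GB. Once the file is downloaded, the tensors can be inspected directly; a sketch with the `safetensors` API (the key names in the comment follow the usual PEFT convention and are assumptions):

```python
from safetensors.torch import load_file

# Loads the adapter checkpoint as a dict of name -> torch.Tensor.
state = load_file("adapter_model.safetensors")
for name, tensor in list(state.items())[:5]:
    # Expect PEFT-style keys such as "...q_proj.lora_A.weight" alongside
    # full copies of the modules_to_save entries (lm_head, relevance_head, ...).
    print(name, tuple(tensor.shape), tensor.dtype)
```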
added_tokens.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "<image>": 151646,
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
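
These ids sit at the top of the Qwen2 vocabulary, with `<image>` appended as an extra special token. A quick sanity check once the tokenizer files from this commit are in place (the repo id is an assumption):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("aiden200/aha")  # assumed repo id
assert tok.convert_tokens_to_ids("<image>") == 151646
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645
print(tok.bos_token, tok.eos_token, tok.pad_token)   # <|im_start|> <|im_end|> <|endoftext|>
```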
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     {
+       "content": "<image>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false
+     }
+   ],
+   "bos_token": "<|im_start|>",
+   "eos_token": "<|im_end|>",
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,52 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151646": {
+       "content": "<image>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<image>"
+   ],
+   "bos_token": "<|im_start|>",
+   "chat_template": "{% if messages[0]['role'] == 'system' %}{{ bos_token + 'system\n' + messages[0]['content'] + eos_token}}{% set messages = messages[1:] %}{% endif %}{% for i in range(messages | length) %}{% set message = messages[i] %}{% if message['role'] == 'user' %}{% if add_stream_query_prompt %}{{ eos_token + '\n' + bos_token + 'user\n' + message['content'] + eos_token }}{% else %}{{ '\n' + bos_token + 'user\n' + message['content'] + eos_token }}{% endif %}{% elif message['role'] == 'assistant' %}{{ '\n' + bos_token + 'assistant\n' + message['content'] + eos_token }}{% elif message['role'] == 'stream' and message['num_frames'] > 0 %}{{ '\n' + bos_token + 'stream\n' + ''.join([49 * '<image>'] * message['num_frames']) + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '\n' + bos_token + 'assistant\n' }}{% elif add_stream_prompt %}{{ '\n' + bos_token + 'stream\n' }}{% elif add_stream_generation_prompt %}{{ eos_token + '\n' + bos_token + 'assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "left",
+   "processor_class": "LlavaProcessor",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
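
The custom `chat_template` is the notable part of this config: besides the usual `system`/`user`/`assistant` turns it accepts a `stream` role, expanding each video frame into 49 `<image>` placeholders, and recognizes extra flags (`add_stream_prompt`, `add_stream_query_prompt`, `add_stream_generation_prompt`) for driving streaming inference. A sketch of rendering it; the message contents are invented for illustration, and passing a content-free `stream` message assumes the template is rendered without strict key validation:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("aiden200/aha")  # assumed repo id

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "stream", "num_frames": 2},  # rendered as 2 * 49 = 98 <image> tokens
    {"role": "user", "content": "What is happening in the video?"},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # ends with "<|im_start|>assistant\n", ready for generation
```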
train.log ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bd341176c149ec5641fe9dcb2c8115ddbaf4607a19ccacc3e7f55f6c9c56ce49
+ size 7544
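
`training_args.bin` is the pickled `TrainingArguments` object the Trainer saves with every run, so the exact configuration can be recovered from it. A sketch (on PyTorch >= 2.6, `weights_only=False` is required because the file is a pickle, not a tensor archive):

```python
import torch

# Unpickles the TrainingArguments saved alongside this checkpoint.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.lr_scheduler_type, args.num_train_epochs)
```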
vocab.json ADDED
The diff for this file is too large to render. See raw diff