Update model with LoRA adapter
adapter_config.json
ADDED
@@ -0,0 +1 @@
+{"has_adapter": true, "adapter_path": "speech-lora", "adapter_name": "speech"}
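This top-level flag is presumably what downstream loading code keys off to attach the speech adapter; a minimal sketch, assuming a local checkout and that the base model loads via AutoModelForCausalLM (class choice and paths are assumptions, not from this repo):

```python
import json
from pathlib import Path

from peft import PeftModel
from transformers import AutoModelForCausalLM  # assumed base class; gemma3 may use a multimodal class

repo = Path("./gemma3-speech-checkout")  # hypothetical local clone of this repo

# Read the adapter_config.json added in this commit.
meta = json.loads((repo / "adapter_config.json").read_text())

base = AutoModelForCausalLM.from_pretrained(repo, torch_dtype="bfloat16")
if meta.get("has_adapter"):
    # adapter_path="speech-lora", adapter_name="speech"; the PEFT weights sit
    # under speech-lora/speech/ after the rename further down in this diff.
    model = PeftModel.from_pretrained(
        base,
        repo / meta["adapter_path"] / meta["adapter_name"],
        adapter_name=meta["adapter_name"],
    )
```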
config.json
CHANGED
@@ -56,6 +56,13 @@
   "initializer_range": 0.02,
   "mm_tokens_per_image": 256,
   "model_type": "gemma3",
+  "speech_lora": {
+    "dp": 0.01,
+    "layer": "((layers.*self_attn\\.(q|k|v|o)_proj)|(layers.*mlp\\.(gate|up|down)_proj))",
+    "lora_alpha": 320,
+    "r": 320,
+    "use_rslora": true
+  },
   "text_config": {
     "attention_bias": false,
     "attention_dropout": 0.0,
@@ -88,7 +95,6 @@
   },
   "torch_dtype": "bfloat16",
   "transformers_version": "4.50.0.dev0",
-  "use_cache": false,
   "vision_config": {
     "attention_dropout": 0.0,
     "hidden_act": "gelu_pytorch_tanh",
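The new `speech_lora` block mirrors PEFT `LoraConfig` hyperparameters; a sketch of the equivalent config, assuming `dp` maps to `lora_dropout` and `layer` to the `target_modules` regex (the mapping is an assumption, only the values come from this diff):

```python
from peft import LoraConfig

speech_lora = LoraConfig(
    r=320,
    lora_alpha=320,
    lora_dropout=0.01,   # "dp" above, assumed to mean dropout
    use_rslora=True,     # rank-stabilized scaling: lora_alpha / sqrt(r) instead of lora_alpha / r
    target_modules=r"((layers.*self_attn\.(q|k|v|o)_proj)|(layers.*mlp\.(gate|up|down)_proj))",
    task_type="CAUSAL_LM",
)
```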
model-00001-of-00002.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:a1ce1cb9238b56dba8ac52c728d2e62854a3b0dc601e64bd5e5b047a69bd3630
 size 4991785040
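The safetensors entries are Git LFS pointers, so the diff only touches the hash; a small sketch for verifying a downloaded shard against the new pointer (the local file path is an assumption):

```python
import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    """Stream the file and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# Expected value from the updated LFS pointer above.
expected = "a1ce1cb9238b56dba8ac52c728d2e62854a3b0dc601e64bd5e5b047a69bd3630"
assert sha256_of("model-00001-of-00002.safetensors") == expected
```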
{speech-adapter → speech-lora}/README.md
RENAMED
File without changes
{speech-adapter → speech-lora/speech}/adapter_config.json
RENAMED
@@ -12,25 +12,17 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha":
+  "lora_alpha": 320,
   "lora_bias": false,
-  "lora_dropout": 0.
+  "lora_dropout": 0.01,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r":
+  "r": 320,
   "rank_pattern": {},
   "revision": null,
-  "target_modules": [
-    "q_proj",
-    "down_proj",
-    "k_proj",
-    "gate_proj",
-    "o_proj",
-    "v_proj",
-    "up_proj"
-  ],
+  "target_modules": "((layers.*self_attn\\.(q|k|v|o)_proj)|(layers.*mlp\\.(gate|up|down)_proj))",
   "task_type": "CAUSAL_LM",
   "use_dora": false,
   "use_rslora": true
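PEFT treats a string `target_modules` as a regex checked with `re.fullmatch` against module names, so the regex here replaces the old explicit list; a quick check with hypothetical module names (real names depend on where the adapter is attached in the model tree):

```python
import re

# Regex from the new "target_modules" above.
pattern = r"((layers.*self_attn\.(q|k|v|o)_proj)|(layers.*mlp\.(gate|up|down)_proj))"

for name in [
    "layers.0.self_attn.q_proj",        # attention projection -> matches
    "layers.17.mlp.down_proj",          # MLP projection -> matches
    "layers.17.mlp.down_proj.weight",   # a parameter, not a module -> no full match
    "layers.3.input_layernorm",         # not targeted -> no full match
]:
    print(f"{name}: {bool(re.fullmatch(pattern, name))}")
```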
{speech-adapter → speech-lora/speech}/adapter_model.safetensors
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:7aacfcd0281ad18acbaac412bb2a23469adeb909011b08b384d7374cd611d423
+size 2384262280
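The renamed adapter shard is a regular safetensors file, so the LoRA A/B tensors can be inspected without loading the base model; a short sketch using the path from the rename above (local checkout assumed):

```python
from safetensors import safe_open

path = "speech-lora/speech/adapter_model.safetensors"

with safe_open(path, framework="pt", device="cpu") as f:
    for name in list(f.keys())[:5]:   # peek at a few LoRA weight names and shapes
        print(name, f.get_slice(name).get_shape())
```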