YvanCarre committed
Commit 14e0a8a · verified · 1 parent: 1586360

Upload InkubaLM multi-task with adapters distill

config.json ADDED
@@ -0,0 +1,143 @@
+{
+  "_name_or_path": "YvanCarre/InkubaLM-multitask_dist",
+  "adapters": {
+    "adapters": {
+      "machine-translation": "26cd1b10db746518",
+      "sentiment-analysis": "seq_bn",
+      "xnli": "seq_bn"
+    },
+    "config_map": {
+      "26cd1b10db746518": {
+        "adapter_residual_before_ln": false,
+        "cross_adapter": false,
+        "dropout": 0.0,
+        "factorized_phm_W": true,
+        "factorized_phm_rule": false,
+        "hypercomplex_nonlinearity": "glorot-uniform",
+        "init_weights": "bert",
+        "inv_adapter": null,
+        "inv_adapter_reduction_factor": null,
+        "is_parallel": false,
+        "learn_phm": true,
+        "leave_out": [],
+        "ln_after": false,
+        "ln_before": false,
+        "mh_adapter": false,
+        "non_linearity": "relu",
+        "original_ln_after": true,
+        "original_ln_before": true,
+        "output_adapter": true,
+        "phm_bias": true,
+        "phm_c_init": "normal",
+        "phm_dim": 4,
+        "phm_init_range": 0.0001,
+        "phm_layer": false,
+        "phm_rank": 1,
+        "reduction_factor": 8,
+        "residual_before_ln": true,
+        "scaling": 1.0,
+        "shared_W_phm": false,
+        "shared_phm_rule": true,
+        "stochastic_depth": 0.0,
+        "use_gating": false
+      }
+    },
+    "fusion_config_map": {},
+    "fusion_name_map": {},
+    "fusions": {}
+  },
+  "architectures": [
+    "LlamaAdapterModel"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "auto_map": {
+    "AutoModelForCausalLM": "lelapa/InkubaLM-0.4B--vulavulaslm.VulavulaLlamaForCausalLM"
+  },
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "head_dim": 64,
+  "hidden_act": "silu",
+  "hidden_size": 1024,
+  "id2label": {
+    "0": "LABEL_0",
+    "1": "LABEL_1",
+    "2": "LABEL_2"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 2752,
+  "label2id": {
+    "LABEL_0": 0,
+    "LABEL_1": 1,
+    "LABEL_2": 2
+  },
+  "max_position_embeddings": 2048,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 4,
+  "num_key_value_heads": 32,
+  "prediction_heads": {
+    "default": {
+      "activation_function": null,
+      "bias": false,
+      "dropout_prob": null,
+      "embedding_size": 2048,
+      "head_type": "causal_lm",
+      "label2id": null,
+      "layer_norm": false,
+      "layers": 1,
+      "shift_labels": true,
+      "vocab_size": 61788
+    },
+    "machine-translation": {
+      "activation_function": "gelu",
+      "bias": true,
+      "dropout_prob": null,
+      "embedding_size": 2048,
+      "head_type": "causal_lm",
+      "label2id": null,
+      "layer_norm": true,
+      "layers": 2,
+      "shift_labels": true,
+      "vocab_size": 61788
+    },
+    "sentiment-analysis": {
+      "activation_function": "tanh",
+      "bias": true,
+      "dropout_prob": null,
+      "head_type": "classification",
+      "label2id": {
+        "LABEL_0": 0,
+        "LABEL_1": 1,
+        "LABEL_2": 2
+      },
+      "layers": 2,
+      "num_labels": 3,
+      "use_pooler": false
+    },
+    "xnli": {
+      "activation_function": "tanh",
+      "bias": true,
+      "dropout_prob": null,
+      "head_type": "classification",
+      "label2id": {
+        "LABEL_0": 0,
+        "LABEL_1": 1,
+        "LABEL_2": 2
+      },
+      "layers": 2,
+      "num_labels": 3,
+      "use_pooler": false
+    }
+  },
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 10000.0,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.47.1",
+  "use_cache": true,
+  "vocab_size": 61788
+}
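Taken together, config.json describes a 4-layer distilled Llama backbone (hidden size 1024, 32 attention heads, 61,788-token vocabulary) carrying three task adapters and their prediction heads. A minimal loading sketch with the AdapterHub `adapters` library (version 1.1.0 is recorded in the adapter configs below); the subfolder-loading calls are an assumption about how this repo is meant to be consumed, not documented usage:

```python
# Loading sketch, assuming the AdapterHub `adapters` library (v1.x) and a
# local clone of the repo so the task subfolders can be passed as paths.
from adapters import AutoAdapterModel

model = AutoAdapterModel.from_pretrained(
    "YvanCarre/InkubaLM-multitask_dist",
    trust_remote_code=True,  # may be needed: auto_map points at custom lelapa/InkubaLM-0.4B code
)

# Each task subfolder bundles adapter_config.json, head_config.json and weights;
# load_adapter restores both the adapter and its prediction head.
for task in ("machine-translation", "sentiment-analysis", "xnli"):
    model.load_adapter(f"./{task}", load_as=task)

# Activate one task at a time; this also routes outputs through its head.
model.set_active_adapters("sentiment-analysis")
```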
generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "transformers_version": "4.47.1"
+}
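generation_config.json only pins the BOS/EOS token ids (1 and 2). Continuing the sketch above, a hedged example of generating with the machine-translation head active; the prompt format is hypothetical, since the repo does not document one:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("YvanCarre/InkubaLM-multitask_dist")
model.set_active_adapters("machine-translation")

# bos_token_id=1 / eos_token_id=2 are picked up from generation_config.json.
inputs = tok("Translate to Swahili: Good morning.", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=64)
print(tok.decode(out[0], skip_special_tokens=True))
```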
machine-translation/adapter_config.json ADDED
@@ -0,0 +1,42 @@
+{
+  "config": {
+    "adapter_residual_before_ln": false,
+    "cross_adapter": false,
+    "dropout": 0.0,
+    "factorized_phm_W": true,
+    "factorized_phm_rule": false,
+    "hypercomplex_nonlinearity": "glorot-uniform",
+    "init_weights": "bert",
+    "inv_adapter": null,
+    "inv_adapter_reduction_factor": null,
+    "is_parallel": false,
+    "learn_phm": true,
+    "leave_out": [],
+    "ln_after": false,
+    "ln_before": false,
+    "mh_adapter": false,
+    "non_linearity": "relu",
+    "original_ln_after": true,
+    "original_ln_before": true,
+    "output_adapter": true,
+    "phm_bias": true,
+    "phm_c_init": "normal",
+    "phm_dim": 4,
+    "phm_init_range": 0.0001,
+    "phm_layer": false,
+    "phm_rank": 1,
+    "reduction_factor": 8,
+    "residual_before_ln": true,
+    "scaling": 1.0,
+    "shared_W_phm": false,
+    "shared_phm_rule": true,
+    "stochastic_depth": 0.0,
+    "use_gating": false
+  },
+  "hidden_size": 1024,
+  "model_class": "LlamaAdapterModel",
+  "model_name": "YvanCarre/InkubaLM-multitask_dist",
+  "model_type": "llama",
+  "name": "machine-translation",
+  "version": "adapters.1.1.0"
+}
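With reduction_factor 8 on a hidden size of 1024, this is a sequential bottleneck adapter with a 128-dimensional bottleneck in each of the 4 layers. A back-of-envelope size check, assuming one biased down/up projection pair per layer and float32 storage, lands a few kilobytes under the 4,219,270-byte pytorch_adapter.bin listed below (the remainder would be pickle framing):

```python
hidden, reduction, n_layers = 1024, 8, 4
bottleneck = hidden // reduction                                 # 128
per_layer = (hidden * bottleneck + bottleneck) + (bottleneck * hidden + hidden)
total_params = per_layer * n_layers                              # 1,053,184 parameters
print(total_params * 4)                                          # 4,212,736 bytes ≈ 4,219,270
```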
machine-translation/head_config.json ADDED
@@ -0,0 +1,20 @@
+{
+  "config": {
+    "activation_function": "gelu",
+    "bias": true,
+    "dropout_prob": null,
+    "embedding_size": 2048,
+    "head_type": "causal_lm",
+    "label2id": null,
+    "layer_norm": true,
+    "layers": 2,
+    "shift_labels": true,
+    "vocab_size": 61788
+  },
+  "hidden_size": 1024,
+  "model_class": "LlamaAdapterModel",
+  "model_name": "YvanCarre/InkubaLM-multitask_dist",
+  "model_type": "llama",
+  "name": "machine-translation",
+  "version": "adapters.1.1.0"
+}
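A 2-layer causal LM head with embedding_size 2048 suggests the head first maps the 1024-dim hidden state into the 2048-dim embedding space and then projects to the 61,788-token vocabulary. Under that assumed layout, a float32 count comes within ~3 KB of the 514,830,390-byte pytorch_model_head.bin below:

```python
# Assumed layout: Linear(1024 -> 2048) + LayerNorm(2048) + Linear(2048 -> 61788).
hidden, emb, vocab = 1024, 2048, 61788
params = (hidden * emb + emb) + 2 * emb + (emb * vocab + vocab)
print(params, params * 4)  # 128,706,908 params -> 514,827,632 bytes ≈ 514,830,390
```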
machine-translation/pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fb88569f3ed5120ef33ce370b82764eee4ba3fa0ebf1bc96beb51d86abad020
+size 4219270
machine-translation/pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16c21fff27dfdd80968f25262ffd61eb697fa424946084e19f16f0e127dc6f42
+size 514830390
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee764c394e1d679cef46a89d5c2d0d70054c869d166541c4fbe12f63a8297b5e
+size 1560475656
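At the declared float32 dtype, the 1,560,475,656-byte checkpoint works out to roughly 390M parameters, consistent with the InkubaLM-0.4B base named in auto_map:

```python
print(1_560_475_656 / 4)  # ≈ 3.9e8 float32 parameters (~0.39B)
```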
sentiment-analysis/adapter_config.json ADDED
@@ -0,0 +1,42 @@
+{
+  "config": {
+    "adapter_residual_before_ln": false,
+    "cross_adapter": false,
+    "dropout": 0.0,
+    "factorized_phm_W": true,
+    "factorized_phm_rule": false,
+    "hypercomplex_nonlinearity": "glorot-uniform",
+    "init_weights": "bert",
+    "inv_adapter": null,
+    "inv_adapter_reduction_factor": null,
+    "is_parallel": false,
+    "learn_phm": true,
+    "leave_out": [],
+    "ln_after": false,
+    "ln_before": false,
+    "mh_adapter": false,
+    "non_linearity": "relu",
+    "original_ln_after": true,
+    "original_ln_before": true,
+    "output_adapter": true,
+    "phm_bias": true,
+    "phm_c_init": "normal",
+    "phm_dim": 4,
+    "phm_init_range": 0.0001,
+    "phm_layer": false,
+    "phm_rank": 1,
+    "reduction_factor": 16,
+    "residual_before_ln": true,
+    "scaling": 1.0,
+    "shared_W_phm": false,
+    "shared_phm_rule": true,
+    "stochastic_depth": 0.0,
+    "use_gating": false
+  },
+  "hidden_size": 1024,
+  "model_class": "LlamaAdapterModel",
+  "model_name": "YvanCarre/InkubaLM-multitask_dist",
+  "model_type": "llama",
+  "name": "sentiment-analysis",
+  "version": "adapters.1.1.0"
+}
sentiment-analysis/head_config.json ADDED
@@ -0,0 +1,22 @@
+{
+  "config": {
+    "activation_function": "tanh",
+    "bias": true,
+    "dropout_prob": null,
+    "head_type": "classification",
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1,
+      "LABEL_2": 2
+    },
+    "layers": 2,
+    "num_labels": 3,
+    "use_pooler": false
+  },
+  "hidden_size": 1024,
+  "model_class": "LlamaAdapterModel",
+  "model_name": "YvanCarre/InkubaLM-multitask_dist",
+  "model_type": "llama",
+  "name": "sentiment-analysis",
+  "version": "adapters.1.1.0"
+}
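The sentiment head is a 2-layer tanh classifier over three generic labels (LABEL_0..LABEL_2); the mapping to concrete sentiment classes is not recorded anywhere in the repo. A hedged inference sketch, continuing the loading example above (the input sentence is an invented placeholder):

```python
import torch

model.set_active_adapters("sentiment-analysis")
enc = tok("Ninafurahi sana leo!", return_tensors="pt")
with torch.no_grad():
    logits = model(**enc).logits      # shape (1, 3)
pred = logits.argmax(-1).item()
print(model.config.id2label[pred])    # LABEL_0/1/2 only; no class names stored
```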
sentiment-analysis/pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d72b8753b233b64a56c79d74a1a0c6a96013550d61fb2f774491000b213bc95
+size 2121030
sentiment-analysis/pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69ce5a09b8d3f7bf06ae3cfe07e5c442c6b4ff7384873128b3f4174378add5db
+size 4212904
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": "</s>",
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
The diff for this file is too large to render.
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c41fcc6d44fcc4e8269e41dffe0123687baf800bd95a9c8b5d48abd9cb8971b
+size 991189
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "add_prefix_space": true,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "extra_special_tokens": {},
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
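Two details worth flagging: model_max_length is the transformers "unset" sentinel (~1e30), so the tokenizer imposes no length limit of its own and truncation should be bounded explicitly (the model's max_position_embeddings is 2048), and pad_token reuses </s>. Continuing the earlier sketches:

```python
enc = tok(
    ["a short example", "a slightly longer example"],
    padding=True,       # pads with </s>, since pad_token == eos_token here
    truncation=True,
    max_length=2048,    # the sentinel model_max_length enforces nothing
    return_tensors="pt",
)
```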
xnli/adapter_config.json ADDED
@@ -0,0 +1,42 @@
+{
+  "config": {
+    "adapter_residual_before_ln": false,
+    "cross_adapter": false,
+    "dropout": 0.0,
+    "factorized_phm_W": true,
+    "factorized_phm_rule": false,
+    "hypercomplex_nonlinearity": "glorot-uniform",
+    "init_weights": "bert",
+    "inv_adapter": null,
+    "inv_adapter_reduction_factor": null,
+    "is_parallel": false,
+    "learn_phm": true,
+    "leave_out": [],
+    "ln_after": false,
+    "ln_before": false,
+    "mh_adapter": false,
+    "non_linearity": "relu",
+    "original_ln_after": true,
+    "original_ln_before": true,
+    "output_adapter": true,
+    "phm_bias": true,
+    "phm_c_init": "normal",
+    "phm_dim": 4,
+    "phm_init_range": 0.0001,
+    "phm_layer": false,
+    "phm_rank": 1,
+    "reduction_factor": 16,
+    "residual_before_ln": true,
+    "scaling": 1.0,
+    "shared_W_phm": false,
+    "shared_phm_rule": true,
+    "stochastic_depth": 0.0,
+    "use_gating": false
+  },
+  "hidden_size": 1024,
+  "model_class": "LlamaAdapterModel",
+  "model_name": "YvanCarre/InkubaLM-multitask_dist",
+  "model_type": "llama",
+  "name": "xnli",
+  "version": "adapters.1.1.0"
+}
xnli/head_config.json ADDED
@@ -0,0 +1,22 @@
+{
+  "config": {
+    "activation_function": "tanh",
+    "bias": true,
+    "dropout_prob": null,
+    "head_type": "classification",
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1,
+      "LABEL_2": 2
+    },
+    "layers": 2,
+    "num_labels": 3,
+    "use_pooler": false
+  },
+  "hidden_size": 1024,
+  "model_class": "LlamaAdapterModel",
+  "model_name": "YvanCarre/InkubaLM-multitask_dist",
+  "model_type": "llama",
+  "name": "xnli",
+  "version": "adapters.1.1.0"
+}
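The xnli head mirrors the sentiment one; its three labels presumably follow the usual entailment/neutral/contradiction order, though the config only stores LABEL_0..LABEL_2. For NLI, premise and hypothesis are normally encoded as a sentence pair; a hedged sketch with invented example sentences:

```python
model.set_active_adapters("xnli")
enc = tok(
    "Mvua inanyesha sana.",   # premise (placeholder)
    "Hali ya hewa ni kavu.",  # hypothesis (placeholder)
    return_tensors="pt",
)
with torch.no_grad():
    pred = model(**enc).logits.argmax(-1).item()
print(model.config.id2label[pred])
```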
xnli/pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b5cea9826bf481927db306e6b4ca93f1509567126ff1cbdd59d8c8a31180ae2a
+size 2120838
xnli/pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:633075e34e4aa22ec66f304f30f45a892556e73c95681e45b8923d31ff93a8ae
+size 4212840