Upload model

- README.md +40 -0
- a,b/adapter_fusion_config.json +20 -0
- a,b/pytorch_model_adapter_fusion.bin +3 -0
- a/adapter_config.json +42 -0
- a/pytorch_adapter.bin +3 -0
- adapter_setup.json +54 -0
- b/adapter_config.json +42 -0
- b/pytorch_adapter.bin +3 -0
- c/adapter_config.json +42 -0
- c/pytorch_adapter.bin +3 -0
- head_a/head_config.json +21 -0
- head_a/pytorch_model_head.bin +3 -0
- head_b/head_config.json +21 -0
- head_b/pytorch_model_head.bin +3 -0
README.md
ADDED
@@ -0,0 +1,40 @@
+---
+tags:
+- roberta
+- adapter-transformers
+---
+
+# Adapter `calpt/random_adapter_setup_test` for roberta-base
+
+An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the None dataset.
+
+This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library.
+
+## Usage
+
+First, install `adapters`:
+
+```
+pip install -U adapters
+```
+
+Now, the adapter can be loaded and activated like this:
+
+```python
+from adapters import AutoAdapterModel
+
+model = AutoAdapterModel.from_pretrained("roberta-base")
+adapter_name = model.load_adapter_setup("calpt/random_adapter_setup_test", set_active=True)
+```
+
+## Architecture & Training
+
+<!-- Add some description here -->
+
+## Evaluation results
+
+<!-- Add some description here -->
+
+## Citation
+
+<!-- Add some description here -->
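Since the setup serialized in adapter_setup.json below batch-splits between two classification heads, a natural follow-up to the README snippet is a batch of two examples, one per head. This is a hedged usage sketch, not part of the uploaded README; the tokenizer lines are standard transformers usage:

```python
from adapters import AutoAdapterModel
from transformers import AutoTokenizer

model = AutoAdapterModel.from_pretrained("roberta-base")
model.load_adapter_setup("calpt/random_adapter_setup_test", set_active=True)

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
# With batch_sizes=[1, 1] in the head setup, the first example should be
# scored by head_a and the second by head_b.
inputs = tokenizer(["first example", "second example"],
                   return_tensors="pt", padding=True)
outputs = model(**inputs)
```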
a,b/adapter_fusion_config.json
ADDED
@@ -0,0 +1,20 @@
+{
+  "config": {
+    "dropout_prob": null,
+    "key": true,
+    "query": true,
+    "query_before_ln": false,
+    "regularization": true,
+    "residual_before": false,
+    "temperature": false,
+    "value": true,
+    "value_before_softmax": true,
+    "value_initialized": true
+  },
+  "hidden_size": 768,
+  "model_class": "RobertaAdapterModel",
+  "model_name": "roberta-base",
+  "model_type": "roberta",
+  "name": "a,b",
+  "version": "adapters.1.0.1"
+}
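The key/query/value settings in this file appear to match the library's default ("dynamic") AdapterFusion configuration. A minimal sketch of creating such a fusion layer, assuming the current adapters composition API; the adapter names `a` and `b` come from this repo:

```python
from adapters import AutoAdapterModel
from adapters.composition import Fuse

# Load the base model and add the two bottleneck adapters to be fused.
model = AutoAdapterModel.from_pretrained("roberta-base")
model.add_adapter("a")
model.add_adapter("b")

# Add an AdapterFusion layer over "a" and "b"; the library's default
# fusion config appears to correspond to the settings recorded above.
model.add_adapter_fusion(Fuse("a", "b"))
```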
a,b/pytorch_model_adapter_fusion.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d8bcecfc2e75b242f8543b7c0ec1a62daa03fe188b4a55d5c4429f4a285e3ea2
+size 85030562
a/adapter_config.json
ADDED
@@ -0,0 +1,42 @@
+{
+  "config": {
+    "adapter_residual_before_ln": false,
+    "cross_adapter": false,
+    "dropout": 0.0,
+    "factorized_phm_W": true,
+    "factorized_phm_rule": false,
+    "hypercomplex_nonlinearity": "glorot-uniform",
+    "init_weights": "bert",
+    "inv_adapter": null,
+    "inv_adapter_reduction_factor": null,
+    "is_parallel": false,
+    "learn_phm": true,
+    "leave_out": [],
+    "ln_after": false,
+    "ln_before": false,
+    "mh_adapter": false,
+    "non_linearity": "relu",
+    "original_ln_after": true,
+    "original_ln_before": true,
+    "output_adapter": true,
+    "phm_bias": true,
+    "phm_c_init": "normal",
+    "phm_dim": 4,
+    "phm_init_range": 0.0001,
+    "phm_layer": false,
+    "phm_rank": 1,
+    "reduction_factor": 16,
+    "residual_before_ln": true,
+    "scaling": 1.0,
+    "shared_W_phm": false,
+    "shared_phm_rule": true,
+    "stochastic_depth": 0.0,
+    "use_gating": false
+  },
+  "hidden_size": 768,
+  "model_class": "RobertaAdapterModel",
+  "model_name": "roberta-base",
+  "model_type": "roberta",
+  "name": "a",
+  "version": "adapters.1.0.1"
+}
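For orientation: `mh_adapter` false with `output_adapter` true, relu, and `reduction_factor` 16 looks like the library's sequential bottleneck (Pfeiffer-style) configuration. A hedged sketch of creating an adapter with this config, assuming the `SeqBnConfig` class from current adapters releases:

```python
from adapters import AutoAdapterModel, SeqBnConfig

model = AutoAdapterModel.from_pretrained("roberta-base")

# A single output-side bottleneck per layer, matching the file above;
# reduction_factor=16 gives a 768 / 16 = 48-dimensional bottleneck.
model.add_adapter("a", config=SeqBnConfig(reduction_factor=16, non_linearity="relu"))
```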
a/pytorch_adapter.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f79ecbb07db177ceaa80e80a5e47fe26826e2ef24a7c6bc5215e5700820807bd
+size 3594726
adapter_setup.json
ADDED
@@ -0,0 +1,54 @@
+{
+  "adapter_setup": {
+    "type": "Stack",
+    "children": [
+      {
+        "type": "Fuse",
+        "children": [
+          {
+            "type": "single",
+            "children": [
+              "a"
+            ]
+          },
+          {
+            "type": "single",
+            "children": [
+              "b"
+            ]
+          }
+        ]
+      },
+      {
+        "type": "single",
+        "children": [
+          "c"
+        ]
+      }
+    ]
+  },
+  "head_setup": {
+    "type": "BatchSplit",
+    "children": [
+      {
+        "type": "single",
+        "children": [
+          "head_a"
+        ]
+      },
+      {
+        "type": "single",
+        "children": [
+          "head_b"
+        ]
+      }
+    ],
+    "kwargs": {
+      "batch_sizes": [
+        1,
+        1
+      ]
+    }
+  },
+  "version": "adapters.1.0.1"
+}
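This file is the serialized composition: the adapter flow is a Stack of Fuse(a, b) followed by adapter c, and the head setup is a BatchSplit over head_a and head_b with batch sizes [1, 1]. Below is a minimal sketch of how an equivalent setup could be built with the library's composition blocks, assuming the standard adapters 1.x API; it is an illustration, not the script that produced this repo:

```python
from adapters import AutoAdapterModel
from adapters.composition import BatchSplit, Fuse, Stack

model = AutoAdapterModel.from_pretrained("roberta-base")

# Three bottleneck adapters; "a" and "b" are additionally joined by a fusion layer.
for name in ("a", "b", "c"):
    model.add_adapter(name)
model.add_adapter_fusion(Fuse("a", "b"))

# Two binary classification heads (hyperparameters per the head configs below).
model.add_classification_head("head_a", num_labels=2)
model.add_classification_head("head_b", num_labels=2)

# Adapter flow: fuse a and b, then pass through c (the "Stack" in this file).
adapter_setup = Stack(Fuse("a", "b"), "c")
# Head flow: split each batch so one example goes to head_a, one to head_b.
head_setup = BatchSplit("head_a", "head_b", batch_sizes=[1, 1])

model.set_active_adapters(adapter_setup)
model.active_head = head_setup
# Presumably the repo was then written out with something like:
# model.save_adapter_setup("random_adapter_setup_test", adapter_setup, head_setup=head_setup)
```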
b/adapter_config.json
ADDED
@@ -0,0 +1,42 @@
+{
+  "config": {
+    "adapter_residual_before_ln": false,
+    "cross_adapter": false,
+    "dropout": 0.0,
+    "factorized_phm_W": true,
+    "factorized_phm_rule": false,
+    "hypercomplex_nonlinearity": "glorot-uniform",
+    "init_weights": "bert",
+    "inv_adapter": null,
+    "inv_adapter_reduction_factor": null,
+    "is_parallel": false,
+    "learn_phm": true,
+    "leave_out": [],
+    "ln_after": false,
+    "ln_before": false,
+    "mh_adapter": false,
+    "non_linearity": "relu",
+    "original_ln_after": true,
+    "original_ln_before": true,
+    "output_adapter": true,
+    "phm_bias": true,
+    "phm_c_init": "normal",
+    "phm_dim": 4,
+    "phm_init_range": 0.0001,
+    "phm_layer": false,
+    "phm_rank": 1,
+    "reduction_factor": 16,
+    "residual_before_ln": true,
+    "scaling": 1.0,
+    "shared_W_phm": false,
+    "shared_phm_rule": true,
+    "stochastic_depth": 0.0,
+    "use_gating": false
+  },
+  "hidden_size": 768,
+  "model_class": "RobertaAdapterModel",
+  "model_name": "roberta-base",
+  "model_type": "roberta",
+  "name": "b",
+  "version": "adapters.1.0.1"
+}
b/pytorch_adapter.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59155aac6aa69c72d6fae6ae4cabc0c27ac3b1473f52c1023965042ae24521c1
+size 3594726
c/adapter_config.json
ADDED
@@ -0,0 +1,42 @@
+{
+  "config": {
+    "adapter_residual_before_ln": false,
+    "cross_adapter": false,
+    "dropout": 0.0,
+    "factorized_phm_W": true,
+    "factorized_phm_rule": false,
+    "hypercomplex_nonlinearity": "glorot-uniform",
+    "init_weights": "bert",
+    "inv_adapter": null,
+    "inv_adapter_reduction_factor": null,
+    "is_parallel": false,
+    "learn_phm": true,
+    "leave_out": [],
+    "ln_after": false,
+    "ln_before": false,
+    "mh_adapter": false,
+    "non_linearity": "relu",
+    "original_ln_after": true,
+    "original_ln_before": true,
+    "output_adapter": true,
+    "phm_bias": true,
+    "phm_c_init": "normal",
+    "phm_dim": 4,
+    "phm_init_range": 0.0001,
+    "phm_layer": false,
+    "phm_rank": 1,
+    "reduction_factor": 16,
+    "residual_before_ln": true,
+    "scaling": 1.0,
+    "shared_W_phm": false,
+    "shared_phm_rule": true,
+    "stochastic_depth": 0.0,
+    "use_gating": false
+  },
+  "hidden_size": 768,
+  "model_class": "RobertaAdapterModel",
+  "model_name": "roberta-base",
+  "model_type": "roberta",
+  "name": "c",
+  "version": "adapters.1.0.1"
+}
c/pytorch_adapter.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cae3b805ce309fc7cf7e833a3d50c8ab215439a6ca843aaa1e4931834df8608f
+size 3594726
head_a/head_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "config": {
+    "activation_function": "tanh",
+    "bias": true,
+    "dropout_prob": null,
+    "head_type": "classification",
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layers": 2,
+    "num_labels": 2,
+    "use_pooler": false
+  },
+  "hidden_size": 768,
+  "model_class": "RobertaAdapterModel",
+  "model_name": "roberta-base",
+  "model_type": "roberta",
+  "name": "head_a",
+  "version": "adapters.1.0.1"
+}
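A two-layer tanh classification head over two labels is what `add_classification_head` produces by default. A hedged sketch of recreating `head_a` from this config, assuming the standard adapters API:

```python
from adapters import AutoAdapterModel

model = AutoAdapterModel.from_pretrained("roberta-base")

# Mirrors head_config.json: 2 labels, 2 projection layers, tanh activation,
# no pooler. These appear to also be the method's defaults.
model.add_classification_head(
    "head_a",
    num_labels=2,
    layers=2,
    activation_function="tanh",
    use_pooler=False,
)
```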
head_a/pytorch_model_head.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8516d68908fffa96a06f04777599c35d9db33f4d1e36b8b33e74172688232987
+size 2370600
head_b/head_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "config": {
+    "activation_function": "tanh",
+    "bias": true,
+    "dropout_prob": null,
+    "head_type": "classification",
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layers": 2,
+    "num_labels": 2,
+    "use_pooler": false
+  },
+  "hidden_size": 768,
+  "model_class": "RobertaAdapterModel",
+  "model_name": "roberta-base",
+  "model_type": "roberta",
+  "name": "head_b",
+  "version": "adapters.1.0.1"
+}
head_b/pytorch_model_head.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b709666017acdd1cdd922167f0fef1eafa2f933a27ae22169da43ffab988ae6
+size 2370600