2 runs
This view is limited to 50 files because it contains too many changes.
See raw diff
- outputs/args.json +40 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/args.json +33 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/cola_bert-base-uncased_train_loss.png +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/cola_bert-base-uncased_validation_loss.png +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/logfile.log +379 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/adapter_config.json +41 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/all_results.json +1 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/all_results_val.json +1 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/eval_res.json +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/gpu_stats.json +130 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/head_config.json +21 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/pytorch_adapter.bin +3 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/pytorch_model_head.bin +3 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/special_tokens_map.json +7 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/tokenizer.json +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/tokenizer_config.json +56 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/val_res.json +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/vocab.txt +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/adapter_config.json +41 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/all_results.json +1 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/all_results_val.json +1 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/eval_res.json +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/gpu_stats.json +130 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/head_config.json +21 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/pytorch_adapter.bin +3 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/pytorch_model_head.bin +3 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/special_tokens_map.json +7 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/tokenizer.json +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/tokenizer_config.json +56 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/val_res.json +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/vocab.txt +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/adapter_config.json +41 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/all_results.json +1 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/all_results_val.json +1 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/eval_res.json +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/gpu_stats.json +130 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/head_config.json +21 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/pytorch_adapter.bin +3 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/pytorch_model_head.bin +3 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/special_tokens_map.json +7 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/tokenizer.json +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/tokenizer_config.json +56 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/val_res.json +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/vocab.txt +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/adapter_config.json +41 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/all_results.json +1 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/all_results_val.json +1 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/eval_res.json +0 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/gpu_stats.json +130 -0
- outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/head_config.json +21 -0
outputs/args.json
ADDED
@@ -0,0 +1,40 @@
+{
+  "task_name": "mrpc",
+  "train_file": null,
+  "validation_file": null,
+  "max_length": 300,
+  "pad_to_max_length": false,
+  "model_name_or_path": "roberta-base",
+  "use_slow_tokenizer": false,
+  "per_device_train_batch_size": 8,
+  "per_device_eval_batch_size": 8,
+  "learning_rate": 0.0001,
+  "weight_decay": 0.0,
+  "num_train_epochs": 3,
+  "max_train_steps": 10000,
+  "gradient_accumulation_steps": 1,
+  "lr_scheduler_type": "linear",
+  "num_warmup_steps": 0,
+  "output_dir": "./outputs",
+  "peft_method": null,
+  "seed": 42,
+  "push_to_hub": false,
+  "hub_model_id": null,
+  "hub_token": null,
+  "checkpointing_steps": "1000",
+  "resume_from_checkpoint": null,
+  "with_tracking": false,
+  "report_to": "all",
+  "ignore_mismatched_sizes": true,
+  "save": false,
+  "load_step": 999,
+  "laplace_hessian": "kron",
+  "laplace_sub": "all",
+  "laplace_prior": "homo",
+  "laplace_optim_step": 1000,
+  "testing_set": "train_val",
+  "laplace_predict": "mc_corr",
+  "lm_head": false,
+  "cache_dir": "/content/cache/huggingface/metrics/glue",
+  "step_list": []
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/args.json
ADDED
@@ -0,0 +1,33 @@
+{
+  "task_name": "cola",
+  "train_file": null,
+  "validation_file": null,
+  "max_length": 300,
+  "pad_to_max_length": false,
+  "model_name_or_path": "bert-base-uncased",
+  "use_slow_tokenizer": false,
+  "per_device_train_batch_size": 8,
+  "per_device_eval_batch_size": 8,
+  "learning_rate": 0.0001,
+  "max_grad_norm": 0.5,
+  "weight_decay": 0.0,
+  "num_train_epochs": 3,
+  "max_train_steps": 10000,
+  "gradient_accumulation_steps": 1,
+  "lr_scheduler_type": "linear",
+  "num_warmup_steps": 0,
+  "output_dir": "./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000",
+  "seed": 12345,
+  "push_to_hub": false,
+  "hub_model_id": null,
+  "hub_token": null,
+  "checkpointing_steps": "2000",
+  "resume_from_checkpoint": null,
+  "with_tracking": false,
+  "report_to": "all",
+  "ignore_mismatched_sizes": true,
+  "save_train_results": false,
+  "testing_set": "train_val",
+  "lm_head": true,
+  "leave_out": null
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/cola_bert-base-uncased_train_loss.png
ADDED
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/cola_bert-base-uncased_validation_loss.png
ADDED
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/logfile.log
ADDED
@@ -0,0 +1,379 @@
+06/01/2024 17:48:32 - INFO - __main__ - Number of labels detected = 2
+06/01/2024 17:48:40 - INFO - adapters.heads.model_mixin - Adding head 'default' with config {'head_type': 'masked_lm', 'vocab_size': 30522, 'embedding_size': 768, 'layers': 2, 'activation_function': 'gelu', 'layer_norm': True, 'bias': True, 'shift_labels': False, 'label2id': None}.
+06/01/2024 17:48:41 - INFO - __main__ - Number of labels detected = 2
+06/01/2024 17:48:41 - INFO - adapters.heads.model_mixin - Adding head 'cola' with config {'head_type': 'classification', 'num_labels': 2, 'layers': 2, 'activation_function': 'tanh', 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'use_pooler': False, 'bias': True, 'dropout_prob': None}.
+06/01/2024 17:48:41 - INFO - adapters.configuration.model_adapters_config - Adding adapter 'cola'.
+06/01/2024 17:48:41 - INFO - __main__ - ================================================================================
+Name                 Architecture         #Param      %Param  Active   Train
+--------------------------------------------------------------------------------
+cola                 bottleneck        1,789,056       1.634       1       1
+--------------------------------------------------------------------------------
+Full model                          109,482,240     100.000               0
+================================================================================
+06/01/2024 17:48:41 - INFO - __main__ - printing model
+06/01/2024 17:48:41 - INFO - __main__ - BertAdapterModel(
+  (bert): BertModel(
+    (embeddings): BertEmbeddings(
+      (word_embeddings): Embedding(30522, 768, padding_idx=0)
+      (position_embeddings): Embedding(512, 768)
+      (token_type_embeddings): Embedding(2, 768)
+      (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
+      (dropout): Dropout(p=0.1, inplace=False)
+    )
+    (encoder): BertEncoder(
+      (layer): ModuleList(
+        (0-11): 12 x BertLayer(
+          (attention): BertAttention(
+            (self): BertSelfAttentionWithAdapters(
+              (query): LoRALinearTorch(
+                in_features=768, out_features=768, bias=True
+                (loras): ModuleDict()
+              )
+              (key): LoRALinearTorch(
+                in_features=768, out_features=768, bias=True
+                (loras): ModuleDict()
+              )
+              (value): LoRALinearTorch(
+                in_features=768, out_features=768, bias=True
+                (loras): ModuleDict()
+              )
+              (dropout): Dropout(p=0.1, inplace=False)
+              (prefix_tuning): PrefixTuningLayer(
+                (prefix_gates): ModuleDict()
+                (pool): PrefixTuningPool(
+                  (prefix_tunings): ModuleDict()
+                )
+              )
+            )
+            (output): BertSelfOutputWithAdapters(
+              (dense): Linear(in_features=768, out_features=768, bias=True)
+              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
+              (dropout): Dropout(p=0.1, inplace=False)
+              (adapters): ModuleDict(
+                (cola): Adapter(
+                  (non_linearity): Activation_Function_Class(
+                    (f): SiLU()
+                  )
+                  (adapter_down): Sequential(
+                    (0): Linear(in_features=768, out_features=48, bias=True)
+                    (1): Activation_Function_Class(
+                      (f): SiLU()
+                    )
+                  )
+                  (adapter_up): Linear(in_features=48, out_features=768, bias=True)
+                  (dropout): Dropout(p=0.0, inplace=False)
+                )
+              )
+              (adapter_fusion_layer): ModuleDict()
+            )
+          )
+          (intermediate): BertIntermediate(
+            (dense): LoRALinearTorch(
+              in_features=768, out_features=3072, bias=True
+              (loras): ModuleDict()
+            )
+            (intermediate_act_fn): GELUActivation()
+          )
+          (output): BertOutputWithAdapters(
+            (dense): LoRALinearTorch(
+              in_features=3072, out_features=768, bias=True
+              (loras): ModuleDict()
+            )
+            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
+            (dropout): Dropout(p=0.1, inplace=False)
+            (adapters): ModuleDict(
+              (cola): Adapter(
+                (non_linearity): Activation_Function_Class(
+                  (f): SiLU()
+                )
+                (adapter_down): Sequential(
+                  (0): Linear(in_features=768, out_features=48, bias=True)
+                  (1): Activation_Function_Class(
+                    (f): SiLU()
+                  )
+                )
+                (adapter_up): Linear(in_features=48, out_features=768, bias=True)
+                (dropout): Dropout(p=0.0, inplace=False)
+              )
+            )
+            (adapter_fusion_layer): ModuleDict()
+          )
+        )
+      )
+    )
+    (pooler): BertPooler(
+      (dense): Linear(in_features=768, out_features=768, bias=True)
+      (activation): Tanh()
+    )
+    (invertible_adapters): ModuleDict()
+    (shared_parameters): ModuleDict()
+    (prefix_tuning): PrefixTuningPool(
+      (prefix_tunings): ModuleDict()
+    )
+    (prompt_tuning): PromptTuningLayer(
+      (base_model_embeddings): Embedding(30522, 768, padding_idx=0)
+      (prompt_tunings): ModuleDict()
+    )
+  )
+  (heads): ModuleDict(
+    (default): BertStyleMaskedLMHead(
+      (0): Linear(in_features=768, out_features=768, bias=True)
+      (1): Activation_Function_Class(
+        (f): GELUActivation()
+      )
+      (2): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
+      (3): Linear(in_features=768, out_features=30522, bias=True)
+    )
+    (cola): ClassificationHead(
+      (0): Dropout(p=0.1, inplace=False)
+      (1): Linear(in_features=768, out_features=768, bias=True)
+      (2): Activation_Function_Class(
+        (f): Tanh()
+      )
+      (3): Dropout(p=0.1, inplace=False)
+      (4): Linear(in_features=768, out_features=2, bias=True)
+    )
+  )
+)
+06/01/2024 17:48:42 - INFO - __main__ - Sample 3412 of the training set: {'input_ids': [101, 1045, 12781, 1996, 7427, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
+06/01/2024 17:48:42 - INFO - __main__ - Sample 6002 of the training set: {'input_ids': [101, 1045, 2442, 2064, 4521, 22088, 2015, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 0}.
+06/01/2024 17:48:42 - INFO - __main__ - Sample 83 of the training set: {'input_ids': [101, 1996, 7764, 22257, 2993, 2000, 1996, 2598, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 0}.
+06/01/2024 17:48:42 - INFO - __main__ - Max training steps before recalculation = 10000
+06/01/2024 17:48:42 - INFO - __main__ - num_update_steps_per_epoch initial = 855
+06/01/2024 17:48:42 - INFO - __main__ - num training epochs initial = 3
+06/01/2024 17:48:42 - INFO - __main__ - Adjusted num_train_epochs based on max_train_steps: 3
+06/01/2024 17:48:42 - INFO - __main__ - num_update_steps_per_epoch before recalculation = 855
+06/01/2024 17:48:42 - INFO - __main__ - num_update_steps_per_epoch after recalculation = 855
+06/01/2024 17:48:42 - INFO - __main__ - num training epochs before recalculation = 12
+06/01/2024 17:48:43 - INFO - __main__ - ***** Running training *****
+06/01/2024 17:48:43 - INFO - __main__ - Num examples = 6840
+06/01/2024 17:48:43 - INFO - __main__ - Num Epochs = 12
+06/01/2024 17:48:43 - INFO - __main__ - Instantaneous batch size per device = 8
+06/01/2024 17:48:43 - INFO - __main__ - Total train batch size (w. parallel, distributed & accumulation) = 8
+06/01/2024 17:48:43 - INFO - __main__ - Gradient Accumulation steps = 1
+06/01/2024 17:48:43 - INFO - __main__ - Total optimization steps = 10000
+06/01/2024 17:48:49 - INFO - __main__ - epoch 0: {'matthews_correlation': -0.02929206145132745}
+06/01/2024 17:48:49 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/adapter_config.json
+06/01/2024 17:48:49 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/pytorch_adapter.bin
+06/01/2024 17:48:49 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/head_config.json
+06/01/2024 17:48:49 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/pytorch_model_head.bin
+06/01/2024 17:48:54 - INFO - __main__ - epoch 0: {'matthews_correlation': 0.0}
+06/01/2024 17:48:54 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/adapter_config.json
+06/01/2024 17:48:54 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/pytorch_adapter.bin
+06/01/2024 17:48:54 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/head_config.json
+06/01/2024 17:48:54 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/pytorch_model_head.bin
+06/01/2024 17:51:04 - INFO - __main__ - Number of labels detected = 2
+06/01/2024 17:51:05 - INFO - adapters.heads.model_mixin - Adding head 'default' with config {'head_type': 'masked_lm', 'vocab_size': 30522, 'embedding_size': 768, 'layers': 2, 'activation_function': 'gelu', 'layer_norm': True, 'bias': True, 'shift_labels': False, 'label2id': None}.
+06/01/2024 17:51:06 - INFO - __main__ - Number of labels detected = 2
+06/01/2024 17:51:07 - INFO - adapters.heads.model_mixin - Adding head 'cola' with config {'head_type': 'classification', 'num_labels': 2, 'layers': 2, 'activation_function': 'tanh', 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'use_pooler': False, 'bias': True, 'dropout_prob': None}.
+06/01/2024 17:51:07 - INFO - adapters.configuration.model_adapters_config - Adding adapter 'cola'.
+06/01/2024 17:51:07 - INFO - __main__ - ================================================================================
+Name                 Architecture         #Param      %Param  Active   Train
+--------------------------------------------------------------------------------
+cola                 bottleneck        1,789,056       1.634       1       1
+--------------------------------------------------------------------------------
+Full model                          109,482,240     100.000               0
+================================================================================
+06/01/2024 17:51:07 - INFO - __main__ - printing model
+06/01/2024 17:51:07 - INFO - __main__ - BertAdapterModel(
+  (bert): BertModel(
+    (embeddings): BertEmbeddings(
+      (word_embeddings): Embedding(30522, 768, padding_idx=0)
+      (position_embeddings): Embedding(512, 768)
+      (token_type_embeddings): Embedding(2, 768)
+      (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
+      (dropout): Dropout(p=0.1, inplace=False)
+    )
+    (encoder): BertEncoder(
+      (layer): ModuleList(
+        (0-11): 12 x BertLayer(
+          (attention): BertAttention(
+            (self): BertSelfAttentionWithAdapters(
+              (query): LoRALinearTorch(
+                in_features=768, out_features=768, bias=True
+                (loras): ModuleDict()
+              )
+              (key): LoRALinearTorch(
+                in_features=768, out_features=768, bias=True
+                (loras): ModuleDict()
+              )
+              (value): LoRALinearTorch(
+                in_features=768, out_features=768, bias=True
+                (loras): ModuleDict()
+              )
+              (dropout): Dropout(p=0.1, inplace=False)
+              (prefix_tuning): PrefixTuningLayer(
+                (prefix_gates): ModuleDict()
+                (pool): PrefixTuningPool(
+                  (prefix_tunings): ModuleDict()
+                )
+              )
+            )
+            (output): BertSelfOutputWithAdapters(
+              (dense): Linear(in_features=768, out_features=768, bias=True)
+              (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
+              (dropout): Dropout(p=0.1, inplace=False)
+              (adapters): ModuleDict(
+                (cola): Adapter(
+                  (non_linearity): Activation_Function_Class(
+                    (f): SiLU()
+                  )
+                  (adapter_down): Sequential(
+                    (0): Linear(in_features=768, out_features=48, bias=True)
+                    (1): Activation_Function_Class(
+                      (f): SiLU()
+                    )
+                  )
+                  (adapter_up): Linear(in_features=48, out_features=768, bias=True)
+                  (dropout): Dropout(p=0.0, inplace=False)
+                )
+              )
+              (adapter_fusion_layer): ModuleDict()
+            )
+          )
+          (intermediate): BertIntermediate(
+            (dense): LoRALinearTorch(
+              in_features=768, out_features=3072, bias=True
+              (loras): ModuleDict()
+            )
+            (intermediate_act_fn): GELUActivation()
+          )
+          (output): BertOutputWithAdapters(
+            (dense): LoRALinearTorch(
+              in_features=3072, out_features=768, bias=True
+              (loras): ModuleDict()
+            )
+            (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
+            (dropout): Dropout(p=0.1, inplace=False)
+            (adapters): ModuleDict(
+              (cola): Adapter(
+                (non_linearity): Activation_Function_Class(
+                  (f): SiLU()
+                )
+                (adapter_down): Sequential(
+                  (0): Linear(in_features=768, out_features=48, bias=True)
+                  (1): Activation_Function_Class(
+                    (f): SiLU()
+                  )
+                )
+                (adapter_up): Linear(in_features=48, out_features=768, bias=True)
+                (dropout): Dropout(p=0.0, inplace=False)
+              )
+            )
+            (adapter_fusion_layer): ModuleDict()
+          )
+        )
+      )
+    )
+    (pooler): BertPooler(
+      (dense): Linear(in_features=768, out_features=768, bias=True)
+      (activation): Tanh()
+    )
+    (invertible_adapters): ModuleDict()
+    (shared_parameters): ModuleDict()
+    (prefix_tuning): PrefixTuningPool(
+      (prefix_tunings): ModuleDict()
+    )
+    (prompt_tuning): PromptTuningLayer(
+      (base_model_embeddings): Embedding(30522, 768, padding_idx=0)
+      (prompt_tunings): ModuleDict()
+    )
+  )
+  (heads): ModuleDict(
+    (default): BertStyleMaskedLMHead(
+      (0): Linear(in_features=768, out_features=768, bias=True)
+      (1): Activation_Function_Class(
+        (f): GELUActivation()
+      )
+      (2): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
+      (3): Linear(in_features=768, out_features=30522, bias=True)
+    )
+    (cola): ClassificationHead(
+      (0): Dropout(p=0.1, inplace=False)
+      (1): Linear(in_features=768, out_features=768, bias=True)
+      (2): Activation_Function_Class(
+        (f): Tanh()
+      )
+      (3): Dropout(p=0.1, inplace=False)
+      (4): Linear(in_features=768, out_features=2, bias=True)
+    )
+  )
+)
+06/01/2024 17:51:07 - INFO - __main__ - Sample 3412 of the training set: {'input_ids': [101, 1045, 12781, 1996, 7427, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
+06/01/2024 17:51:07 - INFO - __main__ - Sample 6002 of the training set: {'input_ids': [101, 1045, 2442, 2064, 4521, 22088, 2015, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 0}.
+06/01/2024 17:51:07 - INFO - __main__ - Sample 83 of the training set: {'input_ids': [101, 1996, 7764, 22257, 2993, 2000, 1996, 2598, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 0}.
+06/01/2024 17:51:07 - INFO - __main__ - Max training steps before recalculation = 10000
+06/01/2024 17:51:07 - INFO - __main__ - num_update_steps_per_epoch initial = 855
+06/01/2024 17:51:07 - INFO - __main__ - num training epochs initial = 3
+06/01/2024 17:51:07 - INFO - __main__ - Adjusted num_train_epochs based on max_train_steps: 3
+06/01/2024 17:51:08 - INFO - __main__ - num_update_steps_per_epoch before recalculation = 855
+06/01/2024 17:51:08 - INFO - __main__ - num_update_steps_per_epoch after recalculation = 855
+06/01/2024 17:51:08 - INFO - __main__ - num training epochs before recalculation = 12
+06/01/2024 17:51:09 - INFO - __main__ - ***** Running training *****
+06/01/2024 17:51:09 - INFO - __main__ - Num examples = 6840
+06/01/2024 17:51:09 - INFO - __main__ - Num Epochs = 12
+06/01/2024 17:51:09 - INFO - __main__ - Instantaneous batch size per device = 8
+06/01/2024 17:51:09 - INFO - __main__ - Total train batch size (w. parallel, distributed & accumulation) = 8
+06/01/2024 17:51:09 - INFO - __main__ - Gradient Accumulation steps = 1
+06/01/2024 17:51:09 - INFO - __main__ - Total optimization steps = 10000
+06/01/2024 17:51:16 - INFO - __main__ - epoch 0: {'matthews_correlation': -0.02929206145132745}
+06/01/2024 17:51:16 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/adapter_config.json
+06/01/2024 17:51:16 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/pytorch_adapter.bin
+06/01/2024 17:51:16 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/head_config.json
+06/01/2024 17:51:16 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/pytorch_model_head.bin
+06/01/2024 17:51:24 - INFO - __main__ - epoch 0: {'matthews_correlation': 0.0}
+06/01/2024 17:51:24 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/adapter_config.json
+06/01/2024 17:51:24 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/pytorch_adapter.bin
+06/01/2024 17:51:24 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/head_config.json
+06/01/2024 17:51:24 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/pytorch_model_head.bin
+06/01/2024 17:53:22 - INFO - __main__ - epoch 2: {'matthews_correlation': 0.4747045393662238}
+06/01/2024 17:53:22 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/adapter_config.json
+06/01/2024 17:53:22 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/pytorch_adapter.bin
+06/01/2024 17:53:22 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/head_config.json
+06/01/2024 17:53:22 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/pytorch_model_head.bin
+06/01/2024 17:53:28 - INFO - __main__ - epoch 2: {'matthews_correlation': 0.5174977706442678}
+06/01/2024 17:53:28 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/adapter_config.json
+06/01/2024 17:53:29 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/pytorch_adapter.bin
+06/01/2024 17:53:29 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/head_config.json
+06/01/2024 17:53:29 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/pytorch_model_head.bin
+06/01/2024 17:55:23 - INFO - __main__ - epoch 4: {'matthews_correlation': 0.542244787638552}
+06/01/2024 17:55:23 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/adapter_config.json
+06/01/2024 17:55:23 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/pytorch_adapter.bin
+06/01/2024 17:55:23 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/head_config.json
+06/01/2024 17:55:23 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/pytorch_model_head.bin
+06/01/2024 17:55:29 - INFO - __main__ - epoch 4: {'matthews_correlation': 0.5568270392858016}
+06/01/2024 17:55:29 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/adapter_config.json
+06/01/2024 17:55:29 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/pytorch_adapter.bin
+06/01/2024 17:55:29 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/head_config.json
+06/01/2024 17:55:29 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/pytorch_model_head.bin
+06/01/2024 17:57:25 - INFO - __main__ - epoch 7: {'matthews_correlation': 0.5073664747016221}
+06/01/2024 17:57:25 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/adapter_config.json
+06/01/2024 17:57:25 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/pytorch_adapter.bin
+06/01/2024 17:57:25 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/head_config.json
+06/01/2024 17:57:25 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/pytorch_model_head.bin
+06/01/2024 17:57:31 - INFO - __main__ - epoch 7: {'matthews_correlation': 0.552840881500124}
+06/01/2024 17:57:31 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/adapter_config.json
+06/01/2024 17:57:31 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/pytorch_adapter.bin
+06/01/2024 17:57:31 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/head_config.json
+06/01/2024 17:57:31 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/pytorch_model_head.bin
+06/01/2024 17:59:27 - INFO - __main__ - epoch 9: {'matthews_correlation': 0.5353925809123671}
+06/01/2024 17:59:27 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_7999/adapter_config.json
+06/01/2024 17:59:27 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_7999/pytorch_adapter.bin
+06/01/2024 17:59:27 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_7999/head_config.json
+06/01/2024 17:59:27 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_7999/pytorch_model_head.bin
+06/01/2024 17:59:33 - INFO - __main__ - epoch 9: {'matthews_correlation': 0.5495593400575438}
+06/01/2024 17:59:33 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_7999/adapter_config.json
+06/01/2024 17:59:33 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_7999/pytorch_adapter.bin
+06/01/2024 17:59:33 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_7999/head_config.json
+06/01/2024 17:59:33 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_7999/pytorch_model_head.bin
+06/01/2024 18:01:29 - INFO - __main__ - epoch 11: {'matthews_correlation': 0.5226700639354173}
+06/01/2024 18:01:29 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_9999/adapter_config.json
+06/01/2024 18:01:29 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_9999/pytorch_adapter.bin
+06/01/2024 18:01:29 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_9999/head_config.json
+06/01/2024 18:01:29 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_9999/pytorch_model_head.bin
+06/01/2024 18:01:35 - INFO - __main__ - epoch 11: {'matthews_correlation': 0.540076124607824}
+06/01/2024 18:01:35 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_9999/adapter_config.json
+06/01/2024 18:01:35 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_9999/pytorch_adapter.bin
+06/01/2024 18:01:35 - INFO - adapters.loading - Configuration saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_9999/head_config.json
+06/01/2024 18:01:35 - INFO - adapters.loading - Module weights saved in ./outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_9999/pytorch_model_head.bin
+06/01/2024 18:01:35 - INFO - __main__ - ***** Completed training *****
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/adapter_config.json
ADDED
@@ -0,0 +1,41 @@
+{
+  "config": {
+    "adapter_residual_before_ln": false,
+    "cross_adapter": false,
+    "dropout": 0.0,
+    "factorized_phm_W": true,
+    "factorized_phm_rule": false,
+    "hypercomplex_nonlinearity": "glorot-uniform",
+    "init_weights": "bert",
+    "inv_adapter": null,
+    "inv_adapter_reduction_factor": null,
+    "is_parallel": false,
+    "learn_phm": true,
+    "leave_out": [],
+    "ln_after": false,
+    "ln_before": false,
+    "mh_adapter": true,
+    "non_linearity": "swish",
+    "original_ln_after": true,
+    "original_ln_before": false,
+    "output_adapter": true,
+    "phm_bias": true,
+    "phm_c_init": "normal",
+    "phm_dim": 4,
+    "phm_init_range": 0.0001,
+    "phm_layer": false,
+    "phm_rank": 1,
+    "reduction_factor": 16,
+    "residual_before_ln": true,
+    "scaling": 1.0,
+    "shared_W_phm": false,
+    "shared_phm_rule": true,
+    "use_gating": false
+  },
+  "hidden_size": 768,
+  "model_class": "BertAdapterModel",
+  "model_name": "bert-base-uncased",
+  "model_type": "bert",
+  "name": "cola",
+  "version": "0.2.1"
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/all_results.json
ADDED
@@ -0,0 +1 @@
+{"eval_matthews_correlation": -0.02929206145132745}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/all_results_val.json
ADDED
@@ -0,0 +1 @@
+{"eval_matthews_correlation": 0.0}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/eval_res.json
ADDED
The diff for this file is too large to render.
See raw diff
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/gpu_stats.json
ADDED
@@ -0,0 +1,130 @@
+{
+  "memory_allocated": 459623936,
+  "max_memory_allocated": 471682048,
+  "memory_reserved": 532676608,
+  "max_memory_reserved": 532676608,
+  "memory_stats": {
+    "active.all.allocated": 229718,
+    "active.all.current": 319,
+    "active.all.freed": 229399,
+    "active.all.peak": 337,
+    "active.large_pool.allocated": 7614,
+    "active.large_pool.current": 78,
+    "active.large_pool.freed": 7536,
+    "active.large_pool.peak": 80,
+    "active.small_pool.allocated": 222104,
+    "active.small_pool.current": 241,
+    "active.small_pool.freed": 221863,
+    "active.small_pool.peak": 259,
+    "active_bytes.all.allocated": 104486025728,
+    "active_bytes.all.current": 459623936,
+    "active_bytes.all.freed": 104026401792,
+    "active_bytes.all.peak": 471682048,
+    "active_bytes.large_pool.allocated": 16832397312,
+    "active_bytes.large_pool.current": 451805184,
+    "active_bytes.large_pool.freed": 16380592128,
+    "active_bytes.large_pool.peak": 458686464,
+    "active_bytes.small_pool.allocated": 87653628416,
+    "active_bytes.small_pool.current": 7818752,
+    "active_bytes.small_pool.freed": 87645809664,
+    "active_bytes.small_pool.peak": 16436224,
+    "allocated_bytes.all.allocated": 104486025728,
+    "allocated_bytes.all.current": 459623936,
+    "allocated_bytes.all.freed": 104026401792,
+    "allocated_bytes.all.peak": 471682048,
+    "allocated_bytes.large_pool.allocated": 16832397312,
+    "allocated_bytes.large_pool.current": 451805184,
+    "allocated_bytes.large_pool.freed": 16380592128,
+    "allocated_bytes.large_pool.peak": 458686464,
+    "allocated_bytes.small_pool.allocated": 87653628416,
+    "allocated_bytes.small_pool.current": 7818752,
+    "allocated_bytes.small_pool.freed": 87645809664,
+    "allocated_bytes.small_pool.peak": 16436224,
+    "allocation.all.allocated": 229718,
+    "allocation.all.current": 319,
+    "allocation.all.freed": 229399,
+    "allocation.all.peak": 337,
+    "allocation.large_pool.allocated": 7614,
+    "allocation.large_pool.current": 78,
+    "allocation.large_pool.freed": 7536,
+    "allocation.large_pool.peak": 80,
+    "allocation.small_pool.allocated": 222104,
+    "allocation.small_pool.current": 241,
+    "allocation.small_pool.freed": 221863,
+    "allocation.small_pool.peak": 259,
+    "inactive_split.all.allocated": 110877,
+    "inactive_split.all.current": 23,
+    "inactive_split.all.freed": 110854,
+    "inactive_split.all.peak": 34,
+    "inactive_split.large_pool.allocated": 7159,
+    "inactive_split.large_pool.current": 18,
+    "inactive_split.large_pool.freed": 7141,
+    "inactive_split.large_pool.peak": 19,
+    "inactive_split.small_pool.allocated": 103718,
+    "inactive_split.small_pool.current": 5,
+    "inactive_split.small_pool.freed": 103713,
+    "inactive_split.small_pool.peak": 15,
+    "inactive_split_bytes.all.allocated": 111369324032,
+    "inactive_split_bytes.all.current": 43692544,
+    "inactive_split_bytes.all.freed": 111325631488,
+    "inactive_split_bytes.all.peak": 67346432,
+    "inactive_split_bytes.large_pool.allocated": 18599641088,
+    "inactive_split_bytes.large_pool.current": 41025536,
+    "inactive_split_bytes.large_pool.freed": 18558615552,
+    "inactive_split_bytes.large_pool.peak": 59146240,
+    "inactive_split_bytes.small_pool.allocated": 92769682944,
+    "inactive_split_bytes.small_pool.current": 2667008,
+    "inactive_split_bytes.small_pool.freed": 92767015936,
+    "inactive_split_bytes.small_pool.peak": 8200192,
+    "max_split_size": -1,
+    "num_alloc_retries": 0,
+    "num_device_alloc": 30,
+    "num_device_free": 0,
+    "num_ooms": 0,
+    "num_sync_all_streams": 0,
+    "oversize_allocations.allocated": 0,
+    "oversize_allocations.current": 0,
+    "oversize_allocations.freed": 0,
+    "oversize_allocations.peak": 0,
+    "oversize_segments.allocated": 0,
+    "oversize_segments.current": 0,
+    "oversize_segments.freed": 0,
+    "oversize_segments.peak": 0,
+    "requested_bytes.all.allocated": 100127988300,
+    "requested_bytes.all.current": 458479208,
+    "requested_bytes.all.freed": 99669509092,
+    "requested_bytes.all.peak": 470535000,
+    "requested_bytes.large_pool.allocated": 12486326272,
+    "requested_bytes.large_pool.current": 450672640,
+    "requested_bytes.large_pool.freed": 12035653632,
+    "requested_bytes.large_pool.peak": 457553920,
+    "requested_bytes.small_pool.allocated": 87641662028,
+    "requested_bytes.small_pool.current": 7806568,
+    "requested_bytes.small_pool.freed": 87633855460,
+    "requested_bytes.small_pool.peak": 16421720,
+    "reserved_bytes.all.allocated": 532676608,
+    "reserved_bytes.all.current": 532676608,
+    "reserved_bytes.all.freed": 0,
+    "reserved_bytes.all.peak": 532676608,
+    "reserved_bytes.large_pool.allocated": 513802240,
+    "reserved_bytes.large_pool.current": 513802240,
+    "reserved_bytes.large_pool.freed": 0,
+    "reserved_bytes.large_pool.peak": 513802240,
+    "reserved_bytes.small_pool.allocated": 18874368,
+    "reserved_bytes.small_pool.current": 18874368,
+    "reserved_bytes.small_pool.freed": 0,
+    "reserved_bytes.small_pool.peak": 18874368,
+    "segment.all.allocated": 30,
+    "segment.all.current": 30,
+    "segment.all.freed": 0,
+    "segment.all.peak": 30,
+    "segment.large_pool.allocated": 21,
+    "segment.large_pool.current": 21,
+    "segment.large_pool.freed": 0,
+    "segment.large_pool.peak": 21,
+    "segment.small_pool.allocated": 9,
+    "segment.small_pool.current": 9,
+    "segment.small_pool.freed": 0,
+    "segment.small_pool.peak": 9
+  }
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/head_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "config": {
+    "activation_function": "tanh",
+    "bias": true,
+    "dropout_prob": null,
+    "head_type": "classification",
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layers": 2,
+    "num_labels": 2,
+    "use_pooler": false
+  },
+  "hidden_size": 768,
+  "model_class": "BertAdapterModel",
+  "model_name": "bert-base-uncased",
+  "model_type": "bert",
+  "name": "cola",
+  "version": "0.2.1"
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/pytorch_adapter.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29b06f7b9617fec3c5bc63794ca9c89f4238d46db9edb1de32e4dd4e5086fae0
+size 7191062
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/pytorch_model_head.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:244a549f85f7dcaa9f5082c6ca9f115dff60bbcbbd8534f0a78f92ef9b70720d
+size 2370664
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/tokenizer_config.json
ADDED
@@ -0,0 +1,56 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "100": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "101": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "102": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "103": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": true,
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "pad_token": "[PAD]",
+  "padding_side": "left",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/val_res.json
ADDED
The diff for this file is too large to render.
See raw diff
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_0/vocab.txt
ADDED
The diff for this file is too large to render.
See raw diff
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/adapter_config.json
ADDED
@@ -0,0 +1,41 @@
+{
+  "config": {
+    "adapter_residual_before_ln": false,
+    "cross_adapter": false,
+    "dropout": 0.0,
+    "factorized_phm_W": true,
+    "factorized_phm_rule": false,
+    "hypercomplex_nonlinearity": "glorot-uniform",
+    "init_weights": "bert",
+    "inv_adapter": null,
+    "inv_adapter_reduction_factor": null,
+    "is_parallel": false,
+    "learn_phm": true,
+    "leave_out": [],
+    "ln_after": false,
+    "ln_before": false,
+    "mh_adapter": true,
+    "non_linearity": "swish",
+    "original_ln_after": true,
+    "original_ln_before": false,
+    "output_adapter": true,
+    "phm_bias": true,
+    "phm_c_init": "normal",
+    "phm_dim": 4,
+    "phm_init_range": 0.0001,
+    "phm_layer": false,
+    "phm_rank": 1,
+    "reduction_factor": 16,
+    "residual_before_ln": true,
+    "scaling": 1.0,
+    "shared_W_phm": false,
+    "shared_phm_rule": true,
+    "use_gating": false
+  },
+  "hidden_size": 768,
+  "model_class": "BertAdapterModel",
+  "model_name": "bert-base-uncased",
+  "model_type": "bert",
+  "name": "cola",
+  "version": "0.2.1"
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/all_results.json
ADDED
@@ -0,0 +1 @@
+{"eval_matthews_correlation": 0.4747045393662238}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/all_results_val.json
ADDED
@@ -0,0 +1 @@
+{"eval_matthews_correlation": 0.5174977706442678}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/eval_res.json
ADDED
The diff for this file is too large to render.
See raw diff
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/gpu_stats.json
ADDED
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"memory_allocated": 487209472,
|
3 |
+
"max_memory_allocated": 699914240,
|
4 |
+
"memory_reserved": 805306368,
|
5 |
+
"max_memory_reserved": 805306368,
|
6 |
+
"memory_stats": {
|
7 |
+
"active.all.allocated": 3076017,
|
8 |
+
"active.all.current": 520,
|
9 |
+
"active.all.freed": 3075497,
|
10 |
+
"active.all.peak": 777,
|
11 |
+
"active.large_pool.allocated": 116558,
|
12 |
+
"active.large_pool.current": 81,
|
13 |
+
"active.large_pool.freed": 116477,
|
14 |
+
"active.large_pool.peak": 183,
|
15 |
+
"active.small_pool.allocated": 2959459,
|
16 |
+
"active.small_pool.current": 439,
|
17 |
+
"active.small_pool.freed": 2959020,
|
18 |
+
"active.small_pool.peak": 696,
|
19 |
+
"active_bytes.all.allocated": 1290817708544,
|
20 |
+
"active_bytes.all.current": 487209472,
|
21 |
+
"active_bytes.all.freed": 1290330499072,
|
22 |
+
"active_bytes.all.peak": 699914240,
|
23 |
+
"active_bytes.large_pool.allocated": 257307901952,
|
24 |
+
"active_bytes.large_pool.current": 465043456,
|
25 |
+
"active_bytes.large_pool.freed": 256842858496,
|
26 |
+
"active_bytes.large_pool.peak": 646561792,
|
27 |
+
"active_bytes.small_pool.allocated": 1033509806592,
|
28 |
+
"active_bytes.small_pool.current": 22166016,
|
29 |
+
"active_bytes.small_pool.freed": 1033487640576,
|
30 |
+
"active_bytes.small_pool.peak": 133540864,
|
31 |
+
"allocated_bytes.all.allocated": 1290817708544,
|
32 |
+
"allocated_bytes.all.current": 487209472,
|
33 |
+
"allocated_bytes.all.freed": 1290330499072,
|
34 |
+
"allocated_bytes.all.peak": 699914240,
|
35 |
+
"allocated_bytes.large_pool.allocated": 257307901952,
|
36 |
+
"allocated_bytes.large_pool.current": 465043456,
|
37 |
+
"allocated_bytes.large_pool.freed": 256842858496,
|
38 |
+
"allocated_bytes.large_pool.peak": 646561792,
|
39 |
+
"allocated_bytes.small_pool.allocated": 1033509806592,
|
40 |
+
"allocated_bytes.small_pool.current": 22166016,
|
41 |
+
"allocated_bytes.small_pool.freed": 1033487640576,
|
42 |
+
"allocated_bytes.small_pool.peak": 133540864,
|
43 |
+
"allocation.all.allocated": 3076017,
|
44 |
+
"allocation.all.current": 520,
|
45 |
+
"allocation.all.freed": 3075497,
|
46 |
+
"allocation.all.peak": 777,
|
47 |
+
"allocation.large_pool.allocated": 116558,
|
48 |
+
"allocation.large_pool.current": 81,
|
49 |
+
"allocation.large_pool.freed": 116477,
|
50 |
+
"allocation.large_pool.peak": 183,
|
51 |
+
"allocation.small_pool.allocated": 2959459,
|
52 |
+
"allocation.small_pool.current": 439,
|
53 |
+
"allocation.small_pool.freed": 2959020,
|
54 |
+
"allocation.small_pool.peak": 696,
|
55 |
+
"inactive_split.all.allocated": 1669133,
|
56 |
+
"inactive_split.all.current": 45,
|
57 |
+
"inactive_split.all.freed": 1669088,
|
58 |
+
"inactive_split.all.peak": 119,
|
59 |
+
"inactive_split.large_pool.allocated": 99599,
|
60 |
+
"inactive_split.large_pool.current": 19,
|
61 |
+
"inactive_split.large_pool.freed": 99580,
|
62 |
+
"inactive_split.large_pool.peak": 23,
|
63 |
+
"inactive_split.small_pool.allocated": 1569534,
|
64 |
+
"inactive_split.small_pool.current": 26,
|
65 |
+
"inactive_split.small_pool.freed": 1569508,
|
66 |
+
"inactive_split.small_pool.peak": 99,
|
67 |
+
"inactive_split_bytes.all.allocated": 1381495543808,
|
68 |
+
"inactive_split_bytes.all.current": 60147200,
|
69 |
+
"inactive_split_bytes.all.freed": 1381435396608,
|
70 |
+
"inactive_split_bytes.all.peak": 149930496,
|
71 |
+
"inactive_split_bytes.large_pool.allocated": 291350265856,
|
72 |
+
"inactive_split_bytes.large_pool.current": 48758784,
|
73 |
+
"inactive_split_bytes.large_pool.freed": 291301507072,
|
74 |
+
"inactive_split_bytes.large_pool.peak": 75759616,
|
75 |
+
"inactive_split_bytes.small_pool.allocated": 1090145277952,
|
76 |
+
"inactive_split_bytes.small_pool.current": 11388416,
|
77 |
+
"inactive_split_bytes.small_pool.freed": 1090133889536,
|
78 |
+
"inactive_split_bytes.small_pool.peak": 82985472,
|
79 |
+
"max_split_size": -1,
|
80 |
+
"num_alloc_retries": 0,
|
81 |
+
"num_device_alloc": 97,
|
82 |
+
"num_device_free": 0,
|
83 |
+
"num_ooms": 0,
|
84 |
+
"num_sync_all_streams": 0,
|
85 |
+
"oversize_allocations.allocated": 0,
|
86 |
+
"oversize_allocations.current": 0,
|
87 |
+
"oversize_allocations.freed": 0,
|
88 |
+
"oversize_allocations.peak": 0,
|
89 |
+
"oversize_segments.allocated": 0,
|
90 |
+
"oversize_segments.current": 0,
|
91 |
+
"oversize_segments.freed": 0,
|
92 |
+
"oversize_segments.peak": 0,
|
93 |
+
"requested_bytes.all.allocated": 1243709959972,
|
94 |
+
"requested_bytes.all.current": 486047800,
|
95 |
+
"requested_bytes.all.freed": 1243223912172,
|
96 |
+
"requested_bytes.all.peak": 680092736,
|
97 |
+
"requested_bytes.large_pool.allocated": 210328229888,
|
98 |
+
"requested_bytes.large_pool.current": 463910912,
|
99 |
+
"requested_bytes.large_pool.freed": 209864318976,
|
100 |
+
"requested_bytes.large_pool.peak": 626776064,
|
101 |
+
"requested_bytes.small_pool.allocated": 1033381730084,
|
102 |
+
"requested_bytes.small_pool.current": 22136888,
|
103 |
+
"requested_bytes.small_pool.freed": 1033359593196,
|
104 |
+
"requested_bytes.small_pool.peak": 133501696,
|
105 |
+
"reserved_bytes.all.allocated": 805306368,
|
106 |
+
"reserved_bytes.all.current": 805306368,
|
107 |
+
"reserved_bytes.all.freed": 0,
|
108 |
+
"reserved_bytes.all.peak": 805306368,
|
109 |
+
"reserved_bytes.large_pool.allocated": 660602880,
|
110 |
+
"reserved_bytes.large_pool.current": 660602880,
|
111 |
+
"reserved_bytes.large_pool.freed": 0,
|
112 |
+
"reserved_bytes.large_pool.peak": 660602880,
|
113 |
+
"reserved_bytes.small_pool.allocated": 144703488,
|
114 |
+
"reserved_bytes.small_pool.current": 144703488,
|
115 |
+
"reserved_bytes.small_pool.freed": 0,
|
116 |
+
"reserved_bytes.small_pool.peak": 144703488,
|
117 |
+
"segment.all.allocated": 97,
|
118 |
+
"segment.all.current": 97,
|
119 |
+
"segment.all.freed": 0,
|
120 |
+
"segment.all.peak": 97,
|
121 |
+
"segment.large_pool.allocated": 28,
|
122 |
+
"segment.large_pool.current": 28,
|
123 |
+
"segment.large_pool.freed": 0,
|
124 |
+
"segment.large_pool.peak": 28,
|
125 |
+
"segment.small_pool.allocated": 69,
|
126 |
+
"segment.small_pool.current": 69,
|
127 |
+
"segment.small_pool.freed": 0,
|
128 |
+
"segment.small_pool.peak": 69
|
129 |
+
}
|
130 |
+
}
|
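The four scalar fields and the flattened "active.all.allocated"-style counters above are exactly what PyTorch's CUDA caching allocator reports. A minimal sketch of how such a gpu_stats.json snapshot can be produced — the run's actual logging helper is not part of this diff, so dump_gpu_stats is an illustrative name:

    import json
    import torch

    def dump_gpu_stats(path, device=0):
        # Mirror the fields in gpu_stats.json: four scalar summaries plus the
        # full flattened counter dict from torch.cuda.memory_stats().
        stats = {
            "memory_allocated": torch.cuda.memory_allocated(device),
            "max_memory_allocated": torch.cuda.max_memory_allocated(device),
            "memory_reserved": torch.cuda.memory_reserved(device),
            "max_memory_reserved": torch.cuda.max_memory_reserved(device),
            "memory_stats": dict(torch.cuda.memory_stats(device)),
        }
        with open(path, "w") as f:
            json.dump(stats, f, indent=4)

Note that reserved_bytes.*.freed is 0 and the segment counts sit at 97 in every checkpoint: the allocator reserved 768 MiB (805306368 bytes) early in the run and never released it back to the driver, the expected steady state for a fixed-shape training loop.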
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/head_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+    "config": {
+        "activation_function": "tanh",
+        "bias": true,
+        "dropout_prob": null,
+        "head_type": "classification",
+        "label2id": {
+            "LABEL_0": 0,
+            "LABEL_1": 1
+        },
+        "layers": 2,
+        "num_labels": 2,
+        "use_pooler": false
+    },
+    "hidden_size": 768,
+    "model_class": "BertAdapterModel",
+    "model_name": "bert-base-uncased",
+    "model_type": "bert",
+    "name": "cola",
+    "version": "0.2.1"
+}
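head_config.json describes the classification head saved next to the adapter: two dense layers with tanh activation on top of the [CLS] hidden state ("use_pooler": false) and two CoLA labels. A sketch of declaring an equivalent head with the adapters library (keyword support can vary across library versions):

    from adapters import BertAdapterModel

    model = BertAdapterModel.from_pretrained("bert-base-uncased")
    # Mirrors head_config.json: 2 layers, tanh, 2 labels, no pooler.
    model.add_classification_head(
        "cola",
        num_labels=2,
        layers=2,
        activation_function="tanh",
        use_pooler=False,
    )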
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/pytorch_adapter.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9163ac81be9e722743c826d9388fa399fb25ae7e609d83b0a6725f868e26f98f
+size 7191062
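The *.bin weights are tracked with Git LFS, so the diff shows only the three-line pointer file: spec version, SHA-256 of the payload, and its size. At about 7.2 MB the adapter is a small fraction of a full bert-base-uncased checkpoint (~440 MB), which is the point of adapter tuning. A small parser for the pointer format, useful for auditing sizes without downloading the blobs:

    def parse_lfs_pointer(text: str) -> dict:
        # Git LFS pointers are "key value" lines: version, oid, size.
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        algo, digest = fields["oid"].split(":", 1)
        return {
            "version": fields["version"],
            "algo": algo,            # e.g. "sha256"
            "digest": digest,
            "size_bytes": int(fields["size"]),
        }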
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/pytorch_model_head.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0c4d71839d147c316ab8631c5c3b238e5632e23a39f4731f02a0bf6bf77a919
+size 2370664
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+    "cls_token": "[CLS]",
+    "mask_token": "[MASK]",
+    "pad_token": "[PAD]",
+    "sep_token": "[SEP]",
+    "unk_token": "[UNK]"
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/tokenizer_config.json
ADDED
@@ -0,0 +1,56 @@
+{
+    "added_tokens_decoder": {
+        "0": {
+            "content": "[PAD]",
+            "lstrip": false,
+            "normalized": false,
+            "rstrip": false,
+            "single_word": false,
+            "special": true
+        },
+        "100": {
+            "content": "[UNK]",
+            "lstrip": false,
+            "normalized": false,
+            "rstrip": false,
+            "single_word": false,
+            "special": true
+        },
+        "101": {
+            "content": "[CLS]",
+            "lstrip": false,
+            "normalized": false,
+            "rstrip": false,
+            "single_word": false,
+            "special": true
+        },
+        "102": {
+            "content": "[SEP]",
+            "lstrip": false,
+            "normalized": false,
+            "rstrip": false,
+            "single_word": false,
+            "special": true
+        },
+        "103": {
+            "content": "[MASK]",
+            "lstrip": false,
+            "normalized": false,
+            "rstrip": false,
+            "single_word": false,
+            "special": true
+        }
+    },
+    "clean_up_tokenization_spaces": true,
+    "cls_token": "[CLS]",
+    "do_lower_case": true,
+    "mask_token": "[MASK]",
+    "model_max_length": 512,
+    "pad_token": "[PAD]",
+    "padding_side": "left",
+    "sep_token": "[SEP]",
+    "strip_accents": null,
+    "tokenize_chinese_chars": true,
+    "tokenizer_class": "BertTokenizer",
+    "unk_token": "[UNK]"
+}
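The tokenizer files in each step_* directory are a stock bert-base-uncased tokenizer; the one setting that differs from the BERT default is "padding_side": "left". Any checkpoint directory can be loaded directly:

    from transformers import AutoTokenizer

    ckpt = "outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999"
    tok = AutoTokenizer.from_pretrained(ckpt)
    print(tok.padding_side)             # "left", per tokenizer_config.json
    print(tok("The boy ran quickly."))  # input_ids / token_type_ids / attention_mask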
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/val_res.json
ADDED
The diff for this file is too large to render.
See raw diff
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_1999/vocab.txt
ADDED
The diff for this file is too large to render.
See raw diff
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/adapter_config.json
ADDED
@@ -0,0 +1,41 @@
+{
+    "config": {
+        "adapter_residual_before_ln": false,
+        "cross_adapter": false,
+        "dropout": 0.0,
+        "factorized_phm_W": true,
+        "factorized_phm_rule": false,
+        "hypercomplex_nonlinearity": "glorot-uniform",
+        "init_weights": "bert",
+        "inv_adapter": null,
+        "inv_adapter_reduction_factor": null,
+        "is_parallel": false,
+        "learn_phm": true,
+        "leave_out": [],
+        "ln_after": false,
+        "ln_before": false,
+        "mh_adapter": true,
+        "non_linearity": "swish",
+        "original_ln_after": true,
+        "original_ln_before": false,
+        "output_adapter": true,
+        "phm_bias": true,
+        "phm_c_init": "normal",
+        "phm_dim": 4,
+        "phm_init_range": 0.0001,
+        "phm_layer": false,
+        "phm_rank": 1,
+        "reduction_factor": 16,
+        "residual_before_ln": true,
+        "scaling": 1.0,
+        "shared_W_phm": false,
+        "shared_phm_rule": true,
+        "use_gating": false
+    },
+    "hidden_size": 768,
+    "model_class": "BertAdapterModel",
+    "model_name": "bert-base-uncased",
+    "model_type": "bert",
+    "name": "cola",
+    "version": "0.2.1"
+}
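This adapter_config.json is the Houlsby-style double-adapter recipe: bottleneck adapters after both the attention sublayer ("mh_adapter": true) and the feed-forward sublayer ("output_adapter": true), swish non-linearity, and "reduction_factor": 16, i.e. a bottleneck of 768 / 16 = 48 dimensions. The phm_* fields are inert here because "phm_layer" is false. A sketch of restoring such a checkpoint with the adapters library (the load_as name is illustrative):

    from adapters import BertAdapterModel

    model = BertAdapterModel.from_pretrained("bert-base-uncased")
    ckpt = "outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999"
    # load_adapter() reads adapter_config.json and pytorch_adapter.bin from the directory.
    name = model.load_adapter(ckpt, load_as="cola")
    model.set_active_adapters(name)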
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/all_results.json
ADDED
@@ -0,0 +1 @@
+{"eval_matthews_correlation": 0.542244787638552}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/all_results_val.json
ADDED
@@ -0,0 +1 @@
+{"eval_matthews_correlation": 0.5568270392858016}
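eval_matthews_correlation is the standard CoLA metric: the Matthews correlation coefficient, which ranges over [-1, 1] and tolerates CoLA's label imbalance better than accuracy; the mid-0.5 values here are in the usual range for bert-base-sized models. The metric itself is one call in scikit-learn:

    from sklearn.metrics import matthews_corrcoef

    y_true = [1, 1, 0, 1, 0, 0, 1, 0]
    y_pred = [1, 0, 0, 1, 0, 1, 1, 0]
    print(matthews_corrcoef(y_true, y_pred))  # 0.5, same scale as eval_matthews_correlation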
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/eval_res.json
ADDED
The diff for this file is too large to render.
See raw diff
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/gpu_stats.json
ADDED
@@ -0,0 +1,130 @@
+{
+    "memory_allocated": 487209472,
+    "max_memory_allocated": 699914240,
+    "memory_reserved": 805306368,
+    "max_memory_reserved": 805306368,
+    "memory_stats": {
+        "active.all.allocated": 5923424,
+        "active.all.current": 520,
+        "active.all.freed": 5922904,
+        "active.all.peak": 777,
+        "active.large_pool.allocated": 225064,
+        "active.large_pool.current": 81,
+        "active.large_pool.freed": 224983,
+        "active.large_pool.peak": 183,
+        "active.small_pool.allocated": 5698360,
+        "active.small_pool.current": 439,
+        "active.small_pool.freed": 5697921,
+        "active.small_pool.peak": 696,
+        "active_bytes.all.allocated": 2476015690752,
+        "active_bytes.all.current": 487209472,
+        "active_bytes.all.freed": 2475528481280,
+        "active_bytes.all.peak": 699914240,
+        "active_bytes.large_pool.allocated": 497251237888,
+        "active_bytes.large_pool.current": 465043456,
+        "active_bytes.large_pool.freed": 496786194432,
+        "active_bytes.large_pool.peak": 646561792,
+        "active_bytes.small_pool.allocated": 1978764452864,
+        "active_bytes.small_pool.current": 22166016,
+        "active_bytes.small_pool.freed": 1978742286848,
+        "active_bytes.small_pool.peak": 133540864,
+        "allocated_bytes.all.allocated": 2476015690752,
+        "allocated_bytes.all.current": 487209472,
+        "allocated_bytes.all.freed": 2475528481280,
+        "allocated_bytes.all.peak": 699914240,
+        "allocated_bytes.large_pool.allocated": 497251237888,
+        "allocated_bytes.large_pool.current": 465043456,
+        "allocated_bytes.large_pool.freed": 496786194432,
+        "allocated_bytes.large_pool.peak": 646561792,
+        "allocated_bytes.small_pool.allocated": 1978764452864,
+        "allocated_bytes.small_pool.current": 22166016,
+        "allocated_bytes.small_pool.freed": 1978742286848,
+        "allocated_bytes.small_pool.peak": 133540864,
+        "allocation.all.allocated": 5923424,
+        "allocation.all.current": 520,
+        "allocation.all.freed": 5922904,
+        "allocation.all.peak": 777,
+        "allocation.large_pool.allocated": 225064,
+        "allocation.large_pool.current": 81,
+        "allocation.large_pool.freed": 224983,
+        "allocation.large_pool.peak": 183,
+        "allocation.small_pool.allocated": 5698360,
+        "allocation.small_pool.current": 439,
+        "allocation.small_pool.freed": 5697921,
+        "allocation.small_pool.peak": 696,
+        "inactive_split.all.allocated": 3209672,
+        "inactive_split.all.current": 47,
+        "inactive_split.all.freed": 3209625,
+        "inactive_split.all.peak": 119,
+        "inactive_split.large_pool.allocated": 191602,
+        "inactive_split.large_pool.current": 19,
+        "inactive_split.large_pool.freed": 191583,
+        "inactive_split.large_pool.peak": 23,
+        "inactive_split.small_pool.allocated": 3018070,
+        "inactive_split.small_pool.current": 28,
+        "inactive_split.small_pool.freed": 3018042,
+        "inactive_split.small_pool.peak": 99,
+        "inactive_split_bytes.all.allocated": 2650731834368,
+        "inactive_split_bytes.all.current": 60147200,
+        "inactive_split_bytes.all.freed": 2650671687168,
+        "inactive_split_bytes.all.peak": 149930496,
+        "inactive_split_bytes.large_pool.allocated": 563739623424,
+        "inactive_split_bytes.large_pool.current": 48758784,
+        "inactive_split_bytes.large_pool.freed": 563690864640,
+        "inactive_split_bytes.large_pool.peak": 75759616,
+        "inactive_split_bytes.small_pool.allocated": 2086992210944,
+        "inactive_split_bytes.small_pool.current": 11388416,
+        "inactive_split_bytes.small_pool.freed": 2086980822528,
+        "inactive_split_bytes.small_pool.peak": 82985472,
+        "max_split_size": -1,
+        "num_alloc_retries": 0,
+        "num_device_alloc": 97,
+        "num_device_free": 0,
+        "num_ooms": 0,
+        "num_sync_all_streams": 0,
+        "oversize_allocations.allocated": 0,
+        "oversize_allocations.current": 0,
+        "oversize_allocations.freed": 0,
+        "oversize_allocations.peak": 0,
+        "oversize_segments.allocated": 0,
+        "oversize_segments.current": 0,
+        "oversize_segments.freed": 0,
+        "oversize_segments.peak": 0,
+        "requested_bytes.all.allocated": 2385732458304,
+        "requested_bytes.all.current": 486047800,
+        "requested_bytes.all.freed": 2385246410504,
+        "requested_bytes.all.peak": 680092736,
+        "requested_bytes.large_pool.allocated": 407212120064,
+        "requested_bytes.large_pool.current": 463910912,
+        "requested_bytes.large_pool.freed": 406748209152,
+        "requested_bytes.large_pool.peak": 626776064,
+        "requested_bytes.small_pool.allocated": 1978520338240,
+        "requested_bytes.small_pool.current": 22136888,
+        "requested_bytes.small_pool.freed": 1978498201352,
+        "requested_bytes.small_pool.peak": 133501696,
+        "reserved_bytes.all.allocated": 805306368,
+        "reserved_bytes.all.current": 805306368,
+        "reserved_bytes.all.freed": 0,
+        "reserved_bytes.all.peak": 805306368,
+        "reserved_bytes.large_pool.allocated": 660602880,
+        "reserved_bytes.large_pool.current": 660602880,
+        "reserved_bytes.large_pool.freed": 0,
+        "reserved_bytes.large_pool.peak": 660602880,
+        "reserved_bytes.small_pool.allocated": 144703488,
+        "reserved_bytes.small_pool.current": 144703488,
+        "reserved_bytes.small_pool.freed": 0,
+        "reserved_bytes.small_pool.peak": 144703488,
+        "segment.all.allocated": 97,
+        "segment.all.current": 97,
+        "segment.all.freed": 0,
+        "segment.all.peak": 97,
+        "segment.large_pool.allocated": 28,
+        "segment.large_pool.current": 28,
+        "segment.large_pool.freed": 0,
+        "segment.large_pool.peak": 28,
+        "segment.small_pool.allocated": 69,
+        "segment.small_pool.current": 69,
+        "segment.small_pool.freed": 0,
+        "segment.small_pool.peak": 69
+    }
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/head_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+    "config": {
+        "activation_function": "tanh",
+        "bias": true,
+        "dropout_prob": null,
+        "head_type": "classification",
+        "label2id": {
+            "LABEL_0": 0,
+            "LABEL_1": 1
+        },
+        "layers": 2,
+        "num_labels": 2,
+        "use_pooler": false
+    },
+    "hidden_size": 768,
+    "model_class": "BertAdapterModel",
+    "model_name": "bert-base-uncased",
+    "model_type": "bert",
+    "name": "cola",
+    "version": "0.2.1"
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/pytorch_adapter.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fcd11f8a4b1606457a831d8f275103a705f9326c343919a8a5747ef886e615df
+size 7191062
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/pytorch_model_head.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ba004d6f3992457f038f420e4b26e6f1aa219c44882b5ecc4a0006f0f61f760
+size 2370664
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
+{
+    "cls_token": "[CLS]",
+    "mask_token": "[MASK]",
+    "pad_token": "[PAD]",
+    "sep_token": "[SEP]",
+    "unk_token": "[UNK]"
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/tokenizer_config.json
ADDED
@@ -0,0 +1,56 @@
+{
+    "added_tokens_decoder": {
+        "0": {
+            "content": "[PAD]",
+            "lstrip": false,
+            "normalized": false,
+            "rstrip": false,
+            "single_word": false,
+            "special": true
+        },
+        "100": {
+            "content": "[UNK]",
+            "lstrip": false,
+            "normalized": false,
+            "rstrip": false,
+            "single_word": false,
+            "special": true
+        },
+        "101": {
+            "content": "[CLS]",
+            "lstrip": false,
+            "normalized": false,
+            "rstrip": false,
+            "single_word": false,
+            "special": true
+        },
+        "102": {
+            "content": "[SEP]",
+            "lstrip": false,
+            "normalized": false,
+            "rstrip": false,
+            "single_word": false,
+            "special": true
+        },
+        "103": {
+            "content": "[MASK]",
+            "lstrip": false,
+            "normalized": false,
+            "rstrip": false,
+            "single_word": false,
+            "special": true
+        }
+    },
+    "clean_up_tokenization_spaces": true,
+    "cls_token": "[CLS]",
+    "do_lower_case": true,
+    "mask_token": "[MASK]",
+    "model_max_length": 512,
+    "pad_token": "[PAD]",
+    "padding_side": "left",
+    "sep_token": "[SEP]",
+    "strip_accents": null,
+    "tokenize_chinese_chars": true,
+    "tokenizer_class": "BertTokenizer",
+    "unk_token": "[UNK]"
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/val_res.json
ADDED
The diff for this file is too large to render.
See raw diff
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_3999/vocab.txt
ADDED
The diff for this file is too large to render.
See raw diff
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/adapter_config.json
ADDED
@@ -0,0 +1,41 @@
+{
+    "config": {
+        "adapter_residual_before_ln": false,
+        "cross_adapter": false,
+        "dropout": 0.0,
+        "factorized_phm_W": true,
+        "factorized_phm_rule": false,
+        "hypercomplex_nonlinearity": "glorot-uniform",
+        "init_weights": "bert",
+        "inv_adapter": null,
+        "inv_adapter_reduction_factor": null,
+        "is_parallel": false,
+        "learn_phm": true,
+        "leave_out": [],
+        "ln_after": false,
+        "ln_before": false,
+        "mh_adapter": true,
+        "non_linearity": "swish",
+        "original_ln_after": true,
+        "original_ln_before": false,
+        "output_adapter": true,
+        "phm_bias": true,
+        "phm_c_init": "normal",
+        "phm_dim": 4,
+        "phm_init_range": 0.0001,
+        "phm_layer": false,
+        "phm_rank": 1,
+        "reduction_factor": 16,
+        "residual_before_ln": true,
+        "scaling": 1.0,
+        "shared_W_phm": false,
+        "shared_phm_rule": true,
+        "use_gating": false
+    },
+    "hidden_size": 768,
+    "model_class": "BertAdapterModel",
+    "model_name": "bert-base-uncased",
+    "model_type": "bert",
+    "name": "cola",
+    "version": "0.2.1"
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/all_results.json
ADDED
@@ -0,0 +1 @@
+{"eval_matthews_correlation": 0.5073664747016221}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/all_results_val.json
ADDED
@@ -0,0 +1 @@
+{"eval_matthews_correlation": 0.552840881500124}
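Relative to step_3999, validation MCC barely moves (0.5568 → 0.5528) while the eval-split score drops from 0.5422 to 0.5074, so checkpoint selection matters for this run. Since every checkpoint directory carries an all_results_val.json, picking the best step is a short scan (a sketch; assumes the directory layout shown in this diff):

    import glob, json, os

    def best_checkpoint(run_dir):
        # Return the step_* directory with the highest validation MCC.
        best_step, best_mcc = None, float("-inf")
        for path in glob.glob(os.path.join(run_dir, "step_*", "all_results_val.json")):
            with open(path) as f:
                mcc = json.load(f)["eval_matthews_correlation"]
            if mcc > best_mcc:
                best_step, best_mcc = os.path.basename(os.path.dirname(path)), mcc
        return best_step, best_mcc

    run = "outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000"
    print(best_checkpoint(run))  # ('step_3999', 0.5568...) given the checkpoints so far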
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/eval_res.json
ADDED
The diff for this file is too large to render.
See raw diff
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/gpu_stats.json
ADDED
@@ -0,0 +1,130 @@
+{
+    "memory_allocated": 487209472,
+    "max_memory_allocated": 699914240,
+    "memory_reserved": 805306368,
+    "max_memory_reserved": 805306368,
+    "memory_stats": {
+        "active.all.allocated": 8770831,
+        "active.all.current": 520,
+        "active.all.freed": 8770311,
+        "active.all.peak": 777,
+        "active.large_pool.allocated": 333282,
+        "active.large_pool.current": 81,
+        "active.large_pool.freed": 333201,
+        "active.large_pool.peak": 183,
+        "active.small_pool.allocated": 8437549,
+        "active.small_pool.current": 439,
+        "active.small_pool.freed": 8437110,
+        "active.small_pool.peak": 696,
+        "active_bytes.all.allocated": 3664038001152,
+        "active_bytes.all.current": 487209472,
+        "active_bytes.all.freed": 3663550791680,
+        "active_bytes.all.peak": 699914240,
+        "active_bytes.large_pool.allocated": 736794148864,
+        "active_bytes.large_pool.current": 465043456,
+        "active_bytes.large_pool.freed": 736329105408,
+        "active_bytes.large_pool.peak": 646561792,
+        "active_bytes.small_pool.allocated": 2927243852288,
+        "active_bytes.small_pool.current": 22166016,
+        "active_bytes.small_pool.freed": 2927221686272,
+        "active_bytes.small_pool.peak": 133540864,
+        "allocated_bytes.all.allocated": 3664038001152,
+        "allocated_bytes.all.current": 487209472,
+        "allocated_bytes.all.freed": 3663550791680,
+        "allocated_bytes.all.peak": 699914240,
+        "allocated_bytes.large_pool.allocated": 736794148864,
+        "allocated_bytes.large_pool.current": 465043456,
+        "allocated_bytes.large_pool.freed": 736329105408,
+        "allocated_bytes.large_pool.peak": 646561792,
+        "allocated_bytes.small_pool.allocated": 2927243852288,
+        "allocated_bytes.small_pool.current": 22166016,
+        "allocated_bytes.small_pool.freed": 2927221686272,
+        "allocated_bytes.small_pool.peak": 133540864,
+        "allocation.all.allocated": 8770831,
+        "allocation.all.current": 520,
+        "allocation.all.freed": 8770311,
+        "allocation.all.peak": 777,
+        "allocation.large_pool.allocated": 333282,
+        "allocation.large_pool.current": 81,
+        "allocation.large_pool.freed": 333201,
+        "allocation.large_pool.peak": 183,
+        "allocation.small_pool.allocated": 8437549,
+        "allocation.small_pool.current": 439,
+        "allocation.small_pool.freed": 8437110,
+        "allocation.small_pool.peak": 696,
+        "inactive_split.all.allocated": 4756567,
+        "inactive_split.all.current": 47,
+        "inactive_split.all.freed": 4756520,
+        "inactive_split.all.peak": 119,
+        "inactive_split.large_pool.allocated": 283096,
+        "inactive_split.large_pool.current": 19,
+        "inactive_split.large_pool.freed": 283077,
+        "inactive_split.large_pool.peak": 23,
+        "inactive_split.small_pool.allocated": 4473471,
+        "inactive_split.small_pool.current": 28,
+        "inactive_split.small_pool.freed": 4473443,
+        "inactive_split.small_pool.peak": 99,
+        "inactive_split_bytes.all.allocated": 3923585340928,
+        "inactive_split_bytes.all.current": 60147200,
+        "inactive_split_bytes.all.freed": 3923525193728,
+        "inactive_split_bytes.all.peak": 149930496,
+        "inactive_split_bytes.large_pool.allocated": 836194975744,
+        "inactive_split_bytes.large_pool.current": 48758784,
+        "inactive_split_bytes.large_pool.freed": 836146216960,
+        "inactive_split_bytes.large_pool.peak": 75759616,
+        "inactive_split_bytes.small_pool.allocated": 3087390365184,
+        "inactive_split_bytes.small_pool.current": 11388416,
+        "inactive_split_bytes.small_pool.freed": 3087378976768,
+        "inactive_split_bytes.small_pool.peak": 82985472,
+        "max_split_size": -1,
+        "num_alloc_retries": 0,
+        "num_device_alloc": 97,
+        "num_device_free": 0,
+        "num_ooms": 0,
+        "num_sync_all_streams": 0,
+        "oversize_allocations.allocated": 0,
+        "oversize_allocations.current": 0,
+        "oversize_allocations.freed": 0,
+        "oversize_allocations.peak": 0,
+        "oversize_segments.allocated": 0,
+        "oversize_segments.current": 0,
+        "oversize_segments.freed": 0,
+        "oversize_segments.peak": 0,
+        "requested_bytes.all.allocated": 3531550399964,
+        "requested_bytes.all.current": 486047032,
+        "requested_bytes.all.freed": 3531064352932,
+        "requested_bytes.all.peak": 680092736,
+        "requested_bytes.large_pool.allocated": 604666959872,
+        "requested_bytes.large_pool.current": 463910912,
+        "requested_bytes.large_pool.freed": 604203048960,
+        "requested_bytes.large_pool.peak": 626776064,
+        "requested_bytes.small_pool.allocated": 2926883440092,
+        "requested_bytes.small_pool.current": 22136120,
+        "requested_bytes.small_pool.freed": 2926861303972,
+        "requested_bytes.small_pool.peak": 133501696,
+        "reserved_bytes.all.allocated": 805306368,
+        "reserved_bytes.all.current": 805306368,
+        "reserved_bytes.all.freed": 0,
+        "reserved_bytes.all.peak": 805306368,
+        "reserved_bytes.large_pool.allocated": 660602880,
+        "reserved_bytes.large_pool.current": 660602880,
+        "reserved_bytes.large_pool.freed": 0,
+        "reserved_bytes.large_pool.peak": 660602880,
+        "reserved_bytes.small_pool.allocated": 144703488,
+        "reserved_bytes.small_pool.current": 144703488,
+        "reserved_bytes.small_pool.freed": 0,
+        "reserved_bytes.small_pool.peak": 144703488,
+        "segment.all.allocated": 97,
+        "segment.all.current": 97,
+        "segment.all.freed": 0,
+        "segment.all.peak": 97,
+        "segment.large_pool.allocated": 28,
+        "segment.large_pool.current": 28,
+        "segment.large_pool.freed": 0,
+        "segment.large_pool.peak": 28,
+        "segment.small_pool.allocated": 69,
+        "segment.small_pool.current": 69,
+        "segment.small_pool.freed": 0,
+        "segment.small_pool.peak": 69
+    }
+}
outputs/cola/bert-base-uncased_adapterstrain_val_0.0001_12345_8_10000/step_5999/head_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+    "config": {
+        "activation_function": "tanh",
+        "bias": true,
+        "dropout_prob": null,
+        "head_type": "classification",
+        "label2id": {
+            "LABEL_0": 0,
+            "LABEL_1": 1
+        },
+        "layers": 2,
+        "num_labels": 2,
+        "use_pooler": false
+    },
+    "hidden_size": 768,
+    "model_class": "BertAdapterModel",
+    "model_name": "bert-base-uncased",
+    "model_type": "bert",
+    "name": "cola",
+    "version": "0.2.1"
+}