alpha experiments
This view is limited to 50 files because the commit contains too many changes.
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/args.json +35 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/logfile.log +137 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/README.md +202 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/adapter_config.json +30 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/adapter_model.safetensors +3 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/all_results.json +1 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/all_results_val.json +1 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/eval_res.json +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/eval_res_val.json +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/gpu_stats.json +127 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/special_tokens_map.json +7 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/tokenizer.json +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/tokenizer_config.json +56 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/vocab.txt +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/README.md +202 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/adapter_config.json +30 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/adapter_model.safetensors +3 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/all_results.json +1 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/all_results_val.json +1 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/eval_res.json +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/eval_res_val.json +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/gpu_stats.json +127 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/special_tokens_map.json +7 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/tokenizer.json +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/tokenizer_config.json +56 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/vocab.txt +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/README.md +202 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/adapter_config.json +30 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/adapter_model.safetensors +3 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/all_results.json +1 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/all_results_val.json +1 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/eval_res.json +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/eval_res_val.json +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/gpu_stats.json +127 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/special_tokens_map.json +7 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/tokenizer.json +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/tokenizer_config.json +56 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/vocab.txt +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/README.md +202 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/adapter_config.json +30 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/adapter_model.safetensors +3 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/all_results.json +1 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/all_results_val.json +1 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/eval_res.json +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/eval_res_val.json +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/gpu_stats.json +127 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/special_tokens_map.json +7 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/tokenizer.json +0 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/tokenizer_config.json +56 -0
- Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/vocab.txt +0 -0
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/args.json
ADDED
@@ -0,0 +1,35 @@
{
    "task_name": "cola",
    "train_file": null,
    "validation_file": null,
    "max_length": 256,
    "pad_to_max_length": false,
    "model_name_or_path": "google-bert/bert-base-uncased",
    "use_slow_tokenizer": false,
    "per_device_train_batch_size": 32,
    "per_device_eval_batch_size": 32,
    "learning_rate": 5e-05,
    "max_grad_norm": 0.5,
    "weight_decay": 0.0,
    "num_train_epochs": 5,
    "max_train_steps": null,
    "gradient_accumulation_steps": 1,
    "lr_scheduler_type": "linear",
    "num_warmup_steps": 0,
    "output_dir": "./outputs/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345",
    "seed": 12345,
    "push_to_hub": false,
    "hub_model_id": null,
    "hub_token": null,
    "checkpointing_steps": null,
    "resume_from_checkpoint": null,
    "with_tracking": false,
    "report_to": "all",
    "ignore_mismatched_sizes": true,
    "save_train_results": false,
    "lora_r": 8,
    "lora_alpha": 16,
    "lora_dropout": 0.1,
    "testing_set": "train_val",
    "lm_head": true
}
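The `lora_r`, `lora_alpha`, and `lora_dropout` values above map one-to-one onto a PEFT `LoraConfig`. As a hedged sketch, not the repo's actual training script (the `target_modules` list is copied from `step_0/adapter_config.json` further down in this commit):

```python
# Sketch of the LoRA setup implied by args.json (assumptions noted inline).
from transformers import AutoModelForSequenceClassification
from peft import LoraConfig, get_peft_model

base = AutoModelForSequenceClassification.from_pretrained(
    "google-bert/bert-base-uncased", num_labels=2  # CoLA is binary
)
config = LoraConfig(
    task_type="SEQ_CLS",
    r=8,                # "lora_r" in args.json
    lora_alpha=16,      # "lora_alpha"
    lora_dropout=0.1,   # "lora_dropout"
    target_modules=["query", "value", "classifier"],  # from adapter_config.json
)
model = get_peft_model(base, config)
model.print_trainable_parameters()  # only the adapter modules are trainable
```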
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/logfile.log
ADDED
@@ -0,0 +1,137 @@
04/30/2024 23:09:14 - INFO - __main__ - Number of labels detected = 2
04/30/2024 23:09:14 - INFO - __main__ - None
04/30/2024 23:09:15 - INFO - __main__ - Sample 3412 of the training set: {'input_ids': [101, 1045, 12781, 1996, 7427, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1], 'labels': 1}.
04/30/2024 23:09:15 - INFO - __main__ - Sample 6002 of the training set: {'input_ids': [101, 1045, 2442, 2064, 4521, 22088, 2015, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 0}.
04/30/2024 23:09:15 - INFO - __main__ - Sample 83 of the training set: {'input_ids': [101, 1996, 7764, 22257, 2993, 2000, 1996, 2598, 1012, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'labels': 0}.
04/30/2024 23:09:15 - INFO - __main__ - Max training steps before recalculation = None
04/30/2024 23:09:15 - INFO - __main__ - num_update_steps_per_epoch initial = 214
04/30/2024 23:09:15 - INFO - __main__ - num training epochs initial = 5
04/30/2024 23:09:15 - INFO - __main__ - Adjusted num_train_epochs based on max_train_steps: 5
04/30/2024 23:09:15 - INFO - __main__ - PeftModelForSequenceClassification(
  (base_model): LoraModel(
    (model): BertForSequenceClassification(
      (bert): BertModel(
        (embeddings): BertEmbeddings(
          (word_embeddings): Embedding(30522, 768, padding_idx=0)
          (position_embeddings): Embedding(512, 768)
          (token_type_embeddings): Embedding(2, 768)
          (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
          (dropout): Dropout(p=0.1, inplace=False)
        )
        (encoder): BertEncoder(
          (layer): ModuleList(
            (0-11): 12 x BertLayer(
              (attention): BertAttention(
                (self): BertSelfAttention(
                  (query): lora.Linear(
                    (base_layer): Linear(in_features=768, out_features=768, bias=True)
                    (lora_dropout): ModuleDict(
                      (default): Dropout(p=0.1, inplace=False)
                    )
                    (lora_A): ModuleDict(
                      (default): Linear(in_features=768, out_features=8, bias=False)
                    )
                    (lora_B): ModuleDict(
                      (default): Linear(in_features=8, out_features=768, bias=False)
                    )
                    (lora_embedding_A): ParameterDict()
                    (lora_embedding_B): ParameterDict()
                  )
                  (key): Linear(in_features=768, out_features=768, bias=True)
                  (value): lora.Linear(
                    (base_layer): Linear(in_features=768, out_features=768, bias=True)
                    (lora_dropout): ModuleDict(
                      (default): Dropout(p=0.1, inplace=False)
                    )
                    (lora_A): ModuleDict(
                      (default): Linear(in_features=768, out_features=8, bias=False)
                    )
                    (lora_B): ModuleDict(
                      (default): Linear(in_features=8, out_features=768, bias=False)
                    )
                    (lora_embedding_A): ParameterDict()
                    (lora_embedding_B): ParameterDict()
                  )
                  (dropout): Dropout(p=0.1, inplace=False)
                )
                (output): BertSelfOutput(
                  (dense): Linear(in_features=768, out_features=768, bias=True)
                  (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
                  (dropout): Dropout(p=0.1, inplace=False)
                )
              )
              (intermediate): BertIntermediate(
                (dense): Linear(in_features=768, out_features=3072, bias=True)
                (intermediate_act_fn): GELUActivation()
              )
              (output): BertOutput(
                (dense): Linear(in_features=3072, out_features=768, bias=True)
                (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)
                (dropout): Dropout(p=0.1, inplace=False)
              )
            )
          )
        )
        (pooler): BertPooler(
          (dense): Linear(in_features=768, out_features=768, bias=True)
          (activation): Tanh()
        )
      )
      (dropout): Dropout(p=0.1, inplace=False)
      (classifier): ModulesToSaveWrapper(
        (original_module): lora.Linear(
          (base_layer): Linear(in_features=768, out_features=2, bias=True)
          (lora_dropout): ModuleDict(
            (default): Dropout(p=0.1, inplace=False)
          )
          (lora_A): ModuleDict(
            (default): Linear(in_features=768, out_features=8, bias=False)
          )
          (lora_B): ModuleDict(
            (default): Linear(in_features=8, out_features=2, bias=False)
          )
          (lora_embedding_A): ParameterDict()
          (lora_embedding_B): ParameterDict()
        )
        (modules_to_save): ModuleDict(
          (default): lora.Linear(
            (base_layer): Linear(in_features=768, out_features=2, bias=True)
            (lora_dropout): ModuleDict(
              (default): Dropout(p=0.1, inplace=False)
            )
            (lora_A): ModuleDict(
              (default): Linear(in_features=768, out_features=8, bias=False)
            )
            (lora_B): ModuleDict(
              (default): Linear(in_features=8, out_features=2, bias=False)
            )
            (lora_embedding_A): ParameterDict()
            (lora_embedding_B): ParameterDict()
          )
        )
      )
    )
  )
)
04/30/2024 23:09:16 - INFO - __main__ - num_update_steps_per_epoch before recalculation = 214
04/30/2024 23:09:16 - INFO - __main__ - num_update_steps_per_epoch after recalculation = 214
04/30/2024 23:09:16 - INFO - __main__ - num training epochs before recalculation = 5
04/30/2024 23:09:16 - INFO - __main__ - ***** Running training *****
04/30/2024 23:09:16 - INFO - __main__ - Num examples = 6840
04/30/2024 23:09:16 - INFO - __main__ - Num Epochs = 5
04/30/2024 23:09:16 - INFO - __main__ - Instantaneous batch size per device = 32
04/30/2024 23:09:16 - INFO - __main__ - Total train batch size (w. parallel, distributed & accumulation) = 32
04/30/2024 23:09:16 - INFO - __main__ - Gradient Accumulation steps = 1
04/30/2024 23:09:16 - INFO - __main__ - Total optimization steps = 1070
04/30/2024 23:09:18 - INFO - __main__ - epoch 0: {'matthews_correlation': -0.008044856322926524}
04/30/2024 23:09:21 - INFO - __main__ - epoch 0: {'matthews_correlation': -0.008842368995538273}
04/30/2024 23:09:41 - INFO - __main__ - epoch 0: {'matthews_correlation': -0.020702674026557004}
04/30/2024 23:09:43 - INFO - __main__ - epoch 0: {'matthews_correlation': 0.0}
04/30/2024 23:10:04 - INFO - __main__ - epoch 1: {'matthews_correlation': 0.3476595303042622}
04/30/2024 23:10:07 - INFO - __main__ - epoch 1: {'matthews_correlation': 0.3805279319229517}
04/30/2024 23:10:27 - INFO - __main__ - epoch 2: {'matthews_correlation': 0.3944424231201585}
04/30/2024 23:10:29 - INFO - __main__ - epoch 2: {'matthews_correlation': 0.41664754833015816}
04/30/2024 23:10:49 - INFO - __main__ - epoch 3: {'matthews_correlation': 0.4035662082408423}
04/30/2024 23:10:52 - INFO - __main__ - epoch 3: {'matthews_correlation': 0.4253583776744412}
04/30/2024 23:11:12 - INFO - __main__ - epoch 4: {'matthews_correlation': 0.39743424346745876}
04/30/2024 23:11:14 - INFO - __main__ - epoch 4: {'matthews_correlation': 0.4154373562708837}
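The per-epoch figures above are Matthews correlation, the standard CoLA metric; it ranges over [-1, 1] with 0 at chance level, which is why the freshly initialized adapter starts near 0. A minimal sketch of the computation (toy labels, not this run's data):

```python
# Matthews correlation coefficient on toy labels (illustration only).
from sklearn.metrics import matthews_corrcoef

y_true = [1, 0, 0, 1, 1, 0]  # made-up gold labels
y_pred = [1, 0, 1, 1, 0, 0]  # made-up predictions
print(matthews_corrcoef(y_true, y_pred))  # 0.333...; 0.0 is chance level
```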
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/README.md
ADDED
@@ -0,0 +1,202 @@
---
library_name: peft
base_model: google-bert/bert-base-uncased
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->



## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->



- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]


#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary



## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
### Framework versions

- PEFT 0.10.0
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/adapter_config.json
ADDED
@@ -0,0 +1,30 @@
{
    "alpha_pattern": {},
    "auto_mapping": null,
    "base_model_name_or_path": "google-bert/bert-base-uncased",
    "bias": "none",
    "fan_in_fan_out": false,
    "inference_mode": true,
    "init_lora_weights": true,
    "layer_replication": null,
    "layers_pattern": null,
    "layers_to_transform": null,
    "loftq_config": {},
    "lora_alpha": 16,
    "lora_dropout": 0.1,
    "megatron_config": null,
    "megatron_core": "megatron.core",
    "modules_to_save": null,
    "peft_type": "LORA",
    "r": 8,
    "rank_pattern": {},
    "revision": null,
    "target_modules": [
        "query",
        "classifier",
        "value"
    ],
    "task_type": "SEQ_CLS",
    "use_dora": false,
    "use_rslora": false
}
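Given this adapter config, a checkpoint from this directory can be loaded back with PEFT; a hedged sketch (the local path is an assumption matching this repo's layout):

```python
# Sketch: attach the saved step_0 adapter to the base model.
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftModel

ckpt = "Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0"
base = AutoModelForSequenceClassification.from_pretrained(
    "google-bert/bert-base-uncased", num_labels=2
)
model = PeftModel.from_pretrained(base, ckpt)    # reads adapter_config.json + adapter_model.safetensors
tokenizer = AutoTokenizer.from_pretrained(ckpt)  # tokenizer files are saved alongside the adapter
model.eval()
```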
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:55499920f591b60a49071fe59feafb9032e4823c660f03b68c110d357f749fa6
size 1267328
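The three lines above are a Git LFS pointer rather than the weights themselves; after `git lfs pull`, the roughly 1.2 MB adapter file can be inspected directly. A sketch (path assumed from this repo's layout):

```python
# Sketch: list the LoRA tensors stored in the checkpoint.
from safetensors.torch import load_file

state = load_file(
    "Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/adapter_model.safetensors"
)
for name, tensor in state.items():
    print(name, tuple(tensor.shape))  # lora_A/lora_B pairs with a rank-8 inner dimension
```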
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/all_results.json
ADDED
@@ -0,0 +1 @@
{"eval_matthews_correlation": -0.008044856322926524}
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/all_results_val.json
ADDED
@@ -0,0 +1 @@
{"eval_matthews_correlation": -0.008842368995538273}
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/eval_res.json
ADDED
The diff for this file is too large to render.
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/eval_res_val.json
ADDED
The diff for this file is too large to render.
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/gpu_stats.json
ADDED
@@ -0,0 +1,127 @@
{
    "memory_allocated": 448859648,
    "max_memory_allocated": 487060992,
    "memory_reserved": 557842432,
    "max_memory_reserved": 557842432,
    "memory_stats": {
        "active.all.allocated": 47793,
        "active.all.current": 269,
        "active.all.freed": 47524,
        "active.all.peak": 283,
        "active.large_pool.allocated": 23288,
        "active.large_pool.current": 76,
        "active.large_pool.freed": 23212,
        "active.large_pool.peak": 85,
        "active.small_pool.allocated": 24505,
        "active.small_pool.current": 193,
        "active.small_pool.freed": 24312,
        "active.small_pool.peak": 207,
        "active_bytes.all.allocated": 72107065344,
        "active_bytes.all.current": 448859648,
        "active_bytes.all.freed": 71658205696,
        "active_bytes.all.peak": 487060992,
        "active_bytes.large_pool.allocated": 63002771456,
        "active_bytes.large_pool.current": 447086592,
        "active_bytes.large_pool.freed": 62555684864,
        "active_bytes.large_pool.peak": 485261312,
        "active_bytes.small_pool.allocated": 9104293888,
        "active_bytes.small_pool.current": 1773056,
        "active_bytes.small_pool.freed": 9102520832,
        "active_bytes.small_pool.peak": 7395328,
        "allocated_bytes.all.allocated": 72107065344,
        "allocated_bytes.all.current": 448859648,
        "allocated_bytes.all.freed": 71658205696,
        "allocated_bytes.all.peak": 487060992,
        "allocated_bytes.large_pool.allocated": 63002771456,
        "allocated_bytes.large_pool.current": 447086592,
        "allocated_bytes.large_pool.freed": 62555684864,
        "allocated_bytes.large_pool.peak": 485261312,
        "allocated_bytes.small_pool.allocated": 9104293888,
        "allocated_bytes.small_pool.current": 1773056,
        "allocated_bytes.small_pool.freed": 9102520832,
        "allocated_bytes.small_pool.peak": 7395328,
        "allocation.all.allocated": 47793,
        "allocation.all.current": 269,
        "allocation.all.freed": 47524,
        "allocation.all.peak": 283,
        "allocation.large_pool.allocated": 23288,
        "allocation.large_pool.current": 76,
        "allocation.large_pool.freed": 23212,
        "allocation.large_pool.peak": 85,
        "allocation.small_pool.allocated": 24505,
        "allocation.small_pool.current": 193,
        "allocation.small_pool.freed": 24312,
        "allocation.small_pool.peak": 207,
        "inactive_split.all.allocated": 35980,
        "inactive_split.all.current": 22,
        "inactive_split.all.freed": 35958,
        "inactive_split.all.peak": 29,
        "inactive_split.large_pool.allocated": 20697,
        "inactive_split.large_pool.current": 18,
        "inactive_split.large_pool.freed": 20679,
        "inactive_split.large_pool.peak": 24,
        "inactive_split.small_pool.allocated": 15283,
        "inactive_split.small_pool.current": 4,
        "inactive_split.small_pool.freed": 15279,
        "inactive_split.small_pool.peak": 11,
        "inactive_split_bytes.all.allocated": 75802501120,
        "inactive_split_bytes.all.current": 46068224,
        "inactive_split_bytes.all.freed": 75756432896,
        "inactive_split_bytes.all.peak": 91133952,
        "inactive_split_bytes.large_pool.allocated": 65641709568,
        "inactive_split_bytes.large_pool.current": 45744128,
        "inactive_split_bytes.large_pool.freed": 65595965440,
        "inactive_split_bytes.large_pool.peak": 90832896,
        "inactive_split_bytes.small_pool.allocated": 10160791552,
        "inactive_split_bytes.small_pool.current": 324096,
        "inactive_split_bytes.small_pool.freed": 10160467456,
        "inactive_split_bytes.small_pool.peak": 5386240,
        "max_split_size": -1,
        "num_alloc_retries": 0,
        "num_ooms": 0,
        "oversize_allocations.allocated": 0,
        "oversize_allocations.current": 0,
        "oversize_allocations.freed": 0,
        "oversize_allocations.peak": 0,
        "oversize_segments.allocated": 0,
        "oversize_segments.current": 0,
        "oversize_segments.freed": 0,
        "oversize_segments.peak": 0,
        "requested_bytes.all.allocated": 65332312048,
        "requested_bytes.all.current": 447722516,
        "requested_bytes.all.freed": 64884589532,
        "requested_bytes.all.peak": 485596180,
        "requested_bytes.large_pool.allocated": 56231950336,
        "requested_bytes.large_pool.current": 445954048,
        "requested_bytes.large_pool.freed": 55785996288,
        "requested_bytes.large_pool.peak": 483801088,
        "requested_bytes.small_pool.allocated": 9100361712,
        "requested_bytes.small_pool.current": 1768468,
        "requested_bytes.small_pool.freed": 9098593244,
        "requested_bytes.small_pool.peak": 7390288,
        "reserved_bytes.all.allocated": 557842432,
        "reserved_bytes.all.current": 557842432,
        "reserved_bytes.all.freed": 0,
        "reserved_bytes.all.peak": 557842432,
        "reserved_bytes.large_pool.allocated": 547356672,
        "reserved_bytes.large_pool.current": 547356672,
        "reserved_bytes.large_pool.freed": 0,
        "reserved_bytes.large_pool.peak": 547356672,
        "reserved_bytes.small_pool.allocated": 10485760,
        "reserved_bytes.small_pool.current": 10485760,
        "reserved_bytes.small_pool.freed": 0,
        "reserved_bytes.small_pool.peak": 10485760,
        "segment.all.allocated": 28,
        "segment.all.current": 28,
        "segment.all.freed": 0,
        "segment.all.peak": 28,
        "segment.large_pool.allocated": 23,
        "segment.large_pool.current": 23,
        "segment.large_pool.freed": 0,
        "segment.large_pool.peak": 23,
        "segment.small_pool.allocated": 5,
        "segment.small_pool.current": 5,
        "segment.small_pool.freed": 0,
        "segment.small_pool.peak": 5
    }
}
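These fields mirror PyTorch's CUDA allocator counters, so a `gpu_stats.json` of this shape can be produced directly from `torch.cuda`; a hedged sketch (the dump format is an assumption, the APIs are standard):

```python
# Sketch: dump allocator statistics in the shape seen above.
import json
import torch

stats = {
    "memory_allocated": torch.cuda.memory_allocated(),
    "max_memory_allocated": torch.cuda.max_memory_allocated(),
    "memory_reserved": torch.cuda.memory_reserved(),
    "max_memory_reserved": torch.cuda.max_memory_reserved(),
    "memory_stats": torch.cuda.memory_stats(),  # flat dotted keys like "active.all.peak"
}
with open("gpu_stats.json", "w") as f:
    json.dump(stats, f, indent=4)
```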
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
{
    "cls_token": "[CLS]",
    "mask_token": "[MASK]",
    "pad_token": "[PAD]",
    "sep_token": "[SEP]",
    "unk_token": "[UNK]"
}
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/tokenizer.json
ADDED
The diff for this file is too large to render.
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/tokenizer_config.json
ADDED
@@ -0,0 +1,56 @@
{
    "added_tokens_decoder": {
        "0": {
            "content": "[PAD]",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "100": {
            "content": "[UNK]",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "101": {
            "content": "[CLS]",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "102": {
            "content": "[SEP]",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        },
        "103": {
            "content": "[MASK]",
            "lstrip": false,
            "normalized": false,
            "rstrip": false,
            "single_word": false,
            "special": true
        }
    },
    "clean_up_tokenization_spaces": true,
    "cls_token": "[CLS]",
    "do_lower_case": true,
    "mask_token": "[MASK]",
    "model_max_length": 512,
    "pad_token": "[PAD]",
    "padding_side": "left",
    "sep_token": "[SEP]",
    "strip_accents": null,
    "tokenize_chinese_chars": true,
    "tokenizer_class": "BertTokenizer",
    "unk_token": "[UNK]"
}
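The special-token ids implied by `added_tokens_decoder` match the training log, where every `input_ids` row starts with 101 ([CLS]) and ends with 102 ([SEP]). A quick check (the example sentence is made up):

```python
# Sketch: confirm the special-token ids used in the logged samples.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
print(tok.pad_token_id, tok.unk_token_id, tok.cls_token_id,
      tok.sep_token_id, tok.mask_token_id)  # 0 100 101 102 103
enc = tok("This sentence is grammatical.")  # made-up example
print(enc["input_ids"][0], enc["input_ids"][-1])  # 101 ... 102
```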
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_0/vocab.txt
ADDED
The diff for this file is too large to render.
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/README.md
ADDED
@@ -0,0 +1,202 @@
(Identical to step_0/README.md: the unfilled PEFT model-card template, PEFT 0.10.0.)
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/adapter_config.json
ADDED
@@ -0,0 +1,30 @@
(Identical to step_0/adapter_config.json.)
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9bca03cea26f6938a67b6c814138fa26e945e3c33cd5208dd34e15a7e54d3ceb
size 1267328
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/all_results.json
ADDED
@@ -0,0 +1 @@
{"eval_matthews_correlation": 0.39743424346745876}
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/all_results_val.json
ADDED
@@ -0,0 +1 @@
{"eval_matthews_correlation": 0.4154373562708837}
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/eval_res.json
ADDED
The diff for this file is too large to render.
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/eval_res_val.json
ADDED
The diff for this file is too large to render.
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/gpu_stats.json
ADDED
@@ -0,0 +1,127 @@
{
    "memory_allocated": 459795968,
    "max_memory_allocated": 1279719424,
    "memory_reserved": 1379926016,
    "max_memory_reserved": 1379926016,
    "memory_stats": {
        "active.all.allocated": 1456108,
        "active.all.current": 374,
        "active.all.freed": 1455734,
        "active.all.peak": 637,
        "active.large_pool.allocated": 804296,
        "active.large_pool.current": 77,
        "active.large_pool.freed": 804219,
        "active.large_pool.peak": 248,
        "active.small_pool.allocated": 651812,
        "active.small_pool.current": 297,
        "active.small_pool.freed": 651515,
        "active.small_pool.peak": 465,
        "active_bytes.all.allocated": 2561023614464,
        "active_bytes.all.current": 459795968,
        "active_bytes.all.freed": 2560563818496,
        "active_bytes.all.peak": 1279719424,
        "active_bytes.large_pool.allocated": 2308491953152,
        "active_bytes.large_pool.current": 455606272,
        "active_bytes.large_pool.freed": 2308036346880,
        "active_bytes.large_pool.peak": 1263869952,
        "active_bytes.small_pool.allocated": 252531661312,
        "active_bytes.small_pool.current": 4189696,
        "active_bytes.small_pool.freed": 252527471616,
        "active_bytes.small_pool.peak": 63705088,
        "allocated_bytes.all.allocated": 2561023614464,
        "allocated_bytes.all.current": 459795968,
        "allocated_bytes.all.freed": 2560563818496,
        "allocated_bytes.all.peak": 1279719424,
        "allocated_bytes.large_pool.allocated": 2308491953152,
        "allocated_bytes.large_pool.current": 455606272,
        "allocated_bytes.large_pool.freed": 2308036346880,
        "allocated_bytes.large_pool.peak": 1263869952,
        "allocated_bytes.small_pool.allocated": 252531661312,
        "allocated_bytes.small_pool.current": 4189696,
        "allocated_bytes.small_pool.freed": 252527471616,
        "allocated_bytes.small_pool.peak": 63705088,
        "allocation.all.allocated": 1456108,
        "allocation.all.current": 374,
        "allocation.all.freed": 1455734,
        "allocation.all.peak": 637,
        "allocation.large_pool.allocated": 804296,
        "allocation.large_pool.current": 77,
        "allocation.large_pool.freed": 804219,
        "allocation.large_pool.peak": 248,
        "allocation.small_pool.allocated": 651812,
        "allocation.small_pool.current": 297,
        "allocation.small_pool.freed": 651515,
        "allocation.small_pool.peak": 465,
        "inactive_split.all.allocated": 854953,
        "inactive_split.all.current": 33,
        "inactive_split.all.freed": 854920,
        "inactive_split.all.peak": 85,
        "inactive_split.large_pool.allocated": 632651,
        "inactive_split.large_pool.current": 19,
        "inactive_split.large_pool.freed": 632632,
        "inactive_split.large_pool.peak": 41,
        "inactive_split.small_pool.allocated": 222302,
        "inactive_split.small_pool.current": 14,
        "inactive_split.small_pool.freed": 222288,
        "inactive_split.small_pool.peak": 54,
        "inactive_split_bytes.all.allocated": 2699003104768,
        "inactive_split_bytes.all.current": 70783488,
        "inactive_split_bytes.all.freed": 2698932321280,
        "inactive_split_bytes.all.peak": 130352640,
        "inactive_split_bytes.large_pool.allocated": 2424616453120,
        "inactive_split_bytes.large_pool.current": 58195968,
        "inactive_split_bytes.large_pool.freed": 2424558257152,
        "inactive_split_bytes.large_pool.peak": 116785152,
        "inactive_split_bytes.small_pool.allocated": 274386651648,
        "inactive_split_bytes.small_pool.current": 12587520,
        "inactive_split_bytes.small_pool.freed": 274374064128,
        "inactive_split_bytes.small_pool.peak": 37126144,
        "max_split_size": -1,
        "num_alloc_retries": 0,
        "num_ooms": 0,
        "oversize_allocations.allocated": 0,
        "oversize_allocations.current": 0,
        "oversize_allocations.freed": 0,
        "oversize_allocations.peak": 0,
        "oversize_segments.allocated": 0,
        "oversize_segments.current": 0,
        "oversize_segments.freed": 0,
        "oversize_segments.peak": 0,
        "requested_bytes.all.allocated": 2457172108420,
        "requested_bytes.all.current": 458656676,
        "requested_bytes.all.freed": 2456713451744,
        "requested_bytes.all.peak": 1251715632,
        "requested_bytes.large_pool.allocated": 2204685203456,
        "requested_bytes.large_pool.current": 454473728,
        "requested_bytes.large_pool.freed": 2204230729728,
        "requested_bytes.large_pool.peak": 1235879936,
        "requested_bytes.small_pool.allocated": 252486904964,
        "requested_bytes.small_pool.current": 4182948,
        "requested_bytes.small_pool.freed": 252482722016,
        "requested_bytes.small_pool.peak": 63687344,
        "reserved_bytes.all.allocated": 1379926016,
        "reserved_bytes.all.current": 1379926016,
        "reserved_bytes.all.freed": 0,
        "reserved_bytes.all.peak": 1379926016,
        "reserved_bytes.large_pool.allocated": 1308622848,
        "reserved_bytes.large_pool.current": 1308622848,
        "reserved_bytes.large_pool.freed": 0,
        "reserved_bytes.large_pool.peak": 1308622848,
        "reserved_bytes.small_pool.allocated": 71303168,
        "reserved_bytes.small_pool.current": 71303168,
        "reserved_bytes.small_pool.freed": 0,
        "reserved_bytes.small_pool.peak": 71303168,
        "segment.all.allocated": 97,
        "segment.all.current": 97,
        "segment.all.freed": 0,
        "segment.all.peak": 97,
        "segment.large_pool.allocated": 63,
        "segment.large_pool.current": 63,
        "segment.large_pool.freed": 0,
        "segment.large_pool.peak": 63,
        "segment.small_pool.allocated": 34,
        "segment.small_pool.current": 34,
        "segment.small_pool.freed": 0,
        "segment.small_pool.peak": 34
    }
}
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
(Identical to step_0/special_tokens_map.json.)
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/tokenizer.json
ADDED
The diff for this file is too large to render.
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/tokenizer_config.json
ADDED
@@ -0,0 +1,56 @@
(Identical to step_0/tokenizer_config.json.)
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_1069/vocab.txt
ADDED
The diff for this file is too large to render.
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/README.md
ADDED
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
---
library_name: peft
base_model: google-bert/bert-base-uncased
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]

### Framework versions

- PEFT 0.10.0

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/adapter_config.json
ADDED
@@ -0,0 +1,30 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "google-bert/bert-base-uncased",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 16,
  "lora_dropout": 0.1,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 8,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "query",
    "classifier",
    "value"
  ],
  "task_type": "SEQ_CLS",
  "use_dora": false,
  "use_rslora": false
}

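This adapter_config.json says the checkpoint stores rank-8 LoRA factors (alpha 16, dropout 0.1) for the attention `query`/`value` projections and the `classifier` head, on top of a frozen `google-bert/bert-base-uncased`. A minimal loading sketch, assuming `peft` and `transformers` are installed; `"step_213"` abbreviates the checkpoint directory:

```python
# Hypothetical sketch; the adapter directory path is abbreviated.
from transformers import AutoModelForSequenceClassification
from peft import PeftModel

base = AutoModelForSequenceClassification.from_pretrained(
    "google-bert/bert-base-uncased", num_labels=2  # CoLA: acceptable vs. not
)
model = PeftModel.from_pretrained(base, "step_213")  # reads adapter_config.json
model.eval()                                         # and adapter_model.safetensors
```
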
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0849fa2d0971cd506ac8351fe5d80b1d5d369587a4d19342f87c358c52d44e94
size 1267328

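The three lines above are a Git LFS pointer, not the weights themselves; `git lfs pull` fetches the ~1.2 MB payload. Once materialized, the adapter tensors can be listed with the `safetensors` library (a sketch, assuming it is installed and using the abbreviated checkpoint path):

```python
# Hypothetical inspection sketch for the downloaded adapter file.
from safetensors import safe_open

with safe_open("step_213/adapter_model.safetensors", framework="pt") as f:
    for name in f.keys():
        # rank-8 lora_A / lora_B factors for query, value, and the classifier
        print(name, tuple(f.get_tensor(name).shape))
```
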
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/all_results.json
ADDED
@@ -0,0 +1 @@
{"eval_matthews_correlation": -0.020702674026557004}

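`eval_matthews_correlation` is the standard CoLA metric: it ranges from -1 to 1, with 0 meaning chance-level agreement, so the slightly negative score at step 213 means the adapter has not yet learned the task. A sketch of how the number is computed, assuming scikit-learn is installed (the label arrays are illustrative):

```python
# Illustrative Matthews correlation on toy labels.
from sklearn.metrics import matthews_corrcoef

y_true = [1, 0, 1, 1, 0, 1, 0, 0]
y_pred = [1, 0, 0, 1, 0, 1, 1, 0]
print(matthews_corrcoef(y_true, y_pred))  # 1.0 perfect, 0.0 chance, < 0 anti-correlated
```
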
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/all_results_val.json
ADDED
@@ -0,0 +1 @@
{"eval_matthews_correlation": 0.0}

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/eval_res.json
ADDED
The diff for this file is too large to render. See raw diff.

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/eval_res_val.json
ADDED
The diff for this file is too large to render. See raw diff.

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/gpu_stats.json
ADDED
@@ -0,0 +1,127 @@
{
  "memory_allocated": 459792896,
  "max_memory_allocated": 1279719424,
  "memory_reserved": 1379926016,
  "max_memory_reserved": 1379926016,
  "memory_stats": {
    "active.all.allocated": 328664,
    "active.all.current": 374,
    "active.all.freed": 328290,
    "active.all.peak": 637,
    "active.large_pool.allocated": 178844,
    "active.large_pool.current": 77,
    "active.large_pool.freed": 178767,
    "active.large_pool.peak": 248,
    "active.small_pool.allocated": 149820,
    "active.small_pool.current": 297,
    "active.small_pool.freed": 149523,
    "active.small_pool.peak": 465,
    "active_bytes.all.allocated": 569097968128,
    "active_bytes.all.current": 459792896,
    "active_bytes.all.freed": 568638175232,
    "active_bytes.all.peak": 1279719424,
    "active_bytes.large_pool.allocated": 511203555840,
    "active_bytes.large_pool.current": 455606272,
    "active_bytes.large_pool.freed": 510747949568,
    "active_bytes.large_pool.peak": 1263869952,
    "active_bytes.small_pool.allocated": 57894412288,
    "active_bytes.small_pool.current": 4186624,
    "active_bytes.small_pool.freed": 57890225664,
    "active_bytes.small_pool.peak": 63705088,
    "allocated_bytes.all.allocated": 569097968128,
    "allocated_bytes.all.current": 459792896,
    "allocated_bytes.all.freed": 568638175232,
    "allocated_bytes.all.peak": 1279719424,
    "allocated_bytes.large_pool.allocated": 511203555840,
    "allocated_bytes.large_pool.current": 455606272,
    "allocated_bytes.large_pool.freed": 510747949568,
    "allocated_bytes.large_pool.peak": 1263869952,
    "allocated_bytes.small_pool.allocated": 57894412288,
    "allocated_bytes.small_pool.current": 4186624,
    "allocated_bytes.small_pool.freed": 57890225664,
    "allocated_bytes.small_pool.peak": 63705088,
    "allocation.all.allocated": 328664,
    "allocation.all.current": 374,
    "allocation.all.freed": 328290,
    "allocation.all.peak": 637,
    "allocation.large_pool.allocated": 178844,
    "allocation.large_pool.current": 77,
    "allocation.large_pool.freed": 178767,
    "allocation.large_pool.peak": 248,
    "allocation.small_pool.allocated": 149820,
    "allocation.small_pool.current": 297,
    "allocation.small_pool.freed": 149523,
    "allocation.small_pool.peak": 465,
    "inactive_split.all.allocated": 198251,
    "inactive_split.all.current": 34,
    "inactive_split.all.freed": 198217,
    "inactive_split.all.peak": 85,
    "inactive_split.large_pool.allocated": 142310,
    "inactive_split.large_pool.current": 19,
    "inactive_split.large_pool.freed": 142291,
    "inactive_split.large_pool.peak": 41,
    "inactive_split.small_pool.allocated": 55941,
    "inactive_split.small_pool.current": 15,
    "inactive_split.small_pool.freed": 55926,
    "inactive_split.small_pool.peak": 54,
    "inactive_split_bytes.all.allocated": 599748315136,
    "inactive_split_bytes.all.current": 70786560,
    "inactive_split_bytes.all.freed": 599677528576,
    "inactive_split_bytes.all.peak": 130352640,
    "inactive_split_bytes.large_pool.allocated": 536647182848,
    "inactive_split_bytes.large_pool.current": 58195968,
    "inactive_split_bytes.large_pool.freed": 536588986880,
    "inactive_split_bytes.large_pool.peak": 116785152,
    "inactive_split_bytes.small_pool.allocated": 63101132288,
    "inactive_split_bytes.small_pool.current": 12590592,
    "inactive_split_bytes.small_pool.freed": 63088541696,
    "inactive_split_bytes.small_pool.peak": 37076992,
    "max_split_size": -1,
    "num_alloc_retries": 0,
    "num_ooms": 0,
    "oversize_allocations.allocated": 0,
    "oversize_allocations.current": 0,
    "oversize_allocations.freed": 0,
    "oversize_allocations.peak": 0,
    "oversize_segments.allocated": 0,
    "oversize_segments.current": 0,
    "oversize_segments.freed": 0,
    "oversize_segments.peak": 0,
    "requested_bytes.all.allocated": 543116634212,
    "requested_bytes.all.current": 458653796,
    "requested_bytes.all.freed": 542657980416,
    "requested_bytes.all.peak": 1251715632,
    "requested_bytes.large_pool.allocated": 485234358272,
    "requested_bytes.large_pool.current": 454473728,
    "requested_bytes.large_pool.freed": 484779884544,
    "requested_bytes.large_pool.peak": 1235879936,
    "requested_bytes.small_pool.allocated": 57882275940,
    "requested_bytes.small_pool.current": 4180068,
    "requested_bytes.small_pool.freed": 57878095872,
    "requested_bytes.small_pool.peak": 63687344,
    "reserved_bytes.all.allocated": 1379926016,
    "reserved_bytes.all.current": 1379926016,
    "reserved_bytes.all.freed": 0,
    "reserved_bytes.all.peak": 1379926016,
    "reserved_bytes.large_pool.allocated": 1308622848,
    "reserved_bytes.large_pool.current": 1308622848,
    "reserved_bytes.large_pool.freed": 0,
    "reserved_bytes.large_pool.peak": 1308622848,
    "reserved_bytes.small_pool.allocated": 71303168,
    "reserved_bytes.small_pool.current": 71303168,
    "reserved_bytes.small_pool.freed": 0,
    "reserved_bytes.small_pool.peak": 71303168,
    "segment.all.allocated": 97,
    "segment.all.current": 97,
    "segment.all.freed": 0,
    "segment.all.peak": 97,
    "segment.large_pool.allocated": 63,
    "segment.large_pool.current": 63,
    "segment.large_pool.freed": 0,
    "segment.large_pool.peak": 63,
    "segment.small_pool.allocated": 34,
    "segment.small_pool.current": 34,
    "segment.small_pool.freed": 0,
    "segment.small_pool.peak": 34
  }
}

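A minimal sketch of how a `gpu_stats.json` like the one above can be produced: the four top-level fields map to PyTorch's per-device allocator counters, and the flat dotted keys under `memory_stats` are exactly what `torch.cuda.memory_stats()` returns (torch and a CUDA device assumed):

```python
# Hypothetical logging sketch reproducing the structure of gpu_stats.json.
import json
import torch

stats = {
    "memory_allocated": torch.cuda.memory_allocated(),
    "max_memory_allocated": torch.cuda.max_memory_allocated(),
    "memory_reserved": torch.cuda.memory_reserved(),
    "max_memory_reserved": torch.cuda.max_memory_reserved(),
    "memory_stats": dict(torch.cuda.memory_stats()),  # dotted keys as above
}
with open("gpu_stats.json", "w") as fh:
    json.dump(stats, fh, indent=2)
```
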
Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
{
  "cls_token": "[CLS]",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/tokenizer_config.json
ADDED
@@ -0,0 +1,56 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_lower_case": true,
  "mask_token": "[MASK]",
  "model_max_length": 512,
  "pad_token": "[PAD]",
  "padding_side": "left",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_213/vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/README.md
ADDED
@@ -0,0 +1,202 @@
---
library_name: peft
base_model: google-bert/bert-base-uncased
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]

### Framework versions

- PEFT 0.10.0

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/adapter_config.json
ADDED
@@ -0,0 +1,30 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "google-bert/bert-base-uncased",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 16,
  "lora_dropout": 0.1,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 8,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "query",
    "classifier",
    "value"
  ],
  "task_type": "SEQ_CLS",
  "use_dora": false,
  "use_rslora": false
}

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2c4b16201607d198865d3765e0313d6fbfcfc5b2f963dfb3b569ce0ea12be154
size 1267328

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/all_results.json
ADDED
@@ -0,0 +1 @@
{"eval_matthews_correlation": 0.3476595303042622}

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/all_results_val.json
ADDED
@@ -0,0 +1 @@
{"eval_matthews_correlation": 0.3805279319229517}

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/eval_res.json
ADDED
The diff for this file is too large to render. See raw diff.

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/eval_res_val.json
ADDED
The diff for this file is too large to render. See raw diff.

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/gpu_stats.json
ADDED
@@ -0,0 +1,127 @@
{
  "memory_allocated": 459800576,
  "max_memory_allocated": 1279719424,
  "memory_reserved": 1379926016,
  "max_memory_reserved": 1379926016,
  "memory_stats": {
    "active.all.allocated": 610525,
    "active.all.current": 374,
    "active.all.freed": 610151,
    "active.all.peak": 637,
    "active.large_pool.allocated": 334997,
    "active.large_pool.current": 77,
    "active.large_pool.freed": 334920,
    "active.large_pool.peak": 248,
    "active.small_pool.allocated": 275528,
    "active.small_pool.current": 297,
    "active.small_pool.freed": 275231,
    "active.small_pool.peak": 465,
    "active_bytes.all.allocated": 1067720315392,
    "active_bytes.all.current": 459800576,
    "active_bytes.all.freed": 1067260514816,
    "active_bytes.all.peak": 1279719424,
    "active_bytes.large_pool.allocated": 960852216320,
    "active_bytes.large_pool.current": 455606272,
    "active_bytes.large_pool.freed": 960396610048,
    "active_bytes.large_pool.peak": 1263869952,
    "active_bytes.small_pool.allocated": 106868099072,
    "active_bytes.small_pool.current": 4194304,
    "active_bytes.small_pool.freed": 106863904768,
    "active_bytes.small_pool.peak": 63705088,
    "allocated_bytes.all.allocated": 1067720315392,
    "allocated_bytes.all.current": 459800576,
    "allocated_bytes.all.freed": 1067260514816,
    "allocated_bytes.all.peak": 1279719424,
    "allocated_bytes.large_pool.allocated": 960852216320,
    "allocated_bytes.large_pool.current": 455606272,
    "allocated_bytes.large_pool.freed": 960396610048,
    "allocated_bytes.large_pool.peak": 1263869952,
    "allocated_bytes.small_pool.allocated": 106868099072,
    "allocated_bytes.small_pool.current": 4194304,
    "allocated_bytes.small_pool.freed": 106863904768,
    "allocated_bytes.small_pool.peak": 63705088,
    "allocation.all.allocated": 610525,
    "allocation.all.current": 374,
    "allocation.all.freed": 610151,
    "allocation.all.peak": 637,
    "allocation.large_pool.allocated": 334997,
    "allocation.large_pool.current": 77,
    "allocation.large_pool.freed": 334920,
    "allocation.large_pool.peak": 248,
    "allocation.small_pool.allocated": 275528,
    "allocation.small_pool.current": 297,
    "allocation.small_pool.freed": 275231,
    "allocation.small_pool.peak": 465,
    "inactive_split.all.allocated": 362858,
    "inactive_split.all.current": 34,
    "inactive_split.all.freed": 362824,
    "inactive_split.all.peak": 85,
    "inactive_split.large_pool.allocated": 264638,
    "inactive_split.large_pool.current": 19,
    "inactive_split.large_pool.freed": 264619,
    "inactive_split.large_pool.peak": 41,
    "inactive_split.small_pool.allocated": 98220,
    "inactive_split.small_pool.current": 15,
    "inactive_split.small_pool.freed": 98205,
    "inactive_split.small_pool.peak": 54,
    "inactive_split_bytes.all.allocated": 1125091670528,
    "inactive_split_bytes.all.current": 70778880,
    "inactive_split_bytes.all.freed": 1125020891648,
    "inactive_split_bytes.all.peak": 130352640,
    "inactive_split_bytes.large_pool.allocated": 1008808340992,
    "inactive_split_bytes.large_pool.current": 58195968,
    "inactive_split_bytes.large_pool.freed": 1008750145024,
    "inactive_split_bytes.large_pool.peak": 116785152,
    "inactive_split_bytes.small_pool.allocated": 116283329536,
    "inactive_split_bytes.small_pool.current": 12582912,
    "inactive_split_bytes.small_pool.freed": 116270746624,
    "inactive_split_bytes.small_pool.peak": 37076992,
    "max_split_size": -1,
    "num_alloc_retries": 0,
    "num_ooms": 0,
    "oversize_allocations.allocated": 0,
    "oversize_allocations.current": 0,
    "oversize_allocations.freed": 0,
    "oversize_allocations.peak": 0,
    "oversize_segments.allocated": 0,
    "oversize_segments.current": 0,
    "oversize_segments.freed": 0,
    "oversize_segments.peak": 0,
    "requested_bytes.all.allocated": 1022607936764,
    "requested_bytes.all.current": 458660132,
    "requested_bytes.all.freed": 1022149276632,
    "requested_bytes.all.peak": 1251715632,
    "requested_bytes.large_pool.allocated": 915760019456,
    "requested_bytes.large_pool.current": 454473728,
    "requested_bytes.large_pool.freed": 915305545728,
    "requested_bytes.large_pool.peak": 1235879936,
    "requested_bytes.small_pool.allocated": 106847917308,
    "requested_bytes.small_pool.current": 4186404,
    "requested_bytes.small_pool.freed": 106843730904,
    "requested_bytes.small_pool.peak": 63687344,
    "reserved_bytes.all.allocated": 1379926016,
    "reserved_bytes.all.current": 1379926016,
    "reserved_bytes.all.freed": 0,
    "reserved_bytes.all.peak": 1379926016,
    "reserved_bytes.large_pool.allocated": 1308622848,
    "reserved_bytes.large_pool.current": 1308622848,
    "reserved_bytes.large_pool.freed": 0,
    "reserved_bytes.large_pool.peak": 1308622848,
    "reserved_bytes.small_pool.allocated": 71303168,
    "reserved_bytes.small_pool.current": 71303168,
    "reserved_bytes.small_pool.freed": 0,
    "reserved_bytes.small_pool.peak": 71303168,
    "segment.all.allocated": 97,
    "segment.all.current": 97,
    "segment.all.freed": 0,
    "segment.all.peak": 97,
    "segment.large_pool.allocated": 63,
    "segment.large_pool.current": 63,
    "segment.large_pool.freed": 0,
    "segment.large_pool.peak": 63,
    "segment.small_pool.allocated": 34,
    "segment.small_pool.current": 34,
    "segment.small_pool.freed": 0,
    "segment.small_pool.peak": 34
  }
}

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/special_tokens_map.json
ADDED
@@ -0,0 +1,7 @@
{
  "cls_token": "[CLS]",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/tokenizer_config.json
ADDED
@@ -0,0 +1,56 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": true,
  "cls_token": "[CLS]",
  "do_lower_case": true,
  "mask_token": "[MASK]",
  "model_max_length": 512,
  "pad_token": "[PAD]",
  "padding_side": "left",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}

Alpha_LoRA/cola/google-bert/bert-base-uncased_lora_lmheadtrain_val_8_16_0.1_5e-05_12345/step_427/vocab.txt
ADDED
The diff for this file is too large to render. See raw diff.