2-adapter-tuning (#29)

- feat: 2 adapter tuning (362ef00c6df4e2388dfcd39bd635240b4de4a9d2)
- feat: adapter masking wip (c1736a8d39f5dcc90d64dc5e0166322d845ec989)
- feat: adapter masking finished (76fc218113da3b2f66c26ebe0755301a3a67061c)
- remove commented lines (7815d415bc00f30898883911cc44d56a693d822d)
- feat: evaluation/encode (7c7eafb5e7efd8910501703227940b557e3e6a75)
- fix: check adapter (d2c9d0672483f10ce92caa3b6541f2582473c372)
- fix: mlp (4ee2970aeb1c2374482cafd3e6b734ed61f2c342)
- fix: device (65e9690599b3db9b2d532657d35dfafd7fd03578)
- some fixes and suggestions (814cbbb37fe99b95053380e44615018f38a5af8a)
- merge changes (70e22f5f4d394773c3b01c59f86617f99bb8a531)
Co-authored-by: Jack Min Ong <[email protected]>
- block.py +1 -11
- embedding.py +20 -24
- mha.py +31 -36
- mlp.py +25 -19
- modeling_lora.py +13 -12
- modeling_xlm_roberta.py +26 -28
- xlm_padding.py +5 -1
block.py
CHANGED

@@ -233,17 +233,7 @@ class Block(nn.Module):
                 is_rms_norm=isinstance(self.norm1, RMSNorm),
             )
             if not isinstance(self.mlp, nn.Identity):
-
-                if task_type:
-                    if isinstance(task_type, tuple):
-                        assert mixer_kwargs['cu_seqlens'].shape[0] % 9 == 1
-                        split_index = int((mixer_kwargs['cu_seqlens'].shape[0] - 1) / 9)
-                        split = mixer_kwargs['cu_seqlens'][split_index]
-                        mlp_out = self.mlp(hidden_states, task_type=mixer_kwargs.get('task_type'), split=split)
-                    else:
-                        mlp_out = self.mlp(hidden_states, task_type=task_type)
-                else:
-                    mlp_out = self.mlp(hidden_states)
+                mlp_out = self.mlp(hidden_states, cu_adapter_mask=mixer_kwargs.get('cu_adapter_mask'))
             if self.return_residual:  # mlp out is actually a pair here
                 mlp_out, hidden_states = mlp_out
             if not self.fused_dropout_add_ln:
embedding.py
CHANGED

@@ -40,25 +40,25 @@ class XLMRobertaEmbeddings(nn.Module):
         if self.type_vocab_size > 0:
             self.token_type_embeddings = nn.Embedding(type_vocab_size, embed_dim, **factory_kwargs)

+    def forward(self, input_ids, position_ids=None, token_type_ids=None, adapter_mask=None):
         """
         input_ids: (batch, seqlen)
         position_ids: (batch, seqlen)
         token_type_ids: (batch, seqlen)
         """
         batch_size, seqlen = input_ids.shape
+        if adapter_mask is not None:
+            unique_tasks = torch.unique(adapter_mask)
+            embedding_dtype = next(self.word_embeddings.parameters()).dtype
+            embeddings = torch.empty(*input_ids.shape, self.word_embeddings.embedding_dim,
+                                     dtype=embedding_dtype, device=input_ids.device)
+            for task_id in unique_tasks:
+                task_indices = (adapter_mask == task_id).nonzero(as_tuple=True)[0]
+                task_input_ids = input_ids[task_indices]
+                task_embeddings = self.word_embeddings(task_input_ids, task_id=task_id)
+                embeddings[task_indices] = task_embeddings
         else:
-            embeddings = self.word_embeddings(input_ids, **lora_kwargs)
+            embeddings = self.word_embeddings(input_ids)
         if self.max_position_embeddings > 0:
             if position_ids is None:
                 position_ids = create_position_ids_from_input_ids(input_ids, padding_idx=self.word_embeddings.padding_idx).to(input_ids.device)

@@ -68,18 +68,14 @@ class XLMRobertaEmbeddings(nn.Module):
         if self.type_vocab_size > 0:
             if token_type_ids is None:
                 token_type_ids = torch.zeros(seqlen, dtype=torch.long, device=input_ids.device)
+
+            if adapter_mask is not None:
+                unique_tasks = torch.unique(adapter_mask)
+                for task_id in unique_tasks:
+                    task_token_type_embeddings = self.token_type_embeddings(token_type_ids, task_id=task_id)
+                    task_indices = (adapter_mask == task_id).nonzero(as_tuple=True)[0]
+                    embeddings[task_indices] = embeddings[task_indices] + task_token_type_embeddings
             else:
-                token_type_embeddings = self.token_type_embeddings(token_type_ids, **lora_kwargs)
+                token_type_embeddings = self.token_type_embeddings(token_type_ids)
                 embeddings = embeddings + token_type_embeddings
         return embeddings
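The per-task routing idiom introduced here is the same one used below in mha.py, mlp.py, and the pooler: collect the rows that share an adapter id, run each group through the layer with that adapter's weights, and scatter the results back into a preallocated output. A minimal, self-contained sketch of the pattern; `TaskLinear` is an illustrative stand-in for the repository's LoRA-parametrized layers, not a class from this change:

import torch
import torch.nn as nn


class TaskLinear(nn.Linear):
    # Stand-in for a LoRA-parametrized nn.Linear whose patched forward accepts task_id.
    def forward(self, x, task_id=None):
        # A real implementation would select adapter weights based on task_id.
        return super().forward(x)


layer = TaskLinear(4, 8)
x = torch.randn(5, 4)
adapter_mask = torch.tensor([0, 1, 0, 1, 1], dtype=torch.int32)  # one adapter id per row

out = torch.empty(x.shape[0], layer.out_features, dtype=x.dtype, device=x.device)
for task_id in torch.unique(adapter_mask):
    idx = (adapter_mask == task_id).nonzero(as_tuple=True)[0]  # rows assigned to this adapter
    out[idx] = layer(x[idx], task_id=task_id)                   # forward only this group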
mha.py
CHANGED

@@ -590,7 +590,7 @@ class MHA(nn.Module):
         max_seqlen=None,
         mixer_subset=None,
         inference_params=None,
+        cu_adapter_mask=None,
         **kwargs,
     ):
         """

@@ -647,35 +647,27 @@ class MHA(nn.Module):
         if not self.cross_attn and self.num_heads_kv == self.num_heads:
            assert x_kv is None and mixer_subset is None

+            if cu_adapter_mask is not None:
+                unique_tasks = torch.unique(cu_adapter_mask)
+                qkv_dtype = next(self.Wqkv.parameters()).dtype
+                qkv = torch.empty(x.shape[0], self.Wqkv.out_features,
+                                  dtype=qkv_dtype, device=x.device)
+                for task_id in unique_tasks:
+                    task_indices = (cu_adapter_mask == task_id).nonzero(as_tuple=True)[0]
+                    task_tensor = x[task_indices]
+                    if not self.return_residual:
+                        task_qkv = self.Wqkv(task_tensor, task_id=task_id)
+                    else:
+                        task_qkv, _ = self.Wqkv(task_tensor, task_id=task_id, residual=True)
+                    qkv[task_indices] = task_qkv
            else:
+                if not self.return_residual:
+                    qkv = self.Wqkv(x)
                else:
+                    if hasattr(self.Wqkv, 'parametrizations'):
+                        qkv, x = self.Wqkv(x, residual=True)
+                    else:
+                        qkv, x = self.Wqkv(x)

            if self.dwconv:
                qkv = rearrange(

@@ -762,14 +754,17 @@ class MHA(nn.Module):
        else:
            context = self._apply_rotary_update_kvcache_attention(q, kv, inference_params)

-        lora_kwargs.pop('residual', None)
        inp = rearrange(context, "... h d -> ... (h d)")
+        if cu_adapter_mask is not None:
+            unique_tasks = torch.unique(cu_adapter_mask)
+            out_dtype = next(self.out_proj.parameters()).dtype
+            out = torch.empty(inp.shape[0], self.out_proj.out_features,
+                              dtype=out_dtype, device=inp.device)
+            for task_id in unique_tasks:
+                task_indices = (cu_adapter_mask == task_id).nonzero(as_tuple=True)[0]
+                task_tensor = inp[task_indices]
+                task_out = self.out_proj(task_tensor, task_id=task_id)
+                out[task_indices] = task_out
        else:
+            out = self.out_proj(inp)
        return out if not self.return_residual else (out, x)
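In the fallback branch, a LoRA-parametrized `Wqkv` is distinguished from a plain linear by checking for the `parametrizations` attribute that `torch.nn.utils.parametrize.register_parametrization` attaches to a module; only the parametrized layer gets the patched forward that accepts `residual=True`. A small illustration of that check, using a placeholder parametrization rather than the repository's LoRA one:

import torch.nn as nn
from torch.nn.utils import parametrize


class Identity(nn.Module):
    # Placeholder parametrization; the LoRA version would add a low-rank update to the weight.
    def forward(self, weight):
        return weight


plain = nn.Linear(8, 8)
wrapped = nn.Linear(8, 8)
parametrize.register_parametrization(wrapped, "weight", Identity())

print(hasattr(plain, "parametrizations"))    # False: treat it as a normal nn.Linear
print(hasattr(wrapped, "parametrizations"))  # True: register_parametrization adds a 'parametrizations' container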
mlp.py
CHANGED

@@ -47,30 +47,36 @@ class Mlp(nn.Module):
         self.activation = activation
         self.fc2 = nn.Linear(hidden_features, out_features, bias=bias2, **factory_kwargs)

+    def forward(self, x, cu_adapter_mask=None):
+        if cu_adapter_mask is not None:
+            unique_tasks = torch.unique(cu_adapter_mask)
+            fc1_dtype = next(self.fc1.parameters()).dtype
+            y = torch.empty(x.shape[0], self.fc1.out_features,
+                            dtype=fc1_dtype, device=x.device)
+            for task_id in unique_tasks:
+                task_indices = (cu_adapter_mask == task_id).nonzero(as_tuple=True)[0]
+                task_tensor = x[task_indices]
+                task_y = self.fc1(task_tensor, task_id=task_id)
+                y[task_indices] = task_y
         else:
+            y = self.fc1(x)

         y = self.activation(y)

+        if cu_adapter_mask is not None:
+            unique_tasks = torch.unique(cu_adapter_mask)
+            fc2_dtype = next(self.fc2.parameters()).dtype
+            out = torch.empty(y.shape[0], self.fc2.out_features,
+                              dtype=fc2_dtype, device=y.device)
+            for task_id in unique_tasks:
+                task_indices = (cu_adapter_mask == task_id).nonzero(as_tuple=True)[0]
+                task_tensor = y[task_indices]
+                task_out = self.fc2(task_tensor, task_id=task_id)
+                out[task_indices] = task_out
         else:
+            out = self.fc2(y)
+
+        return out if not self.return_residual else (out, x)


 class ParallelMLP(nn.Module):
modeling_lora.py
CHANGED

@@ -161,7 +161,6 @@ class LoRAParametrization(nn.Module):
         rank: int,
         dropout_p: float,
         alpha: float,
-        adaptation_map: dict,
     ):
         if isinstance(layer, nn.Linear):
             parametrize.register_parametrization(

@@ -176,10 +175,9 @@ class LoRAParametrization(nn.Module):
             ),
         )

+        def new_forward(self, input, task_id=None, residual=False):
+            if task_id is not None:
+                weights = self.parametrizations.weight[0].lora_forward(self.weight, current_task=task_id)
             else:
                 weights = self.weight

@@ -204,10 +202,9 @@ class LoRAParametrization(nn.Module):
             ),
         )

+        def new_forward(self, input, task_id=None):
+            if task_id is not None:
+                weights = self.parametrizations.weight[0].lora_forward(self.weight, current_task=task_id)
             else:
                 weights = self.weight

@@ -317,7 +314,6 @@ class XLMRobertaLoRA(XLMRobertaPreTrainedModel):
                 rank=rank,
                 dropout_p=dropout_p,
                 alpha=alpha,
-                adaptation_map=self._adaptation_map,
             )
         )

@@ -340,6 +336,7 @@ class XLMRobertaLoRA(XLMRobertaPreTrainedModel):
     @torch.inference_mode()
     def encode(
         self,
+        sentences: Union[str, List[str]],
         *args,
         task_type: Optional[str] = None,
         **kwargs,

@@ -358,5 +355,9 @@ class XLMRobertaLoRA(XLMRobertaPreTrainedModel):
                 f"Supported tasks are: {', '.join(self.config.lora_adaptations)}."
                 f"Alternatively, don't pass the `task_type` argument to disable LoRA."
             )
+        adapter_mask = None
+        if task_type:
+            task_id = self._adaptation_map[task_type]
+            num_examples = 1 if isinstance(sentences, str) else len(sentences)
+            adapter_mask = torch.full((num_examples,), task_id, dtype=torch.int32, device=self.device)
+        return self.roberta.encode(sentences, *args, adapter_mask=adapter_mask, **kwargs)
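`encode` now turns a single `task_type` string into a per-example adapter mask before delegating to the base model. A sketch of that mapping with a made-up adaptation map; the real adapter names and ids come from `config.lora_adaptations` and `self._adaptation_map`:

import torch

adaptation_map = {"query": 0, "passage": 1}  # illustrative names, not the model's actual adaptations
sentences = ["how do LoRA adapters work?", "LoRA keeps the base weights frozen."]
task_type = "query"

task_id = adaptation_map[task_type]
num_examples = 1 if isinstance(sentences, str) else len(sentences)
adapter_mask = torch.full((num_examples,), task_id, dtype=torch.int32)
print(adapter_mask)  # tensor([0, 0], dtype=torch.int32): one adapter id per input sentence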
modeling_xlm_roberta.py
CHANGED

@@ -204,16 +204,15 @@ class XLMRobertaEncoder(nn.Module):
     def gradient_checkpointing(self, value):
         self._grad_checkpointing = value

+    def forward(self, hidden_states, key_padding_mask=None, subset_mask=None, adapter_mask=None):
         """If subset_mask is not None, we only want output for the subset of the sequence.
         This means that we only compute the last layer output for these tokens.
         subset_mask: (batch, seqlen), dtype=torch.bool
         """
         if key_padding_mask is None or not self.use_flash_attn:
+            mixer_kwargs = {'adapter_mask': adapter_mask}
             if key_padding_mask is not None:
                 mixer_kwargs['key_padding_mask'] = key_padding_mask.bool()
             for layer in self.layers:
                 if self._grad_checkpointing:
                     hidden_states = torch.utils.checkpoint.checkpoint(

@@ -228,10 +227,11 @@ class XLMRobertaEncoder(nn.Module):
                 hidden_states = hidden_states[subset_mask]
         else:
             batch, seqlen = hidden_states.shape[:2]
-            hidden_states, indices, cu_seqlens, max_seqlen_in_batch = unpad_input(
-                hidden_states, key_padding_mask
+            hidden_states, indices, cu_seqlens, max_seqlen_in_batch, cu_adapter_mask = unpad_input(
+                hidden_states, key_padding_mask, adapter_mask
             )
+            mixer_kwargs = {"cu_seqlens": cu_seqlens, "max_seqlen": max_seqlen_in_batch, "cu_adapter_mask": cu_adapter_mask}
+
             if subset_mask is None:
                 for layer in self.layers:
                     if self._grad_checkpointing:

@@ -308,24 +308,22 @@ class XLMRobertaPooler(nn.Module):
         self.dense = linear_cls(config.hidden_size, config.hidden_size)
         self.activation = nn.Tanh()

+    def forward(self, hidden_states, pool=True, adapter_mask=None):
         # We "pool" the model by simply taking the hidden state corresponding
         # to the first token.
-        lora_kwargs = {'task_type': task_type} if task_type is not None else {}
-
         first_token_tensor = hidden_states[:, 0] if pool else hidden_states
+        if adapter_mask is not None:
+            unique_tasks = torch.unique(adapter_mask)
+            pool_dtype = next(self.dense.parameters()).dtype
+            pooled_output = torch.empty(first_token_tensor.shape[0], self.dense.out_features,
+                                        dtype=pool_dtype, device=first_token_tensor.device)
+            for task_id in unique_tasks:
+                task_indices = (adapter_mask == task_id).nonzero(as_tuple=True)[0]
+                task_first_token_tensor = first_token_tensor[task_indices]
+                task_pooled_output = self.dense(task_first_token_tensor, task_id=task_id)
+                pooled_output[task_indices] = task_pooled_output
         else:
+            pooled_output = self.dense(first_token_tensor)
         pooled_output = self.activation(pooled_output)
         return pooled_output

@@ -438,7 +436,6 @@ class XLMRobertaModel(XLMRobertaPreTrainedModel):
             "gelu_fast",
             "gelu_pytorch_tanh",
         ]
-
         self.embeddings = XLMRobertaEmbeddings(
             config.hidden_size,
             config.vocab_size,

@@ -466,6 +463,7 @@ class XLMRobertaModel(XLMRobertaPreTrainedModel):
         device: Optional[torch.device] = None,
         normalize_embeddings: bool = False,
         truncate_dim: Optional[int] = None,
+        adapter_mask: Optional[torch.Tensor] = None,
         task_type: Optional[str] = None,
         **tokenizer_kwargs,
     ) -> Union[List[torch.Tensor], np.ndarray, torch.Tensor]:

@@ -551,14 +549,14 @@ class XLMRobertaModel(XLMRobertaPreTrainedModel):
             )
         else:
             range_iter = range(0, len(sentences), batch_size)
+        lora_arguments = {'adapter_mask': adapter_mask} if adapter_mask is not None else {}
         for i in range_iter:
             encoded_input = self.tokenizer(
                 sentences[i : i + batch_size],
                 return_tensors='pt',
                 **tokenizer_kwargs,
             ).to(self.device)
+            token_embs = self.forward(**encoded_input, **lora_arguments)[0]

             # Accumulate in fp32 to avoid overflow
             token_embs = token_embs.float()

@@ -646,7 +644,7 @@ class XLMRobertaModel(XLMRobertaPreTrainedModel):
         layer output for these tokens.
         masked_tokens_mask: (batch, seqlen), dtype=torch.bool
         """
+        adapter_mask = kwargs.pop('adapter_mask', None)
         if kwargs:
             for key, value in kwargs.items():
                 if value is not None:

@@ -660,7 +658,7 @@ class XLMRobertaModel(XLMRobertaPreTrainedModel):
             )

         hidden_states = self.embeddings(
+            input_ids, position_ids=position_ids, token_type_ids=token_type_ids, adapter_mask=adapter_mask
         )
         # TD [2022-12:18]: Don't need to force residual in fp32
         # BERT puts embedding LayerNorm before embedding dropout.

@@ -684,12 +682,12 @@ class XLMRobertaModel(XLMRobertaPreTrainedModel):
             subset_mask = None

         sequence_output = self.encoder(
+            hidden_states, key_padding_mask=attention_mask, subset_mask=subset_mask, adapter_mask=adapter_mask
         )

         if masked_tokens_mask is None:
             pooled_output = (
+                self.pooler(sequence_output, adapter_mask=adapter_mask) if self.pooler is not None else None
             )
         else:
             # TD [2022-03-01]: the indexing here is very tricky.

@@ -703,7 +701,7 @@ class XLMRobertaModel(XLMRobertaPreTrainedModel):
             pool_input = sequence_output[first_col_mask[subset_mask]]
             sequence_output = sequence_output[masked_tokens_mask[subset_mask]]
             pooled_output = (
+                self.pooler(pool_input, pool=False, adapter_mask=adapter_mask) if self.pooler is not None else None
             )

         if not return_dict:
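At the model level the mask stays batch-sized: `encode` forwards it alongside the tokenized inputs, `forward` pops it from kwargs, and the embeddings, encoder, and pooler all receive the same tensor. A hedged usage sketch; the checkpoint id and task name are placeholders, and loading through AutoModel with trust_remote_code is assumed rather than confirmed by this diff:

from transformers import AutoModel

# Placeholder checkpoint id; any checkpoint shipping this custom code would behave the same way.
model = AutoModel.from_pretrained("org/xlm-roberta-lora", trust_remote_code=True)

# Passing task_type lets XLMRobertaLoRA.encode build the adapter_mask shown above;
# calling the base model's encode with an adapter_mask tensor directly is the lower-level route.
emb = model.encode(["first sentence", "second sentence"], task_type="query")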
xlm_padding.py
CHANGED

@@ -98,7 +98,7 @@ class IndexFirstAxisResidual(torch.autograd.Function):
 index_first_axis_residual = IndexFirstAxisResidual.apply


-def unpad_input(hidden_states, attention_mask):
+def unpad_input(hidden_states, attention_mask, adapter_mask=None):
     """
     Arguments:
         hidden_states: (batch, seqlen, ...)

@@ -113,6 +113,9 @@ def unpad_input(hidden_states, attention_mask):
     indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
     max_seqlen_in_batch = seqlens_in_batch.max().item()
     cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
+
+    cu_adapter_mask = torch.repeat_interleave(adapter_mask, cu_seqlens[1:] - cu_seqlens[:-1]) if adapter_mask is not None else None
+
     # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
     # bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
     # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to

@@ -123,6 +126,7 @@ def unpad_input(hidden_states, attention_mask):
         indices,
         cu_seqlens,
         max_seqlen_in_batch,
+        cu_adapter_mask,
     )
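Flash attention consumes an unpadded, concatenated token stream, so the per-sequence adapter mask has to be expanded to one entry per kept token. A small numeric sketch of what the new `cu_adapter_mask` looks like:

import torch
import torch.nn.functional as F

adapter_mask = torch.tensor([0, 1], dtype=torch.int32)      # adapter id per sequence in the batch
seqlens_in_batch = torch.tensor([3, 2], dtype=torch.int32)  # valid (unpadded) tokens per sequence
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))

# One adapter id per token in the flattened stream, aligned with the unpadded hidden states.
cu_adapter_mask = torch.repeat_interleave(adapter_mask, cu_seqlens[1:] - cu_seqlens[:-1])

print(cu_seqlens)       # tensor([0, 3, 5], dtype=torch.int32)
print(cu_adapter_mask)  # tensor([0, 0, 0, 1, 1], dtype=torch.int32)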