python_code | repo_name | file_path
---|---|---|
import torch
from torch import nn
class Pooler(torch.nn.Module):
def __init__(
self,
dim_in: int,
projection_size: int,
widening_factor: int = 4,
use_projection_head: bool = True, #TODO: add to config
use_simsiam_mlp: bool = False
):
super().__init__()
hidden_size = dim_in * widening_factor
self.use_projection_head = use_projection_head
if use_projection_head:
if use_simsiam_mlp:
self.mlp = nn.Sequential(
nn.Linear(dim_in, hidden_size, bias=False),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, hidden_size, bias=False),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, projection_size, bias=False),
nn.BatchNorm1d(projection_size, affine=False)
)
else:
self.mlp = nn.Sequential(
nn.Linear(dim_in, hidden_size),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, projection_size)
)
def forward(
self,
last_hidden_state: torch.Tensor = None,
) -> torch.Tensor:
batch_size, sequence_length, _ = last_hidden_state.size()
attention_mask = torch.ones(batch_size, sequence_length)
output_vectors = []
input_mask_expanded = attention_mask.unsqueeze(-1).expand(
last_hidden_state.size()).float().to(last_hidden_state.device
)
sum_embeddings = torch.sum(last_hidden_state * input_mask_expanded, 1)
sum_mask = input_mask_expanded.sum(1)
sum_mask = torch.clamp(sum_mask, min=1e-9)
output_vectors.append(sum_embeddings / sum_mask)
output_vector = torch.cat(output_vectors, 0)
if self.use_projection_head:
return self.mlp(output_vector) # batch size, projection size
else:
return output_vector # batch size, hidden size
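# Minimal usage sketch (illustrative addition, not part of the original module):
# mean-pools a dummy hidden state of shape (batch, seq_len, dim_in) and projects it.
if __name__ == "__main__":
    pooler = Pooler(dim_in=16, projection_size=8)
    dummy_hidden = torch.randn(4, 10, 16)   # (batch, seq_len, dim_in)
    pooled = pooler(dummy_hidden)           # (batch, projection_size)
    print(pooled.shape)                     # torch.Size([4, 8])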
| multimodal-self-distillation-main | src/models/components/pooler.py |
from typing import Optional, List
from dataclasses import dataclass
import torch
from torch import nn
from src.models.components.preprocessor import PreprocessorType
from src.models.components.masking import mask_hidden_states
from src.models.components.outputs import ModelOutput
from src.models.components.pooler import Pooler
class MultiHeadAttention(nn.Module):
"""Multi-head attention"""
def __init__(
self,
kv_dim: int,
q_dim: int,
*,
qk_out_dim: Optional[int] = None,
v_out_dim: Optional[int] = None,
output_dim: Optional[int] = None,
num_heads: int = 1,
dropout: float = 0.0
):
"""Constructor.
Args:
kv_dim: Size of input key and value vectors.
q_dim: Size of input query vector.
qk_out_dim: Size of Query and Key matrices last dimension.
If None, it will be equal to q_dim. Defaults to None.
v_out_dim: Size of Value matrix last dimension.
If None, it will be equal to qk_out_dim. Defaults to None.
output_dim: Size of output after the QKV attention.
If none, it will be equal to v_out_dim. Defaults to None.
num_heads: Number of heads. Defaults to 1.
dropout: Dropout probability. Defaults to 0.0.
"""
super().__init__()
if qk_out_dim is None:
qk_out_dim = q_dim
if v_out_dim is None:
v_out_dim = qk_out_dim
if output_dim is None:
output_dim = v_out_dim
self.num_heads = num_heads
self.qk_head_dim = qk_out_dim // num_heads
self.v_head_dim = v_out_dim // num_heads
self.k = nn.Linear(kv_dim, qk_out_dim)
self.q = nn.Linear(q_dim, qk_out_dim)
self.v = nn.Linear(kv_dim, v_out_dim)
self.projection = nn.Linear(v_out_dim, output_dim)
self.dropout = nn.Dropout(dropout)
self.scale = self.qk_head_dim ** -0.5
def transform_for_scores(self, x: torch.Tensor, head_dim: int):
# (..., seq_len, dim) -> (..., n_heads, seq_len, head_dim)
*dims, seq, hid = x.size()
x = x.view(*dims, seq, self.num_heads, head_dim)
return x.transpose(-3, -2)
def forward(
self,
inputs_kv: torch.Tensor,
inputs_q: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None
):
"""
Args:
inputs_kv: Key/Value embeddings of shape (B, ..., M, C).
inputs_q: Query embeddings of shape (B, ..., N, D)
attention_mask: Tensor of shape (B, ..., N, M).
Returns:
Tensor of shape (B, ..., N, D)
"""
keys, queries, values = self.k(inputs_kv), self.q(inputs_q), self.v(inputs_kv)
keys = self.transform_for_scores(keys, self.qk_head_dim)
queries = self.transform_for_scores(queries, self.qk_head_dim)
values = self.transform_for_scores(values, self.v_head_dim)
attention = (queries @ keys.transpose(-2, -1) * self.scale)
if attention_mask is not None:
min_value = torch.finfo(attention.dtype).min
extended_mask = (1 - attention_mask) * min_value
attention = attention + extended_mask
attention = attention.softmax(dim=-1)
attention = self.dropout(attention)
if attention_mask is not None:
attention = attention.masked_fill(1 - attention_mask, value=0)
weighted = attention @ values
# (..., n_heads, seq_len, head_dim) -> (..., seq_len, hid)
*dims, n_heads, seq, hid = weighted.size()
weighted = weighted.transpose(-3, -2)
weighted = weighted.reshape(*dims, seq, n_heads * hid)
return self.projection(weighted)
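# Minimal shape check (illustrative addition, not part of the original file; it assumes the
# module-level imports above resolve): cross-attends a 6-token query sequence over a
# 10-token key/value sequence with two heads.
if __name__ == "__main__":
    mha = MultiHeadAttention(kv_dim=16, q_dim=32, num_heads=2)
    kv = torch.randn(3, 10, 16)                 # (B, M, C)
    q = torch.randn(3, 6, 32)                   # (B, N, D)
    print(mha(inputs_kv=kv, inputs_q=q).shape)  # torch.Size([3, 6, 32])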
class FeedForward(nn.Module):
"""Transformer Feed-Forward network."""
def __init__(
self,
dim: int,
widening_factor: int = 4,
dropout: float = 0.0
):
"""Constructor.
Args:
dim: Dimension of input tensor.
widening_factor: Widening factor. Defaults to 4.
dropout: Dropout probability. Defaults to 0.
"""
super().__init__()
self.mlp = nn.Sequential(
nn.Linear(dim, dim * widening_factor),
nn.GELU(),
nn.Linear(dim * widening_factor, dim),
nn.Dropout(dropout)
)
def forward(self, x: torch.Tensor):
return self.mlp(x)
class SelfAttention(nn.Module):
"""Self-attention module."""
def __init__(
self,
*,
hidden_dim: int,
qk_out_dim: Optional[int] = None,
v_out_dim: Optional[int] = None,
widening_factor: int = 4,
num_heads: int = 1,
dropout: float = 0.0,
attention_dropout: float = 0.0
):
"""Constructor.
Args:
hidden_dim: Dimension of input tensor.
qk_out_dim: Size of Query and Key matrices last dimension.
Defaults to None.
v_out_dim: Size of Value matrix last dimension.
Defaults to None.
widening_factor: Feed-forward network widening factor.
Defaults to 4.
num_heads: Number of attention heads. Defaults to 1.
dropout: Dropout probability. Defaults to 0.
            attention_dropout: Dropout probability applied to the attention scores. Defaults to 0.
"""
super().__init__()
self.layer_norm = nn.LayerNorm(hidden_dim)
self.qkv_layer_norm = nn.LayerNorm(hidden_dim)
self.attention = MultiHeadAttention(
kv_dim=hidden_dim,
q_dim=hidden_dim,
qk_out_dim=qk_out_dim,
v_out_dim=v_out_dim,
output_dim=hidden_dim,
num_heads=num_heads,
dropout=attention_dropout
)
self.dropout = nn.Dropout(dropout)
self.mlp = FeedForward(hidden_dim, widening_factor, dropout)
def forward(
self,
x: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None
):
"""
Args:
x: Input tensor of shape (B, ..., M, C).
attention_mask: Input mask tensor of shape (B, ..., M, M).
Mask values selected in [0, 1]. Defaults to None.
"""
x_norm = self.layer_norm(x)
attention = self.attention(
inputs_kv=x_norm,
inputs_q=x_norm,
attention_mask=attention_mask
)
attention = self.dropout(attention)
x = x + attention
x = x + self.mlp(self.qkv_layer_norm(x))
return x
class CrossAttention(nn.Module):
"""Cross-attention module."""
def __init__(
self,
*,
kv_dim: int,
q_dim: int,
qk_out_dim: Optional[int] = None,
v_out_dim: Optional[int] = None,
widening_factor: int = 1,
num_heads: int = 1,
use_query_residual: bool = True,
dropout: float = 0.0,
attention_dropout: float = 0.0
):
"""Constructor.
Args:
kv_dim: Dimension of key/value input tensor.
q_dim: Dimension of query input tensor.
qk_out_dim: Size of Query and Key matrices last dimension.
Defaults to None.
v_out_dim: Size of Value matrix last dimension.
Defaults to None.
            widening_factor: Feed-forward network widening factor.
                Defaults to 1.
num_heads: Number of attention heads. Defaults to 1.
use_query_residual: Indicates whether to use query residual in
cross-attention. Defaults to True.
dropout: Dropout probability. Defaults to 0.
            attention_dropout: Dropout probability applied to the attention scores. Defaults to 0.
"""
super().__init__()
self.use_query_residual = use_query_residual
self.kv_layer_norm = nn.LayerNorm(kv_dim)
self.q_layer_norm = nn.LayerNorm(q_dim)
self.qkv_layer_norm = nn.LayerNorm(q_dim)
self.attention = MultiHeadAttention(
kv_dim=kv_dim,
q_dim=q_dim,
qk_out_dim=qk_out_dim,
v_out_dim=v_out_dim,
output_dim=q_dim,
num_heads=num_heads,
dropout=attention_dropout
)
self.dropout = nn.Dropout(dropout)
self.mlp = FeedForward(q_dim, widening_factor, dropout)
def forward(
self,
inputs_kv: torch.Tensor,
inputs_q: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None
):
"""
Args:
inputs_kv: Key/Value embeddings of shape (B, ..., M, C).
inputs_q: Query embeddings of shape (B, ..., N, D)
attention_mask: Tensor of shape (B, ..., N, M). Mask values selected
in [0, 1]. Defaults to None.
"""
attention = self.attention(
inputs_kv=self.kv_layer_norm(inputs_kv),
inputs_q=self.q_layer_norm(inputs_q),
attention_mask=attention_mask
)
attention = self.dropout(attention)
if self.use_query_residual:
x = inputs_q + attention
else:
x = attention
x = x + self.mlp(self.qkv_layer_norm(x))
return x
class PerceiverBlock(nn.Module):
"""Basic Hierarchical Perceiver block. Consists of learned set of latent vectors (one for each group),
cross-attention encoding layer and number of self-attention processing layers.
All parameters of cross- and self-attention layers are shared.
"""
def __init__(
self,
input_dim: int,
num_groups: int,
num_latents: int,
hidden_size: int,
num_self_attn_layers: int = 1,
num_cross_attn_heads: int = 1,
num_self_attn_heads: int = 1,
qk_out_dim: Optional[int] = None,
v_out_dim: Optional[int] = None,
cross_attn_widening_factor: int = 1,
self_attn_widening_factor: int = 1,
use_query_residual: bool = True,
dropout: float = 0.0,
cross_attn_dropout: float = 0.0,
self_attn_dropout: float = 0.0
):
super().__init__()
self.num_groups = num_groups
self.hidden_size = hidden_size
        self.latents = nn.Parameter(torch.randn(num_groups, num_latents, hidden_size))  # learned latent array, one set of latents per group
self.cross_attention = CrossAttention(
kv_dim=input_dim,
q_dim=hidden_size,
num_heads=num_cross_attn_heads,
dropout=dropout,
attention_dropout=cross_attn_dropout,
widening_factor=cross_attn_widening_factor,
use_query_residual=use_query_residual,
qk_out_dim=qk_out_dim,
v_out_dim=v_out_dim
)
self.self_attention_layers = nn.ModuleList([
SelfAttention(
hidden_dim=hidden_size,
num_heads=num_self_attn_heads,
dropout=dropout,
attention_dropout=self_attn_dropout,
widening_factor=self_attn_widening_factor,
qk_out_dim=qk_out_dim,
v_out_dim=v_out_dim
) for _ in range(num_self_attn_layers)
])
def forward(self, inputs, attention_mask=None):
*dims, seq_len, input_dim = inputs.size()
if attention_mask is not None:
# (bs, seq_len) -> (bs, num_groups, group_len)
attention_mask = attention_mask.view(*dims, self.num_groups, -1)
# (bs, num_groups, group_len) -> (bs, num_groups, num_heads, q_seq_len, kv_seq_len)
# num_groups and q_seq_len are broadcast
# group_len is the same as kv_seq_len
attention_mask = attention_mask[:, :, None, None, :]
# (..., seq_len, hid_dim) -> (..., num_groups, group_len, hid_dim)
inputs = inputs.view(*dims, self.num_groups, -1, input_dim)
latents = self.cross_attention(inputs, self.latents, attention_mask)
for self_attention in self.self_attention_layers:
latents = self_attention(latents)
# (.., num_groups, group_len, latent_dim) -> (.., seq_len, hid_dim)
*_, latents_dim = latents.size()
outputs = latents.view(*dims, -1, latents_dim)
return outputs
@dataclass
class BlockConfig:
num_groups: int
num_self_attn_layers: int
num_self_attn_heads: int
num_latents: int
hidden_size: int
class HiP(nn.Module):
def __init__(
self,
input_dim: int,
block_configs: List[BlockConfig]
):
super().__init__()
layers = []
input_dim = input_dim
for cfg in block_configs:
layer = PerceiverBlock(
input_dim=input_dim,
num_groups=cfg.num_groups,
num_self_attn_layers=cfg.num_self_attn_layers,
num_self_attn_heads=cfg.num_self_attn_heads,
num_latents=cfg.num_latents,
hidden_size=cfg.hidden_size
)
layers.append(layer)
input_dim = cfg.hidden_size
self.layers = nn.ModuleList(layers)
def forward(self, x, attention_mask=None):
hidden_states = []
for i, layer in enumerate(self.layers):
x = layer(x, attention_mask)
hidden_states.append(x)
attention_mask = None
return x, hidden_states
class HiPModel(nn.Module):
def __init__(
self,
preprocessor: PreprocessorType,
hip: HiP,
is_student: bool = False,
is_training: bool = False,
mask_time_prob: float = 0.05,
mask_time_length: int = 10,
use_projection_head: bool = True,
use_simsiam_mlp: bool = False
):
super().__init__()
self.preprocessor = preprocessor
self.hip = hip
self.pooler = Pooler(
dim_in=self.hip.layers[-1].hidden_size,
projection_size=self.hip.layers[-1].hidden_size,
use_projection_head=use_projection_head,
use_simsiam_mlp=use_simsiam_mlp
)
self.is_student = is_student
self.is_training = is_training
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
def set_student_status(self, is_student: bool):
self.is_student = is_student
def forward(self, x, attention_mask=None, apply_mask=True):
x, _, _ = self.preprocessor(x)
batch_size, seq_length, _ = x.size()
if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length), device=x.device)
if self.is_student and apply_mask:
x = mask_hidden_states(
hidden_states=x,
attention_mask=attention_mask,
mask_time_prob=self.mask_time_prob,
mask_time_length=self.mask_time_length,
training=self.is_training
)
x, hidden_states = self.hip(x, attention_mask)
pooler_output = self.pooler(x)
return ModelOutput(
pooler_output=pooler_output,
last_hidden_state=x,
hidden_states=hidden_states
)
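# Minimal usage sketch (illustrative addition, not part of the original file; it assumes the
# module-level src.models imports above resolve): builds a two-block HiP and runs a dummy
# forward pass. The sequence length must be divisible by num_groups of the first block so
# the inputs can be reshaped into groups.
if __name__ == "__main__":
    configs = [
        BlockConfig(num_groups=4, num_self_attn_layers=1, num_self_attn_heads=2,
                    num_latents=8, hidden_size=32),
        BlockConfig(num_groups=2, num_self_attn_layers=1, num_self_attn_heads=2,
                    num_latents=8, hidden_size=32),
    ]
    model = HiP(input_dim=16, block_configs=configs)
    dummy = torch.randn(2, 64, 16)   # (batch, seq_len, input_dim)
    out, hidden = model(dummy)
    print(out.shape, len(hidden))    # torch.Size([2, 16, 32]) 2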
| multimodal-self-distillation-main | src/models/components/hip.py |
from typing import Tuple
import torch
from torch.nn import functional as F
def k_nearest_neighbor(
prediction_features: torch.Tensor,
query_features: torch.Tensor = None,
labels: torch.Tensor = None,
num_classes: int = 1000,
k: int = 20,
chunking: bool = True,
) -> Tuple:
probabilities = []
predictions = []
temperature = 0.1
num_classes = len(set(list(labels.numpy())))
if query_features is None:
# means that similarity is computed between prediction features and itself
query_features = prediction_features
zero_diagonal = True
trim_preds = False
else:
zero_diagonal = False
trim_preds = True
if chunking:
        num_chunks = 10  # reduced from 100 to 10 to avoid OOM during local testing
num_test_samples = query_features.size()[0]
samples_per_chunk = num_test_samples // num_chunks
for idx in range(0, num_test_samples, samples_per_chunk):
query_chunk_features = query_features[
idx : min((idx + samples_per_chunk), num_test_samples), :
]
chunk_labels = labels[
idx : min((idx + samples_per_chunk), num_test_samples)
]
batch_size = chunk_labels.shape[0]
_, _, _, _, probs, _, preds = knn_core(
prediction_features,
query_chunk_features,
labels,
k,
temperature,
zero_diagonal,
num_classes,
batch_size)
probabilities.append(probs)
predictions.append(preds)
probabilities = torch.cat(probabilities, dim=0)
predictions = torch.cat(predictions, dim=0)
return probabilities, predictions, labels
else:
batch_size = labels.shape[0]
return knn_core(
prediction_features,
query_features,
labels,
k,
temperature,
zero_diagonal,
num_classes,
batch_size)
def knn_core(prediction_features, query_features, labels, k, temperature, zero_diagonal, num_classes, batch_size):
# if k is larger than the number of prediction features, we just use all of them
if k > len(prediction_features):
k = len(prediction_features)
similarity = F.normalize(query_features) @ F.normalize(prediction_features).t()
similarity_ground_truth = torch.diag(similarity)
    if zero_diagonal:
        # zero out self-similarity so a sample is not retrieved as its own neighbor
        torch.diagonal(similarity, 0).zero_()
distances, indices = similarity.topk(k, largest=True, sorted=True)
candidates = labels.view(1, -1).expand(batch_size, -1)
retrieved_neighbors = torch.gather(candidates, 1, indices)
retrieval_one_hot = torch.zeros(batch_size * k, num_classes)
retrieval_one_hot.scatter_(1, retrieved_neighbors.view(-1, 1), 1)
distances_transform = (distances / temperature).exp_()
probs = torch.sum(
torch.mul(
retrieval_one_hot.view(batch_size, -1, num_classes),
distances_transform.view(batch_size, -1, 1),
),
1,
)
probs.div_(probs.sum(dim=1, keepdim=True))
probs_sorted, predictions = probs.sort(1, True)
return similarity, similarity_ground_truth, distances, indices, probs, probs_sorted, predictions
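# Minimal usage sketch (illustrative addition, not part of the original file): kNN-classifies
# a feature matrix against itself on CPU; labels must be integer class ids.
if __name__ == "__main__":
    torch.manual_seed(0)
    features = torch.randn(50, 16)        # (num_samples, feature_dim)
    labels = torch.arange(5).repeat(10)   # 50 labels covering classes 0..4
    probs, preds, _ = k_nearest_neighbor(prediction_features=features, labels=labels, k=5)
    print(probs.shape, preds.shape)       # torch.Size([50, 5]) torch.Size([50, 5])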
| multimodal-self-distillation-main | src/models/components/knn.py |
import torch
# Output classes for the bi-encoder and mm-encoder; kept flexible to accommodate additional BYOL or data2vec outputs.
class DispatcherOutput:
def __init__(
self,
student_input,
teacher_inputs,
align_fuse,
apply_mask: bool,
labels: torch.Tensor,
output_modalities: dict,
metric: str,
num_classes: int,
) -> None:
self.student_input = student_input
self.teacher_inputs = teacher_inputs
self.align_fuse = align_fuse
self.apply_mask = apply_mask
self.labels = labels
self.output_modalities = output_modalities
self.metric = metric
self.num_classes = num_classes
def set_attributes(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class ModelOutput:
def __init__(
self,
pooler_output: torch.Tensor,
last_hidden_state: torch.Tensor,
hidden_states: torch.Tensor,
        attentions: torch.Tensor = None,
        cross_attentions: torch.Tensor = None
) -> None:
self.pooler_output = pooler_output
self.last_hidden_state = last_hidden_state
self.hidden_states = hidden_states
self.attentions = attentions
self.cross_attentions = cross_attentions
def set_attributes(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class CriterionOutput:
def __init__(
self,
total_loss: torch.Tensor,
latent_loss: torch.Tensor = None,
align_loss: torch.Tensor = None,
) -> None:
self.total_loss = total_loss
self.latent_loss = latent_loss
self.align_loss = align_loss
def set_attributes(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class ForwardPassOutput:
def __init__(
self,
student_output: ModelOutput = None,
teacher_output: ModelOutput = None,
align_fuse: dict = None,
labels: torch.Tensor = None,
output_modalities: dict = None,
metric: str = None,
num_classes: int = None,
criterion_output: CriterionOutput = None,
) -> None:
self.student_output = student_output
self.teacher_output = teacher_output
self.align_fuse = align_fuse
self.labels = labels
self.output_modalities = output_modalities
self.metric = metric
        self.num_classes = num_classes
self.criterion_output = criterion_output
def set_attributes(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
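# Minimal usage sketch (illustrative addition, not part of the original file); it assumes the
# optional attentions/cross_attentions arguments of ModelOutput default to None as above.
if __name__ == "__main__":
    out = ModelOutput(
        pooler_output=torch.randn(2, 8),
        last_hidden_state=torch.randn(2, 4, 8),
        hidden_states=[torch.randn(2, 4, 8)],
    )
    out.set_attributes(metric="accuracy")
    print(out.pooler_output.shape, out.metric)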
| multimodal-self-distillation-main | src/models/components/outputs.py |
import pytest
import torch
import triton
import triton.language as tl
from flashtriton.attention import attention
@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(6, 9, 1024, 64)])
@pytest.mark.parametrize('causal', [False, True])
def test_op(Z, H, N_CTX, D_HEAD, causal, dtype=torch.float16):
torch.manual_seed(20)
q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
sm_scale = 0.5
dout = torch.randn_like(q)
# reference implementation
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
# p = torch.exp(p)
ref_out = torch.matmul(p, v)
ref_out.backward(dout)
ref_dv, v.grad = v.grad.clone(), None
ref_dk, k.grad = k.grad.clone(), None
ref_dq, q.grad = q.grad.clone(), None
# triton implementation
tri_out = attention(q, k, v, causal, sm_scale).half()
tri_out.backward(dout)
tri_dv, v.grad = v.grad.clone(), None
tri_dk, k.grad = k.grad.clone(), None
tri_dq, q.grad = q.grad.clone(), None
# compare
assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0)
assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=0)
assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=0)
assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=0)
try:
from flash_attn.flash_attn_interface import flash_attn_qkvpacked_func as flash_attn_func
FLASH_VER = 2
except BaseException:
try:
from flash_attn.flash_attn_interface import flash_attn_func
FLASH_VER = 1
except BaseException:
FLASH_VER = None
HAS_FLASH = FLASH_VER is not None
BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64
# vary seq length for fixed head and batch=4
configs = [triton.testing.Benchmark(
x_names=['N_CTX'],
x_vals=[2**i for i in range(10, 15)],
line_arg='provider',
line_vals=['triton'] + (['flash'] if HAS_FLASH else []),
line_names=['Triton'] + ([f'Flash-{FLASH_VER}'] if HAS_FLASH else []),
styles=[('red', '-'), ('blue', '-')],
ylabel='ms',
plot_name=f'fused-attention-batch{BATCH}-head{N_HEADS}-d{D_HEAD}-{mode}',
args={'H': N_HEADS, 'BATCH': BATCH, 'D_HEAD': D_HEAD, 'dtype': torch.float16, 'mode': mode, 'causal': causal}
) for mode in ['fwd', 'bwd'] for causal in [False, True]]
@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, D_HEAD, causal, mode, provider="triton", dtype=torch.float16, device="cuda"):
assert mode in ['fwd', 'bwd']
warmup = 25
rep = 100
if provider == "triton":
q = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
sm_scale = 1.3
fn = lambda: attention(q, k, v, causal, sm_scale)
if mode == 'bwd':
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
if provider == "flash":
qkv = torch.randn((BATCH, N_CTX, 3, H, D_HEAD), dtype=dtype, device=device, requires_grad=True)
if FLASH_VER == 1:
lengths = torch.full((BATCH,), fill_value=N_CTX, device=device)
cu_seqlens = torch.zeros((BATCH + 1,), device=device, dtype=torch.int32)
cu_seqlens[1:] = lengths.cumsum(0)
qkv = qkv.reshape(BATCH * N_CTX, 3, H, D_HEAD)
fn = lambda: flash_attn_func(qkv, cu_seqlens, 0., N_CTX, causal=causal)
elif FLASH_VER == 2:
fn = lambda: flash_attn_func(qkv, causal=causal)
else:
raise ValueError(f'unknown {FLASH_VER = }')
if mode == 'bwd':
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
flops_per_matmul = 2. * BATCH * H * N_CTX * N_CTX * D_HEAD
total_flops = 2 * flops_per_matmul
if causal:
total_flops *= 0.5
if mode == 'bwd':
total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
return total_flops / ms * 1e-9
# only works on post-Ampere GPUs right now
bench_flash_attention.run(save_path='.', print_data=True) | FlashAttention20Triton-main | benchmark_flash_triton.py |
| FlashAttention20Triton-main | benchmark_mpt.py |
import time
import torch
import pytest
from flashtriton.flash_torch import FlashAttention
# Model Arguments
args = {
"dim": 512,
"heads": 8,
"dim_head": 64,
"causal": False,
"q_bucket_size": 512,
"k_bucket_size": 1024,
"parallel": False,
"mixed_precision": False
}
# Initialize model
model = FlashAttention(**args)
model.cuda()
# Generate some input data
x = torch.randn(64, 1024, args['dim']).cuda()
def test_flash_attention_forward():
# Start timing
start_time = time.time()
# Run method
model(x)
# End timing
end_time = time.time()
# Print execution time
print(f'Execution time for sequence length 1024: {end_time - start_time} seconds')
def test_flash_attention_forward_scaling():
# Modify sequence length and run benchmark
x = torch.randn(64, 16000, args['dim']).cuda()
# Start timing
start_time = time.time()
# Run method
model(x)
# End timing
end_time = time.time()
# Print execution time
print(f'Execution time for sequence length 16000: {end_time - start_time} seconds')
# Run tests
if __name__ == "__main__":
test_flash_attention_forward()
test_flash_attention_forward_scaling()
| FlashAttention20Triton-main | benchmark_flash_torch.py |
import torch
from flashtriton.attention import attention
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
import math
import torch
import torch.nn.functional as F
from torch import nn
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
from .multiway_network import MultiwayWrapper
from .xpos_relative_position import XPOS
class MultiheadAttention(nn.Module):
def __init__(
self,
args,
embed_dim,
num_heads,
dropout=0.0,
self_attention=False,
encoder_decoder_attention=False,
subln=False,
):
super().__init__()
self.args = args
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.scaling = self.head_dim**-0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert self.self_attention ^ self.encoder_decoder_attention
self.k_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.v_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.q_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.out_proj = MultiwayWrapper(
args, nn.Linear(embed_dim, embed_dim, bias=True)
)
self.inner_attn_ln = (
MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
if subln and self.self_attention
else None
)
self.dropout_module = torch.nn.Dropout(dropout)
self.xpos = (
XPOS(self.head_dim, args.xpos_scale_base)
if args.xpos_rel_pos and self.self_attention
else None
)
def reset_parameters(self):
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.out_proj.weight)
nn.init.constant_(self.out_proj.bias, 0.0)
def forward(
self,
query,
key,
value,
incremental_state=None,
key_padding_mask=None,
attn_mask=None,
rel_pos=None,
is_first_step=False,
):
bsz, tgt_len, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
key_bsz, src_len, _ = key.size()
assert key_bsz == bsz, f"{query.size(), key.size()}"
assert value is not None
        assert (bsz, src_len) == value.shape[:2]
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
q = q.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
k = k.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
v = v.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
q = q.reshape(bsz * self.num_heads, tgt_len, self.head_dim)
k = k.reshape(bsz * self.num_heads, src_len, self.head_dim)
v = v.reshape(bsz * self.num_heads, src_len, self.head_dim)
if incremental_state is not None:
if "prev_key" in incremental_state:
prev_key = incremental_state["prev_key"].view(
bsz * self.num_heads, -1, self.head_dim
)
prev_value = incremental_state["prev_value"].view(
bsz * self.num_heads, -1, self.head_dim
)
k = torch.cat([prev_key, k], dim=1)
v = torch.cat([prev_value, v], dim=1)
incremental_state["prev_key"] = k.view(
bsz, self.num_heads, -1, self.head_dim
)
incremental_state["prev_value"] = v.view(
bsz, self.num_heads, -1, self.head_dim
)
src_len = k.size(1)
if self.xpos is not None:
if incremental_state is not None and not is_first_step:
offset = src_len - 1
else:
offset = 0
k = self.xpos(k, offset=0, downscale=True)
q = self.xpos(q, offset=offset, downscale=False)
attn_weights = torch.bmm(q, k.transpose(1, 2))
if attn_mask is not None:
attn_weights = torch.nan_to_num(attn_weights)
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if rel_pos is not None:
rel_pos = rel_pos.view(attn_weights.size())
attn_weights = attn_weights + rel_pos
attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as(
attn_weights
)
attn_probs = self.dropout_module(attn_weights)
attn = torch.bmm(attn_probs, v)
attn = attn.transpose(0, 1).reshape(tgt_len, bsz, embed_dim).transpose(0, 1)
if self.inner_attn_ln is not None:
attn = self.inner_attn_ln(attn)
attn = self.out_proj(attn)
attn_weights = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
return attn, attn_weights
q = torch.randn((4, 48, 22009, 64), dtype=torch.float16, device="cuda", requires_grad=True)
k = torch.randn((4, 48, 22009, 64), dtype=torch.float16, device="cuda", requires_grad=True)
v = torch.randn((4, 48, 22009, 64), dtype=torch.float16, device="cuda", requires_grad=True)
causal = True
sm_scale = 1.3
# forward pass
output = attention(q, k, v, causal, sm_scale)
# backward pass: populates q.grad, k.grad and v.grad (backward itself returns None)
output.backward(torch.ones_like(output))
print(f'Output shape: {output.shape}')
| FlashAttention20Triton-main | flashtriton/flash_mha.py |
import pytest
import torch
import triton
import triton.language as tl
@triton.jit
def max_fn(x, y):
return tl.math.max(x, y)
@triton.jit
def _fwd_kernel(
Q, K, V, sm_scale,
L,
Out,
stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn,
stride_oz, stride_oh, stride_om, stride_on,
Z, H, N_CTX,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
IS_CAUSAL: tl.constexpr,
):
start_m = tl.program_id(0)
off_hz = tl.program_id(1)
qvk_offset = off_hz * stride_qh
Q_block_ptr = tl.make_block_ptr(
base=Q + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_qm, stride_qk),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0)
)
K_block_ptr = tl.make_block_ptr(
base=K + qvk_offset,
shape=(BLOCK_DMODEL, N_CTX),
strides=(stride_kk, stride_kn),
offsets=(0, 0),
block_shape=(BLOCK_DMODEL, BLOCK_N),
order=(0, 1)
)
V_block_ptr = tl.make_block_ptr(
base=V + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_vk, stride_vn),
offsets=(0, 0),
block_shape=(BLOCK_N, BLOCK_DMODEL),
order=(1, 0)
)
# initialize offsets
offs_m = start_m * BLOCK_M + tl.arange(0, BLOCK_M)
offs_n = tl.arange(0, BLOCK_N)
# initialize pointer to m and l
m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# scale sm_scale by log_2(e) and use
# 2^x instead of exp in the loop because CSE and LICM
# don't work as expected with `exp` in the loop
qk_scale = sm_scale * 1.44269504
# load q: it will stay in SRAM throughout
q = tl.load(Q_block_ptr)
q = (q * qk_scale).to(tl.float16)
# loop over k, v and update accumulator
lo = 0
hi = (start_m + 1) * BLOCK_M if IS_CAUSAL else N_CTX
for start_n in range(lo, hi, BLOCK_N):
# -- load k, v --
k = tl.load(K_block_ptr)
v = tl.load(V_block_ptr)
# -- compute qk ---
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
if IS_CAUSAL:
qk = tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), qk, float("-inf"))
qk += tl.dot(q, k)
# -- compute scaling constant ---
m_i_new = tl.maximum(m_i, tl.max(qk, 1))
alpha = tl.math.exp2(m_i - m_i_new)
p = tl.math.exp2(qk - m_i_new[:, None])
# -- scale and update acc --
acc_scale = l_i * 0 + alpha # workaround some compiler bug
acc *= acc_scale[:, None]
acc += tl.dot(p.to(tl.float16), v)
# -- update m_i and l_i --
l_i = l_i * alpha + tl.sum(p, 1)
m_i = m_i_new
# update pointers
K_block_ptr = tl.advance(K_block_ptr, (0, BLOCK_N))
V_block_ptr = tl.advance(V_block_ptr, (BLOCK_N, 0))
# write back l and m
acc = acc / l_i[:, None]
l_ptrs = L + off_hz * N_CTX + offs_m
tl.store(l_ptrs, m_i + tl.math.log2(l_i))
# write back O
O_block_ptr = tl.make_block_ptr(
base=Out + qvk_offset,
shape=(N_CTX, BLOCK_DMODEL),
strides=(stride_om, stride_on),
offsets=(start_m * BLOCK_M, 0),
block_shape=(BLOCK_M, BLOCK_DMODEL),
order=(1, 0)
)
tl.store(O_block_ptr, acc.to(tl.float16))
@triton.jit
def _bwd_preprocess(
Out, DO,
Delta,
BLOCK_M: tl.constexpr, D_HEAD: tl.constexpr,
):
off_m = tl.program_id(0) * BLOCK_M + tl.arange(0, BLOCK_M)
off_n = tl.arange(0, D_HEAD)
# load
o = tl.load(Out + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
do = tl.load(DO + off_m[:, None] * D_HEAD + off_n[None, :]).to(tl.float32)
# compute
delta = tl.sum(o * do, axis=1)
# write-back
tl.store(Delta + off_m, delta)
@triton.jit
def _bwd_kernel(
Q, K, V, sm_scale, Out, DO,
DQ, DK, DV,
L,
D,
stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk, stride_vn,
Z, H, N_CTX,
num_block,
BLOCK_M: tl.constexpr, BLOCK_DMODEL: tl.constexpr,
BLOCK_N: tl.constexpr,
CAUSAL: tl.constexpr,
):
off_hz = tl.program_id(0)
off_z = off_hz // H
off_h = off_hz % H
qk_scale = sm_scale * 1.44269504
# offset pointers for batch/head
Q += off_z * stride_qz + off_h * stride_qh
K += off_z * stride_qz + off_h * stride_qh
V += off_z * stride_qz + off_h * stride_qh
DO += off_z * stride_qz + off_h * stride_qh
DQ += off_z * stride_qz + off_h * stride_qh
DK += off_z * stride_qz + off_h * stride_qh
DV += off_z * stride_qz + off_h * stride_qh
for start_n in range(0, num_block):
if CAUSAL:
lo = start_n * BLOCK_M
else:
lo = 0
# initialize row/col offsets
offs_qm = lo + tl.arange(0, BLOCK_M)
offs_n = start_n * BLOCK_M + tl.arange(0, BLOCK_M)
offs_m = tl.arange(0, BLOCK_N)
offs_k = tl.arange(0, BLOCK_DMODEL)
# initialize pointers to value-like data
q_ptrs = Q + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
k_ptrs = K + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
v_ptrs = V + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
do_ptrs = DO + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
dq_ptrs = DQ + (offs_qm[:, None] * stride_qm + offs_k[None, :] * stride_qk)
# pointer to row-wise quantities in value-like data
D_ptrs = D + off_hz * N_CTX
l_ptrs = L + off_hz * N_CTX
# initialize dv amd dk
dv = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
dk = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32)
# k and v stay in SRAM throughout
k = tl.load(k_ptrs)
v = tl.load(v_ptrs)
# loop over rows
for start_m in range(lo, num_block * BLOCK_M, BLOCK_M):
offs_m_curr = start_m + offs_m
# load q, k, v, do on-chip
q = tl.load(q_ptrs)
# recompute p = softmax(qk, dim=-1).T
if CAUSAL:
qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), float(0.), float("-inf"))
else:
qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32)
qk += tl.dot(q, tl.trans(k))
qk *= qk_scale
l_i = tl.load(l_ptrs + offs_m_curr)
p = tl.math.exp2(qk - l_i[:, None])
# compute dv
do = tl.load(do_ptrs)
dv += tl.dot(tl.trans(p.to(Q.dtype.element_ty)), do)
# compute dp = dot(v, do)
Di = tl.load(D_ptrs + offs_m_curr)
dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None]
dp += tl.dot(do, tl.trans(v))
# compute ds = p * (dp - delta[:, None])
ds = p * dp * sm_scale
# compute dk = dot(ds.T, q)
dk += tl.dot(tl.trans(ds.to(Q.dtype.element_ty)), q)
# compute dq
dq = tl.load(dq_ptrs)
dq += tl.dot(ds.to(Q.dtype.element_ty), k)
tl.store(dq_ptrs, dq)
# increment pointers
dq_ptrs += BLOCK_M * stride_qm
q_ptrs += BLOCK_M * stride_qm
do_ptrs += BLOCK_M * stride_qm
# write-back
dv_ptrs = DV + (offs_n[:, None] * stride_qm + offs_k[None, :] * stride_qk)
dk_ptrs = DK + (offs_n[:, None] * stride_kn + offs_k[None, :] * stride_kk)
tl.store(dv_ptrs, dv)
tl.store(dk_ptrs, dk)
empty = torch.empty(128, device="cuda")
class _attention(torch.autograd.Function):
@staticmethod
def forward(ctx, q, k, v, causal, sm_scale):
# shape constraints
Lq, Lk, Lv = q.shape[-1], k.shape[-1], v.shape[-1]
assert Lq == Lk and Lk == Lv
assert Lk in {16, 32, 64, 128}
o = torch.empty_like(q)
BLOCK_M = 128
BLOCK_N = 64
grid = (triton.cdiv(q.shape[2], BLOCK_M), q.shape[0] * q.shape[1], 1)
L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32)
num_warps = 4 if Lk <= 64 else 8
_fwd_kernel[grid](
q, k, v, sm_scale,
L,
o,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
o.stride(0), o.stride(1), o.stride(2), o.stride(3),
q.shape[0], q.shape[1], q.shape[2],
BLOCK_M=BLOCK_M, BLOCK_N=BLOCK_N, BLOCK_DMODEL=Lk,
IS_CAUSAL=causal,
num_warps=num_warps,
num_stages=4)
ctx.save_for_backward(q, k, v, o, L)
ctx.grid = grid
ctx.sm_scale = sm_scale
ctx.BLOCK_DMODEL = Lk
ctx.causal = causal
return o
@staticmethod
def backward(ctx, do):
BLOCK = 128
q, k, v, o, L = ctx.saved_tensors
do = do.contiguous()
dq = torch.zeros_like(q, dtype=torch.float32)
dk = torch.empty_like(k)
dv = torch.empty_like(v)
delta = torch.empty_like(L)
_bwd_preprocess[(ctx.grid[0] * ctx.grid[1], )](
o, do,
delta,
BLOCK_M=BLOCK, D_HEAD=ctx.BLOCK_DMODEL,
)
_bwd_kernel[(ctx.grid[1],)](
q, k, v, ctx.sm_scale,
o, do,
dq, dk, dv,
L, delta,
q.stride(0), q.stride(1), q.stride(2), q.stride(3),
k.stride(0), k.stride(1), k.stride(2), k.stride(3),
v.stride(0), v.stride(1), v.stride(2), v.stride(3),
q.shape[0], q.shape[1], q.shape[2],
ctx.grid[0],
BLOCK_M=BLOCK, BLOCK_N=BLOCK,
BLOCK_DMODEL=ctx.BLOCK_DMODEL, num_warps=8,
CAUSAL=ctx.causal,
num_stages=1,
)
return dq, dk, dv, None, None
attention = _attention.apply
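# Minimal usage sketch (illustrative addition, not part of the original file): the kernel
# expects contiguous fp16 CUDA tensors of shape (Z, H, N_CTX, D_HEAD) with D_HEAD in
# {16, 32, 64, 128}; see test_op below for a full gradient check against a torch reference.
if __name__ == "__main__" and torch.cuda.is_available():
    q = torch.randn(1, 4, 1024, 64, dtype=torch.float16, device="cuda", requires_grad=True)
    k = torch.randn(1, 4, 1024, 64, dtype=torch.float16, device="cuda", requires_grad=True)
    v = torch.randn(1, 4, 1024, 64, dtype=torch.float16, device="cuda", requires_grad=True)
    out = attention(q, k, v, True, 0.125)  # causal=True, sm_scale = 1/sqrt(D_HEAD)
    print(out.shape)                       # torch.Size([1, 4, 1024, 64])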
@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(6, 9, 1024, 64)])
@pytest.mark.parametrize('causal', [False, True])
def test_op(Z, H, N_CTX, D_HEAD, causal, dtype=torch.float16):
torch.manual_seed(20)
q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0., std=0.5).requires_grad_()
sm_scale = 0.5
dout = torch.randn_like(q)
# reference implementation
M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda"))
p = torch.matmul(q, k.transpose(2, 3)) * sm_scale
if causal:
p[:, :, M == 0] = float("-inf")
p = torch.softmax(p.float(), dim=-1).half()
# p = torch.exp(p)
ref_out = torch.matmul(p, v)
ref_out.backward(dout)
ref_dv, v.grad = v.grad.clone(), None
ref_dk, k.grad = k.grad.clone(), None
ref_dq, q.grad = q.grad.clone(), None
# triton implementation
tri_out = attention(q, k, v, causal, sm_scale).half()
tri_out.backward(dout)
tri_dv, v.grad = v.grad.clone(), None
tri_dk, k.grad = k.grad.clone(), None
tri_dq, q.grad = q.grad.clone(), None
# compare
assert torch.allclose(ref_out, tri_out, atol=1e-2, rtol=0)
assert torch.allclose(ref_dv, tri_dv, atol=1e-2, rtol=0)
assert torch.allclose(ref_dk, tri_dk, atol=1e-2, rtol=0)
assert torch.allclose(ref_dq, tri_dq, atol=1e-2, rtol=0)
try:
from flash_attn.flash_attn_interface import flash_attn_qkvpacked_func as flash_attn_func
FLASH_VER = 2
except BaseException:
try:
from flash_attn.flash_attn_interface import flash_attn_func
FLASH_VER = 1
except BaseException:
FLASH_VER = None
HAS_FLASH = FLASH_VER is not None
BATCH, N_HEADS, N_CTX, D_HEAD = 4, 48, 4096, 64
# vary seq length for fixed head and batch=4
configs = [triton.testing.Benchmark(
x_names=['N_CTX'],
x_vals=[2**i for i in range(10, 15)],
line_arg='provider',
line_vals=['triton'] + (['flash'] if HAS_FLASH else []),
line_names=['Triton'] + ([f'Flash-{FLASH_VER}'] if HAS_FLASH else []),
styles=[('red', '-'), ('blue', '-')],
ylabel='ms',
plot_name=f'fused-attention-batch{BATCH}-head{N_HEADS}-d{D_HEAD}-{mode}',
args={'H': N_HEADS, 'BATCH': BATCH, 'D_HEAD': D_HEAD, 'dtype': torch.float16, 'mode': mode, 'causal': causal}
) for mode in ['fwd', 'bwd'] for causal in [False, True]]
@triton.testing.perf_report(configs)
def bench_flash_attention(BATCH, H, N_CTX, D_HEAD, causal, mode, provider="triton", dtype=torch.float16, device="cuda"):
assert mode in ['fwd', 'bwd']
warmup = 25
rep = 100
if provider == "triton":
q = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
k = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
v = torch.randn((BATCH, H, N_CTX, D_HEAD), dtype=dtype, device="cuda", requires_grad=True)
sm_scale = 1.3
fn = lambda: attention(q, k, v, causal, sm_scale)
if mode == 'bwd':
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
if provider == "flash":
qkv = torch.randn((BATCH, N_CTX, 3, H, D_HEAD), dtype=dtype, device=device, requires_grad=True)
if FLASH_VER == 1:
lengths = torch.full((BATCH,), fill_value=N_CTX, device=device)
cu_seqlens = torch.zeros((BATCH + 1,), device=device, dtype=torch.int32)
cu_seqlens[1:] = lengths.cumsum(0)
qkv = qkv.reshape(BATCH * N_CTX, 3, H, D_HEAD)
fn = lambda: flash_attn_func(qkv, cu_seqlens, 0., N_CTX, causal=causal)
elif FLASH_VER == 2:
fn = lambda: flash_attn_func(qkv, causal=causal)
else:
raise ValueError(f'unknown {FLASH_VER = }')
if mode == 'bwd':
o = fn()
do = torch.randn_like(o)
fn = lambda: o.backward(do, retain_graph=True)
ms = triton.testing.do_bench(fn, warmup=warmup, rep=rep)
flops_per_matmul = 2. * BATCH * H * N_CTX * N_CTX * D_HEAD
total_flops = 2 * flops_per_matmul
if causal:
total_flops *= 0.5
if mode == 'bwd':
total_flops *= 2.5 # 2.0(bwd) + 0.5(recompute)
return total_flops / ms * 1e-9
# only works on post-Ampere GPUs right now
bench_flash_attention.run(save_path='.', print_data=True) | FlashAttention20Triton-main | flashtriton/attention.py |
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
"""Attention layers."""
import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
from einops import rearrange
from packaging import version
from torch import nn
from llmfoundry.models.layers.fc import FC_CLASS_REGISTRY
from llmfoundry.models.layers.norm import NORM_CLASS_REGISTRY
def _reset_is_causal(num_query_tokens: int, num_key_tokens: int,
original_is_causal: bool):
# disable causal when it is not needed
# necessary for flash & triton for generation with kv_cache
if original_is_causal and num_query_tokens != num_key_tokens:
if num_query_tokens != 1:
raise NotImplementedError(
'MPT does not support query and key with different number of tokens, unless number of query tokens is 1.'
)
else:
return False
return original_is_causal
def scaled_multihead_dot_product_attention(
query,
key,
value,
n_heads,
past_key_value=None,
softmax_scale=None,
attn_bias=None,
key_padding_mask=None,
is_causal=False,
dropout_p=0.0,
training=False,
needs_weights=False,
multiquery=False,
):
q = rearrange(query, 'b s (h d) -> b h s d', h=n_heads)
kv_n_heads = 1 if multiquery else n_heads
k = rearrange(key, 'b s (h d) -> b h d s', h=kv_n_heads)
v = rearrange(value, 'b s (h d) -> b h s d', h=kv_n_heads)
if past_key_value is not None:
# attn_impl: flash & triton use kernels which expect input shape [b, s, h, d_head].
# kv_cache is therefore stored using that shape.
# attn_impl: torch stores the kv_cache in the ordering which is most advantageous
# for its attn computation ie
# keys are stored as tensors with shape [b, h, d_head, s] and
# values are stored as tensors with shape [b, h, s, d_head]
if len(past_key_value) != 0:
k = torch.cat([past_key_value[0], k], dim=3)
v = torch.cat([past_key_value[1], v], dim=2)
past_key_value = (k, v)
b, _, s_q, d = q.shape
s_k = k.size(-1)
if softmax_scale is None:
softmax_scale = 1 / math.sqrt(d)
attn_weight = q.matmul(k) * softmax_scale
if attn_bias is not None:
# clamp to 0 necessary for torch 2.0 compile()
_s_q = max(0, attn_bias.size(2) - s_q)
_s_k = max(0, attn_bias.size(3) - s_k)
attn_bias = attn_bias[:, :, _s_q:, _s_k:]
if (attn_bias.size(-1) != 1 and
attn_bias.size(-1) != s_k) or (attn_bias.size(-2) != 1 and
attn_bias.size(-2) != s_q):
raise RuntimeError(
f'attn_bias (shape: {attn_bias.shape}) is expected to broadcast to shape: {attn_weight.shape}.'
)
attn_weight = attn_weight + attn_bias
min_val = torch.finfo(q.dtype).min
if key_padding_mask is not None:
if attn_bias is not None:
warnings.warn(
                'Propagating key_padding_mask to the attention module ' +\
                'and applying it within the attention module can cause ' +\
                'unnecessary computation/memory usage. Consider integrating ' +\
'into attn_bias once and passing that to each attention ' +\
'module instead.'
)
attn_weight = attn_weight.masked_fill(
~key_padding_mask.view((b, 1, 1, s_k)), min_val)
if is_causal and (not q.size(2) == 1):
s = max(s_q, s_k)
causal_mask = attn_weight.new_ones(s, s, dtype=torch.float32)
causal_mask = causal_mask.tril()
causal_mask = causal_mask.to(torch.bool)
causal_mask = ~causal_mask
causal_mask = causal_mask[-s_q:, -s_k:]
attn_weight = attn_weight.masked_fill(causal_mask.view(1, 1, s_q, s_k),
min_val)
attn_weight = torch.softmax(attn_weight, dim=-1)
if dropout_p:
attn_weight = torch.nn.functional.dropout(attn_weight,
p=dropout_p,
training=training,
inplace=True)
out = attn_weight.to(v.dtype).matmul(v)
out = rearrange(out, 'b h s d -> b s (h d)')
if needs_weights:
return out, attn_weight, past_key_value
return out, None, past_key_value
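# Minimal usage sketch (illustrative addition, not part of the original file; it assumes the
# module-level llmfoundry imports above resolve): the pure-torch attention path runs on CPU
# and takes fused (h d) projections as input.
if __name__ == "__main__":
    b, s, h, d = 2, 8, 4, 16
    q = torch.randn(b, s, h * d)
    k = torch.randn(b, s, h * d)
    v = torch.randn(b, s, h * d)
    out, _, _ = scaled_multihead_dot_product_attention(q, k, v, n_heads=h, is_causal=True)
    print(out.shape)  # torch.Size([2, 8, 64])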
def check_valid_inputs(*tensors, valid_dtypes=[torch.float16, torch.bfloat16]):
for tensor in tensors:
if tensor.dtype not in valid_dtypes:
raise TypeError(f'{tensor.dtype=} must be in {valid_dtypes=}.')
if not tensor.is_cuda:
raise TypeError(f'Inputs must be cuda tensors ({tensor.is_cuda=}).')
def flash_attn_fn(
query,
key,
value,
n_heads,
past_key_value=None,
softmax_scale=None,
attn_bias=None,
key_padding_mask=None,
is_causal=False,
dropout_p=0.0,
training=False,
needs_weights=False,
multiquery=False,
):
try:
from flash_attn import bert_padding, flash_attn_interface # type: ignore # yapf: disable # isort: skip
except:
raise RuntimeError('Please install flash-attn==1.0.3.post0')
check_valid_inputs(query, key, value)
if past_key_value is not None:
if len(past_key_value) != 0:
key = torch.cat([past_key_value[0], key], dim=1)
value = torch.cat([past_key_value[1], value], dim=1)
past_key_value = (key, value)
if attn_bias is not None:
# clamp to 0 necessary for torch 2.0 compile()
_s_q = max(0, attn_bias.size(2) - query.size(1))
_s_k = max(0, attn_bias.size(3) - key.size(1))
attn_bias = attn_bias[:, :, _s_q:, _s_k:]
if attn_bias is not None:
raise NotImplementedError(f'attn_bias not implemented for flash attn.')
batch_size, seqlen = query.shape[:2]
if key_padding_mask is None:
key_padding_mask = torch.ones_like(key[:, :, 0], dtype=torch.bool)
query_padding_mask = key_padding_mask[:, -query.size(1):]
query_unpad, indices_q, cu_seqlens_q, max_seqlen_q = bert_padding.unpad_input(
query, query_padding_mask)
query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)
key_unpad, _, cu_seqlens_k, max_seqlen_k = bert_padding.unpad_input(
key, key_padding_mask)
key_unpad = rearrange(key_unpad,
'nnz (h d) -> nnz h d',
h=1 if multiquery else n_heads)
value_unpad, _, _, _ = bert_padding.unpad_input(value, key_padding_mask)
value_unpad = rearrange(value_unpad,
'nnz (h d) -> nnz h d',
h=1 if multiquery else n_heads)
if multiquery:
# Expanding a tensor does not allocate new memory, but only creates a new
# view on the existing tensor where a dimension of size one is expanded
# to a larger size by setting the stride to 0.
# - pytorch docs
#
        # hopefully the kernels can utilize this and we're not just wasting bandwidth here
key_unpad = key_unpad.expand(key_unpad.size(0), n_heads,
key_unpad.size(-1))
value_unpad = value_unpad.expand(value_unpad.size(0), n_heads,
value_unpad.size(-1))
dropout_p = dropout_p if training else 0.0
reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
output_unpad = flash_attn_interface.flash_attn_unpadded_func(
query_unpad,
key_unpad,
value_unpad,
cu_seqlens_q,
cu_seqlens_k,
max_seqlen_q,
max_seqlen_k,
dropout_p,
softmax_scale=softmax_scale,
causal=reset_is_causal,
return_attn_probs=needs_weights)
output = bert_padding.pad_input(
rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size,
seqlen)
return output, None, past_key_value
def triton_flash_attn_fn(
query,
key,
value,
n_heads,
past_key_value=None,
softmax_scale=None,
attn_bias=None,
key_padding_mask=None,
is_causal=False,
dropout_p=0.0,
training=False,
needs_weights=False,
multiquery=False,
):
try:
from llmfoundry.models.layers.flash_attn_triton import flash_attn_func
except:
_installed = False
if version.parse(torch.__version__) < version.parse('2.0.0'):
_installed = True
# if torch1.13.1 revert to using triton flash attn from HazyResearch
# with flash-attn==1.0.3.post0 and triton==2.0.0.dev20221202
try:
from flash_attn.flash_attn_triton import flash_attn_func
except:
_installed = False
if not _installed:
# installing triton-pre-mlir works for both torch1.13.1 and torch2.0+
# default recommendation is to install this variant
raise RuntimeError(
'Requirements for `attn_impl: triton` not installed. Either (1) have a CUDA-compatible GPU '
'and `pip install .[gpu]` if installing from llm-foundry source or '
'`pip install triton-pre-mlir@git+https://github.com/vchiley/triton.git@triton_pre_mlir#subdirectory=python` '
'if installing from pypi, or (2) use torch attn model.attn_config.attn_impl=torch (torch attn_impl will be slow). '
'Note: (1) requires you have CMake and PyTorch already installed.'
)
check_valid_inputs(query, key, value)
if past_key_value is not None:
if len(past_key_value) != 0:
key = torch.cat([past_key_value[0], key], dim=1)
value = torch.cat([past_key_value[1], value], dim=1)
past_key_value = (key, value)
if attn_bias is not None:
# clamp to 0 necessary for torch 2.0 compile()
_s_q = max(0, attn_bias.size(2) - query.size(1))
_s_k = max(0, attn_bias.size(3) - key.size(1))
attn_bias = attn_bias[:, :, _s_q:, _s_k:]
if dropout_p:
raise NotImplementedError(
f'Dropout not implemented for attn_impl: triton.')
if needs_weights:
raise NotImplementedError(
f'attn_impl: triton cannot return attn weights.')
if key_padding_mask is not None:
warnings.warn(
'Propagating key_padding_mask to the attention module ' +\
'and applying it within the attention module can cause ' +\
'unnecessary computation/memory usage. Consider integrating ' +\
'into attn_bias once and passing that to each attention ' +\
'module instead.'
)
b_size, s_k = key_padding_mask.shape[:2]
if attn_bias is None:
attn_bias = query.new_zeros(b_size, 1, 1, s_k)
attn_bias = attn_bias.masked_fill(
~key_padding_mask.view((b_size, 1, 1, s_k)),
torch.finfo(query.dtype).min)
query = rearrange(query, 'b s (h d) -> b s h d', h=n_heads)
key = rearrange(key, 'b s (h d) -> b s h d', h=1 if multiquery else n_heads)
value = rearrange(value,
'b s (h d) -> b s h d',
h=1 if multiquery else n_heads)
if multiquery:
# necessary to repeat instead of expand tensor because
# output contains NaN in edge cases such as with head dimension = 8
key = key.repeat(1, 1, n_heads, 1)
value = value.repeat(1, 1, n_heads, 1)
reset_is_causal = _reset_is_causal(query.size(1), key.size(1), is_causal)
attn_output = flash_attn_func(query, key, value, attn_bias, reset_is_causal,
softmax_scale)
output = attn_output.view(*attn_output.shape[:2], -1)
return output, None, past_key_value
class MultiheadAttention(nn.Module):
"""Multi-head self attention.
    Using the torch or triton attention implementation enables the user to also use
additive bias.
"""
def __init__(
self,
d_model: int,
n_heads: int,
attn_impl: str = 'triton',
clip_qkv: Optional[float] = None,
qk_ln: bool = False,
softmax_scale: Optional[float] = None,
attn_pdrop: float = 0.0,
norm_type: str = 'low_precision_layernorm',
fc_type: str = 'torch',
verbose: int = 0,
device: Optional[str] = None,
):
super().__init__()
self.attn_impl = attn_impl
self.clip_qkv = clip_qkv
self.qk_ln = qk_ln
self.d_model = d_model
self.n_heads = n_heads
self.softmax_scale = softmax_scale
if self.softmax_scale is None:
self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads)
self.attn_dropout_p = attn_pdrop
fc_kwargs = {}
if fc_type != 'te':
fc_kwargs['device'] = device
self.Wqkv = FC_CLASS_REGISTRY[fc_type](
self.d_model,
3 * self.d_model,
**fc_kwargs,
)
# for param init fn; enables shape based init of fused layers
fuse_splits = (d_model, 2 * d_model)
self.Wqkv._fused = (0, fuse_splits) # type: ignore
if self.qk_ln:
norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
self.q_ln = norm_class(self.d_model, device=device)
self.k_ln = norm_class(self.d_model, device=device)
if self.attn_impl == 'flash':
self.attn_fn = flash_attn_fn
elif self.attn_impl == 'triton':
self.attn_fn = triton_flash_attn_fn
if verbose:
warnings.warn(
'While `attn_impl: triton` can be faster than `attn_impl: flash` ' +\
'it uses more memory. When training larger models this can trigger ' +\
'alloc retries which hurts performance. If encountered, we recommend ' +\
'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.'
)
elif self.attn_impl == 'torch':
self.attn_fn = scaled_multihead_dot_product_attention
if torch.cuda.is_available() and verbose:
warnings.warn(
'Using `attn_impl: torch`. If your model does not use `alibi` or ' +\
'`prefix_lm` we recommend using `attn_impl: flash` otherwise ' +\
'we recommend using `attn_impl: triton`.'
)
else:
raise ValueError(f'{attn_impl=} is an invalid setting.')
self.out_proj = FC_CLASS_REGISTRY[fc_type](
self.d_model,
self.d_model,
**fc_kwargs,
)
self.out_proj._is_residual = True # type: ignore
def forward(
self,
x,
past_key_value=None,
attn_bias=None,
attention_mask=None,
is_causal=True,
needs_weights=False,
):
qkv = self.Wqkv(x)
if self.clip_qkv:
qkv = qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv)
query, key, value = qkv.chunk(3, dim=2)
key_padding_mask = attention_mask
if self.qk_ln:
# Applying layernorm to qk
dtype = query.dtype
query = self.q_ln(query).to(dtype)
key = self.k_ln(key).to(dtype)
context, attn_weights, past_key_value = self.attn_fn(
query,
key,
value,
self.n_heads,
past_key_value=past_key_value,
softmax_scale=self.softmax_scale,
attn_bias=attn_bias,
key_padding_mask=key_padding_mask,
is_causal=is_causal,
dropout_p=self.attn_dropout_p,
training=self.training,
needs_weights=needs_weights,
)
return self.out_proj(context), attn_weights, past_key_value
class MultiQueryAttention(nn.Module):
"""Multi-Query self attention.
    Using the torch or triton attention implementation enables the user to also use
additive bias.
"""
def __init__(
self,
d_model: int,
n_heads: int,
attn_impl: str = 'triton',
clip_qkv: Optional[float] = None,
qk_ln: bool = False,
softmax_scale: Optional[float] = None,
attn_pdrop: float = 0.0,
norm_type: str = 'low_precision_layernorm',
fc_type: str = 'torch',
verbose: int = 0,
device: Optional[str] = None,
):
super().__init__()
self.attn_impl = attn_impl
self.clip_qkv = clip_qkv
self.qk_ln = qk_ln
self.d_model = d_model
self.n_heads = n_heads
self.head_dim = d_model // n_heads
self.softmax_scale = softmax_scale
if self.softmax_scale is None:
self.softmax_scale = 1 / math.sqrt(self.head_dim)
self.attn_dropout_p = attn_pdrop
fc_kwargs = {}
if fc_type != 'te':
fc_kwargs['device'] = device
# NOTE: if we ever want to make attn TensorParallel, I'm pretty sure we'll
# want to split Wqkv into Wq and Wkv where Wq can be TensorParallel but
# Wkv shouldn't be TensorParallel
# - vchiley
self.Wqkv = FC_CLASS_REGISTRY[fc_type](
d_model,
d_model + 2 * self.head_dim,
**fc_kwargs,
)
# for param init fn; enables shape based init of fused layers
fuse_splits = (d_model, d_model + self.head_dim)
self.Wqkv._fused = (0, fuse_splits) # type: ignore
if self.qk_ln:
norm_class = NORM_CLASS_REGISTRY[norm_type.lower()]
self.q_ln = norm_class(d_model, device=device)
self.k_ln = norm_class(self.head_dim, device=device)
if self.attn_impl == 'flash':
self.attn_fn = flash_attn_fn
elif self.attn_impl == 'triton':
self.attn_fn = triton_flash_attn_fn
if verbose:
warnings.warn(
'While `attn_impl: triton` can be faster than `attn_impl: flash` ' +\
'it uses more memory. When training larger models this can trigger ' +\
'alloc retries which hurts performance. If encountered, we recommend ' +\
'using `attn_impl: flash` if your model does not use `alibi` or `prefix_lm`.'
)
elif self.attn_impl == 'torch':
self.attn_fn = scaled_multihead_dot_product_attention
if torch.cuda.is_available() and verbose:
warnings.warn(
'Using `attn_impl: torch`. If your model does not use `alibi` or ' +\
'`prefix_lm` we recommend using `attn_impl: flash` otherwise ' +\
'we recommend using `attn_impl: triton`.'
)
else:
raise ValueError(f'{attn_impl=} is an invalid setting.')
self.out_proj = FC_CLASS_REGISTRY[fc_type](
self.d_model,
self.d_model,
**fc_kwargs,
)
self.out_proj._is_residual = True # type: ignore
def forward(
self,
x,
past_key_value=None,
attn_bias=None,
attention_mask=None,
is_causal=True,
needs_weights=False,
):
qkv = self.Wqkv(x)
if self.clip_qkv:
qkv = qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv)
query, key, value = qkv.split(
[self.d_model, self.head_dim, self.head_dim], dim=2)
key_padding_mask = attention_mask
if self.qk_ln:
# Applying layernorm to qk
dtype = query.dtype
query = self.q_ln(query).to(dtype)
key = self.k_ln(key).to(dtype)
context, attn_weights, past_key_value = self.attn_fn(
query,
key,
value,
self.n_heads,
past_key_value=past_key_value,
softmax_scale=self.softmax_scale,
attn_bias=attn_bias,
key_padding_mask=key_padding_mask,
is_causal=is_causal,
dropout_p=self.attn_dropout_p,
training=self.training,
needs_weights=needs_weights,
multiquery=True,
)
return self.out_proj(context), attn_weights, past_key_value
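# --- Hedged usage sketch (not part of the original module) ---
# Minimal smoke test of MultiQueryAttention, assuming the FC_CLASS_REGISTRY
# ('torch' -> nn.Linear) and scaled_multihead_dot_product_attention helpers
# defined earlier in this file. Shapes and sizes below are illustrative only.
def _multiquery_attention_example():
    attn = MultiQueryAttention(d_model=64, n_heads=4, attn_impl='torch')
    x = torch.randn(2, 8, 64)            # (batch, seq_len, d_model)
    out, _, _ = attn(x, is_causal=True)  # single shared K/V head, 4 query heads
    return out.shape                     # (2, 8, 64)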
def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal,
use_sequence_id):
if attn_impl == 'flash':
return None
elif attn_impl in ['torch', 'triton']:
if alibi:
if (prefix_lm or not causal) or use_sequence_id:
return (1, n_heads, seq_len, seq_len)
return (1, n_heads, 1, seq_len)
elif prefix_lm or use_sequence_id:
return (1, 1, seq_len, seq_len)
return None
else:
raise ValueError(f'{attn_impl=} is an invalid setting.')
def build_attn_bias(
attn_impl,
attn_bias,
n_heads,
seq_len,
causal=False,
alibi=False,
alibi_bias_max=8,
):
if attn_impl == 'flash':
return None
elif attn_impl in ['torch', 'triton']:
if alibi:
# in place add alibi to attn bias
device, dtype = attn_bias.device, attn_bias.dtype
attn_bias = attn_bias.add(
build_alibi_bias(
n_heads,
seq_len,
full=not causal,
alibi_bias_max=alibi_bias_max,
device=device,
dtype=dtype,
))
return attn_bias
else:
raise ValueError(f'{attn_impl=} is an invalid setting.')
def gen_slopes(n_heads, alibi_bias_max=8, device=None):
_n_heads = 2**math.ceil(math.log2(n_heads))
m = torch.arange(1, _n_heads + 1, dtype=torch.float32, device=device)
m = m.mul(alibi_bias_max / _n_heads)
slopes = (1. / torch.pow(2, m))
if _n_heads != n_heads:
# if n_heads is not a power of two,
# Huggingface and FasterTransformer calculate slopes normally,
# then return this strided concatenation of slopes
slopes = torch.concat([slopes[1::2], slopes[::2]])[:n_heads]
return slopes.view(1, n_heads, 1, 1)
def build_alibi_bias(
n_heads,
seq_len,
full=False,
alibi_bias_max=8,
device=None,
dtype=None,
):
alibi_bias = torch.arange(1 - seq_len, 1, dtype=torch.int32,
device=device).view(1, 1, 1, seq_len)
if full:
# generate 1 x Heads x SeqLen x SeqLen alibi bias mask
# otherwise the mask is 1 x Heads x 1 x SeqLen (which is broadcast to the appropriate size)
alibi_bias = alibi_bias - torch.arange(
1 - seq_len, 1, dtype=torch.int32, device=device).view(
1, 1, seq_len, 1)
alibi_bias = alibi_bias.abs().mul(-1)
slopes = gen_slopes(n_heads, alibi_bias_max, device=device)
alibi_bias = alibi_bias * slopes
return alibi_bias.to(dtype=dtype)
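# --- Hedged usage sketch (not part of the original module) ---
# How the ALiBi helpers above compose: `attn_bias_shape` reports the buffer
# shape to allocate and `build_attn_bias` fills it with per-head slopes for the
# 'torch'/'triton' paths. The head count and sequence length are illustrative.
def _alibi_bias_example(n_heads=4, seq_len=8):
    shape = attn_bias_shape('torch', n_heads, seq_len, alibi=True,
                            prefix_lm=False, causal=True, use_sequence_id=False)
    bias = torch.zeros(shape)            # (1, n_heads, 1, seq_len) for causal alibi
    bias = build_attn_bias('torch', bias, n_heads, seq_len, causal=True, alibi=True)
    return bias                          # additive bias, broadcast over queries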
ATTN_CLASS_REGISTRY = {
'multihead_attention': MultiheadAttention,
'multiquery_attention': MultiQueryAttention,
} | FlashAttention20Triton-main | flashtriton/flash_mpt.py |
import math
import torch
from functools import partial
from torch import nn, einsum
from torch.autograd.function import Function
from einops import rearrange
from torch.jit import fork, wait
from torch.cuda.amp import autocast, GradScaler
from torch.nn import DataParallel
# constants
EPSILON = 1e-10
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# flash attention forwards and backwards
# flash attention v1 - https://arxiv.org/abs/2205.14135
# flash attention v2 - https://tridao.me/publications/flash2/flash2.pdf
class FlashAttentionFunction(Function):
@staticmethod
@torch.no_grad()
def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size):
""" Algorithm 1 in the v2 paper """
device = q.device
max_neg_value = -torch.finfo(q.dtype).max
qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
o = torch.zeros_like(q)
all_row_sums = torch.zeros((*q.shape[:-1], 1), device = device)
all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, device = device)
scale = (q.shape[-1] ** -0.5)
num_row_tiles = math.ceil(q.shape[-2] / q_bucket_size)
num_col_tiles = math.ceil(k.shape[-2] / k_bucket_size)
if exists(mask) and mask.ndim == 2:
mask = rearrange(mask, 'b n -> b 1 1 n')
if not exists(mask):
col_masks = (None,) * num_col_tiles
mask = (col_masks,) * num_row_tiles
else:
mask = ((mask,) * num_row_tiles) if mask.shape[-2] == 1 else mask.split(q_bucket_size, dim = -2)
mask = tuple(((row_mask,) * num_col_tiles) if row_mask.shape[-1] == 1 else row_mask.split(k_bucket_size, dim = -1) for row_mask in mask)
row_splits = zip(
q.split(q_bucket_size, dim = -2),
o.split(q_bucket_size, dim = -2),
mask,
all_row_sums.split(q_bucket_size, dim = -2),
all_row_maxes.split(q_bucket_size, dim = -2),
)
for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits):
q_start_index = ind * q_bucket_size - qk_len_diff
col_splits = zip(
k.split(k_bucket_size, dim = -2),
v.split(k_bucket_size, dim = -2),
row_mask
)
for k_ind, (kc, vc, col_mask) in enumerate(col_splits):
k_start_index = k_ind * k_bucket_size
attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale
if exists(col_mask):
attn_weights.masked_fill_(~col_mask, max_neg_value)
if causal and q_start_index < (k_start_index + k_bucket_size - 1):
causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1)
attn_weights.masked_fill_(causal_mask, max_neg_value)
block_row_maxes = attn_weights.amax(dim = -1, keepdims = True)
new_row_maxes = torch.maximum(block_row_maxes, row_maxes)
exp_weights = torch.exp(attn_weights - new_row_maxes)
if exists(col_mask):
exp_weights.masked_fill_(~col_mask, 0.)
block_row_sums = exp_weights.sum(dim = -1, keepdims = True).clamp(min = EPSILON)
exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc)
exp_row_max_diff = torch.exp(row_maxes - new_row_maxes)
new_row_sums = exp_row_max_diff * row_sums + block_row_sums
oc.mul_(exp_row_max_diff).add_(exp_values)
row_maxes.copy_(new_row_maxes)
row_sums.copy_(new_row_sums)
oc.div_(row_sums)
lse = all_row_sums.log() + all_row_maxes
ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size)
ctx.save_for_backward(q, k, v, o, lse)
return o
@staticmethod
@torch.no_grad()
def backward(ctx, do):
""" Algorithm 2 in the v2 paper """
causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args
q, k, v, o, lse = ctx.saved_tensors
device = q.device
max_neg_value = -torch.finfo(q.dtype).max
qk_len_diff = max(k.shape[-2] - q.shape[-2], 0)
dq = torch.zeros_like(q)
dk = torch.zeros_like(k)
dv = torch.zeros_like(v)
row_splits = zip(
q.split(q_bucket_size, dim = -2),
o.split(q_bucket_size, dim = -2),
do.split(q_bucket_size, dim = -2),
mask,
lse.split(q_bucket_size, dim = -2),
dq.split(q_bucket_size, dim = -2)
)
for ind, (qc, oc, doc, row_mask, lsec, dqc) in enumerate(row_splits):
q_start_index = ind * q_bucket_size - qk_len_diff
col_splits = zip(
k.split(k_bucket_size, dim = -2),
v.split(k_bucket_size, dim = -2),
dk.split(k_bucket_size, dim = -2),
dv.split(k_bucket_size, dim = -2),
row_mask
)
for k_ind, (kc, vc, dkc, dvc, col_mask) in enumerate(col_splits):
k_start_index = k_ind * k_bucket_size
attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale
if causal and q_start_index < (k_start_index + k_bucket_size - 1):
causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1)
attn_weights.masked_fill_(causal_mask, max_neg_value)
p = torch.exp(attn_weights - lsec)
if exists(col_mask):
p.masked_fill_(~col_mask, 0.)
dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc)
dp = einsum('... i d, ... j d -> ... i j', doc, vc)
D = (doc * oc).sum(dim = -1, keepdims = True)
ds = p * scale * (dp - D)
dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc)
dk_chunk = einsum('... i j, ... i d -> ... j d', ds, qc)
dqc.add_(dq_chunk)
dkc.add_(dk_chunk)
dvc.add_(dv_chunk)
return dq, dk, dv, None, None, None, None
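# --- Hedged usage sketch (not part of the original module) ---
# Direct call into the autograd Function above with toy shapes; the bucket
# sizes are illustrative assumptions, not tuned values.
def _flash_attention_function_example():
    b, h, n, d = 1, 2, 32, 16
    q, k, v = (torch.randn(b, h, n, d) for _ in range(3))
    out = FlashAttentionFunction.apply(q, k, v, None, True, 16, 16)  # mask=None, causal=True
    return out.shape                     # (1, 2, 32, 16)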
# main class
# just flash attention in plain pytorch
# it will be way slower than implementing it in CUDA
# for tinkering and educational purposes
class FlashAttention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
causal = False,
q_bucket_size = 512,
k_bucket_size = 1024,
parallel = False,
mixed_precision = False
):
super().__init__()
self.heads = heads
self.causal = causal
self.parallel = parallel
self.mixed_precision = mixed_precision
inner_dim = heads * dim_head
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
# memory efficient attention related parameters
# can be overriden on forward
self.q_bucket_size = q_bucket_size
self.k_bucket_size = k_bucket_size
if self.parallel:
self.model = DataParallel(self)
if self.mixed_precision:
self.scaler = GradScaler()
def forward(
self,
x,
context = None,
mask = None,
q_bucket_size = None,
k_bucket_size = None,
):
q_bucket_size = default(q_bucket_size, self.q_bucket_size)
k_bucket_size = default(k_bucket_size, self.k_bucket_size)
h = self.heads
context = default(context, x)
q = self.to_q(x)
k, v = self.to_kv(context).chunk(2, dim=-1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
if self.parallel:
# Split the input data into chunks and move each chunk to the correct GPU
num_gpus = torch.cuda.device_count()
x_chunks = x.split(x.size(0) // num_gpus)
x_chunks = [chunk.to(f'cuda:{i}') for i, chunk in enumerate(x_chunks)]
q = x_chunks
if self.mixed_precision:
# Use autocast to allow operations to run in lower precision
with autocast():
out = FlashAttentionFunction.apply(q, k, v, mask, self.causal, q_bucket_size, k_bucket_size)
else:
out = FlashAttentionFunction.apply(q, k, v, mask, self.causal, q_bucket_size, k_bucket_size)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out) | FlashAttention20Triton-main | flashtriton/flash_torch.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import math
from dataclasses import dataclass
from typing import Any, Optional, Tuple
import fairscale.nn.model_parallel.initialize as fs_init
import torch
import torch.nn.functional as F
from fairscale.nn.model_parallel.layers import (
ColumnParallelLinear,
ParallelEmbedding,
RowParallelLinear,
)
from torch import nn
@dataclass
class ModelArgs:
dim: int = 4096
n_layers: int = 32
n_heads: int = 32
n_kv_heads: Optional[int] = None
vocab_size: int = -1 # defined later by tokenizer
multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2
ffn_dim_multiplier: Optional[float] = None
norm_eps: float = 1e-5
max_batch_size: int = 32
max_seq_len: int = 2048
class RMSNorm(torch.nn.Module):
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.ones(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
output = self._norm(x.float()).type_as(x)
return output * self.weight
def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):
freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
t = torch.arange(end, device=freqs.device) # type: ignore
freqs = torch.outer(t, freqs).float() # type: ignore
freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64
return freqs_cis
def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
ndim = x.ndim
assert 0 <= 1 < ndim
assert freqs_cis.shape == (x.shape[1], x.shape[-1])
shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
return freqs_cis.view(*shape)
def apply_rotary_emb(
xq: torch.Tensor,
xk: torch.Tensor,
freqs_cis: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
return xq_out.type_as(xq), xk_out.type_as(xk)
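# --- Hedged usage sketch (not part of the original file) ---
# Rotary-embedding round trip on toy shapes: precompute the complex rotations
# for a short sequence, then rotate query/key tensors with a matching head_dim.
def _rotary_example():
    dim, seqlen = 8, 4                        # head_dim must be even
    freqs_cis = precompute_freqs_cis(dim, seqlen)
    xq = torch.randn(2, seqlen, 3, dim)       # (bs, seqlen, n_heads, head_dim)
    xk = torch.randn(2, seqlen, 3, dim)
    xq_out, xk_out = apply_rotary_emb(xq, xk, freqs_cis)
    return xq_out.shape, xk_out.shape         # shapes are unchanged by the rotation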
def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
"""torch.repeat_interleave(x, dim=2, repeats=n_rep)"""
bs, slen, n_kv_heads, head_dim = x.shape
if n_rep == 1:
return x
return (
x[:, :, :, None, :]
.expand(bs, slen, n_kv_heads, n_rep, head_dim)
.reshape(bs, slen, n_kv_heads * n_rep, head_dim)
)
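# --- Hedged usage sketch (not part of the original file) ---
# Quick check of the docstring's claim above: repeat_kv duplicates each KV head
# n_rep times along the head axis, matching torch.repeat_interleave on dim=2.
def _repeat_kv_example():
    x = torch.randn(2, 5, 4, 16)              # (bs, seqlen, n_kv_heads, head_dim)
    out = repeat_kv(x, n_rep=3)               # -> (2, 5, 12, 16)
    ref = torch.repeat_interleave(x, repeats=3, dim=2)
    assert torch.equal(out, ref)
    return out.shape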
class Attention(nn.Module):
def __init__(self, args: ModelArgs):
super().__init__()
self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
model_parallel_size = fs_init.get_model_parallel_world_size()
self.n_local_heads = args.n_heads // model_parallel_size
self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
self.n_rep = self.n_local_heads // self.n_local_kv_heads
self.head_dim = args.dim // args.n_heads
self.wq = ColumnParallelLinear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
gather_output=False,
init_method=lambda x: x,
)
self.wk = ColumnParallelLinear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
gather_output=False,
init_method=lambda x: x,
)
self.wv = ColumnParallelLinear(
args.dim,
self.n_kv_heads * self.head_dim,
bias=False,
gather_output=False,
init_method=lambda x: x,
)
self.wo = RowParallelLinear(
args.n_heads * self.head_dim,
args.dim,
bias=False,
input_is_parallel=True,
init_method=lambda x: x,
)
self.cache_k = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
self.cache_v = torch.zeros(
(
args.max_batch_size,
args.max_seq_len,
self.n_local_kv_heads,
self.head_dim,
)
).cuda()
def forward(
self,
x: torch.Tensor,
start_pos: int,
freqs_cis: torch.Tensor,
mask: Optional[torch.Tensor],
):
bsz, seqlen, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
self.cache_k = self.cache_k.to(xq)
self.cache_v = self.cache_v.to(xq)
self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
keys = self.cache_k[:bsz, : start_pos + seqlen]
values = self.cache_v[:bsz, : start_pos + seqlen]
# repeat k/v heads if n_kv_heads < n_heads
keys = repeat_kv(keys, self.n_rep) # (bs, seqlen, n_local_heads, head_dim)
values = repeat_kv(values, self.n_rep) # (bs, seqlen, n_local_heads, head_dim)
xq = xq.transpose(1, 2) # (bs, n_local_heads, seqlen, head_dim)
keys = keys.transpose(1, 2)
values = values.transpose(1, 2)
scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(self.head_dim)
if mask is not None:
scores = scores + mask # (bs, n_local_heads, seqlen, cache_len + seqlen)
scores = F.softmax(scores.float(), dim=-1).type_as(xq)
output = torch.matmul(scores, values) # (bs, n_local_heads, seqlen, head_dim)
output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)
return self.wo(output)
| FlashAttention20Triton-main | flashtriton/lama.py |
from setuptools import find_packages, setup
setup(
name='gato-tf',
version='0.0.2',
description='Unofficial Gato: A Generalist Agent',
url='https://github.com/OrigamiDream/gato.git',
author='OrigamiDream',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
install_requires=[
'tensorflow>=2.11',
],
keywords=[
'deep learning',
'gato',
'tensorflow',
'generalist agent'
]
)
| GATO-by-Deepmind-main | setup.py |
import copy
from typing import Dict, Any
class GatoConfig:
@staticmethod
def large():
return GatoConfig(num_transformer_blocks=24,
num_attention_heads=16,
layer_width=2048,
feedforward_hidden_size=8192,
key_value_size=128)
@staticmethod
def baseline():
return GatoConfig(num_transformer_blocks=12,
num_attention_heads=12,
layer_width=1536,
feedforward_hidden_size=6144,
key_value_size=128)
@staticmethod
def small():
return GatoConfig(num_transformer_blocks=8,
num_attention_heads=24,
layer_width=768,
feedforward_hidden_size=3072,
key_value_size=32)
def __init__(self, **kwargs):
self.input_dim = kwargs.pop('input_dim', 768)
self.img_patch_size = kwargs.pop('img_patch_size', 16)
# Section 2.3. Training
self.token_sequence_length = kwargs.pop('token_sequence_length', 1024)
# Section 2.1. Tokenization
# Text - SentencePiece
self.vocabulary_size = kwargs.pop('vocabulary_size', 32000)
# Discrete values
self.actions_size = kwargs.pop('actions_size', 1024)
# Continuous values
self.continuous_values_size = kwargs.pop('continuous_values_size', 1024)
# Appendix C.1. Transformer Hyperparameters
self.num_transformer_blocks = kwargs.pop('num_transformer_blocks', 8)
self.num_attention_heads = kwargs.pop('num_attention_heads', 24)
self.layer_width = kwargs.pop('layer_width', 768)
self.feedforward_hidden_size = kwargs.pop('feedforward_hidden_size', 3072)
self.key_value_size = kwargs.pop('key_value_size', 32)
# Appendix E. Regularization
self.dropout_rate = kwargs.pop('dropout_rate', 0.1)
# Appendix C.2. Embedding Function
self.num_group_norm_groups = kwargs.pop('num_group_norm_groups', 32)
# Appendix C.3. Position Encodings > Patch Position Encodings
self.discretize_depth = kwargs.pop('discretize_depth', 128)
# Appendix C.3. Position Encodings > Local Observation Position Encodings
self.local_position_encoding_size = kwargs.pop('local_position_encoding_size', 512)
@property
def embedding_input_size(self):
return self.vocabulary_size + self.continuous_values_size + self.actions_size + 1
@property
def output_target_size(self):
return self.vocabulary_size + self.actions_size
def to_dict(self) -> Dict[str, Any]:
output = copy.deepcopy(self.__dict__)
return output
@classmethod
def from_dict(cls, config_dict: Dict[str, Any]) -> "GatoConfig":
config = cls(**config_dict)
return config
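# --- Hedged usage sketch (not part of the original file) ---
# Round-tripping one of the preset configurations through to_dict / from_dict;
# the printed field is illustrative only.
if __name__ == '__main__':
    cfg = GatoConfig.small()
    restored = GatoConfig.from_dict(cfg.to_dict())
    assert restored.layer_width == cfg.layer_width == 768
    print('embedding_input_size:', restored.embedding_input_size)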
| GATO-by-Deepmind-main | gato/config.py |
from gato.config import GatoConfig
from gato.models import Gato
| GATO-by-Deepmind-main | gato/__init__.py |
import tensorflow as tf
from tensorflow.keras import layers, models
from gato import GatoConfig
from typing import Dict, Any, Union
def _randomized_positions(from_v, to_v):
pos = tf.random.uniform(from_v.shape, minval=0, maxval=1, dtype=tf.float32)
pos = pos * tf.cast(to_v - from_v, dtype=tf.float32)
pos = tf.cast(pos, dtype=tf.int32)
return pos
def _rounded_mean_positions(from_v, to_v):
pos = tf.cast(from_v + to_v, tf.float32)
pos = pos / 2
pos = tf.round(pos)
return pos
def _broadcast(row_pos, col_pos, row_ones, col_ones):
# broadcast (5,) to (20,) with column-axis
row_pos = tf.expand_dims(row_pos, 1)
row_pos = tf.matmul(row_pos, col_ones, transpose_b=True)
row_pos = tf.reshape(row_pos, (-1,))
row_pos = tf.stop_gradient(row_pos)
# broadcast (4,) to (20,) with row-axis
col_pos = tf.expand_dims(col_pos, 1)
col_pos = tf.matmul(row_ones, col_pos, transpose_b=True)
col_pos = tf.reshape(col_pos, (-1,))
col_pos = tf.stop_gradient(col_pos)
return row_pos, col_pos
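# --- Hedged usage sketch (not part of the original file) ---
# Toy check of the broadcasting helper above: row/col positions of a 5x4 patch
# grid are tiled into two aligned vectors of length 20, one entry per patch.
def _broadcast_example():
    row_pos = tf.range(5, dtype=tf.float32)
    col_pos = tf.range(4, dtype=tf.float32)
    row_ones = tf.ones((5, 1), dtype=tf.float32)
    col_ones = tf.ones((4, 1), dtype=tf.float32)
    rows, cols = _broadcast(row_pos, col_pos, row_ones, col_ones)
    return rows.shape, cols.shape             # both (20,)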
class PatchPositionEncoding(layers.Layer):
def __init__(self,
config: Union[GatoConfig, Dict[str, Any]],
trainable=True, name=None, *args, **kwargs):
"""
Appendix C.3. Position Encodings
"""
super(PatchPositionEncoding, self).__init__(trainable=trainable, name=name, *args, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.embedding_dim = self.config.layer_width
self.discretize_depth = self.config.discretize_depth
self.patch_size = self.config.img_patch_size
self.row_embedding = layers.Embedding(self.discretize_depth, self.embedding_dim, name='row_embedding')
self.col_embedding = layers.Embedding(self.discretize_depth, self.embedding_dim, name='col_embedding')
def _discretize(self, pos):
return tf.round(pos * self.discretize_depth)
def _discretize_interval(self, interval):
pos_from, pos_to = interval
return self._discretize(pos_from), self._discretize(pos_to)
def call(self, inputs, *args, **kwargs):
# Appendix C.3. Position Encodings; Figure 15 | Patch position encodings.
training = kwargs['training'] if 'training' in kwargs else False
# input_ids must already be embedded by the resnet embedding function.
# row_pos and col_pos must be intervals which is tuple of (pos_from, pos_to)
# row_pos and col_pos must be normalized between [0, 1] to show their relativity.
input_ids, (row_pos, col_pos) = inputs
row_pos_from, row_pos_to = self._discretize_interval(row_pos)
col_pos_from, col_pos_to = self._discretize_interval(col_pos)
if training:
# > During training a random index is uniformly sampled from the quantized interval.
            row_pos = row_pos_from + tf.cast(_randomized_positions(row_pos_from, row_pos_to), tf.float32)
            col_pos = col_pos_from + tf.cast(_randomized_positions(col_pos_from, col_pos_to), tf.float32)
else:
# > During evaluation we deterministically take the (rounded) mean of the interval.
row_pos = _rounded_mean_positions(row_pos_from, row_pos_to)
col_pos = _rounded_mean_positions(col_pos_from, col_pos_to)
col_pos = tf.cast(col_pos, dtype=tf.int32)
row_pos = tf.cast(row_pos, dtype=tf.int32)
# > Once row and column position encoding are retrieved from the embedding table,
# > they are added onto the token embedding produced by the resnet embedding function.
return input_ids + self.row_embedding(row_pos) + self.col_embedding(col_pos)
def get_config(self):
config = super(PatchPositionEncoding, self).get_config()
config.update({
'config': self.config.to_dict(),
})
return config
class ResidualUnit(layers.Layer):
def __init__(self, num_groups: int, filters: int, trainable=True, name=None, *args, **kwargs):
super(ResidualUnit, self).__init__(trainable=trainable, name=name, *args, **kwargs)
self.num_groups = num_groups
self.filters = filters
self.gn1 = self.gn2 = None
self.conv1 = self.conv2 = None
self.conv_proj = self.gn_proj = None
def build(self, input_shape):
self.gn1 = layers.GroupNormalization(groups=self.num_groups, name='gn1')
self.gn2 = layers.GroupNormalization(groups=self.num_groups, name='gn2')
self.conv1 = layers.Conv2D(filters=self.filters // 2, kernel_size=(3, 3), strides=(1, 1),
use_bias=False, padding='same', name='conv1')
self.conv2 = layers.Conv2D(filters=self.filters, kernel_size=(3, 3), strides=(2, 2),
use_bias=False, padding='same', name='conv2')
self.conv_proj = layers.Conv2D(filters=self.filters, kernel_size=(1, 1), strides=(2, 2),
use_bias=False, padding='same', name='conv_proj')
self.gn_proj = layers.GroupNormalization(groups=self.num_groups, name='gn_proj')
def call(self, inputs, *args, **kwargs):
# Supplementary Material B. Agent Data Tokenization Details; Figure 16
# > This block uses the v2 ResNet architecture, GroupNorm (instead of LayerNorm) normalization,
# > and GELU (instead RELU) activation functions.
x = inputs
residual = self.conv_proj(self.gn_proj(x))
x = tf.nn.gelu(self.gn1(x))
x = self.conv1(x)
x = tf.nn.gelu(self.gn2(x))
x = self.conv2(x)
return x + residual
class ResidualEmbedding(layers.Layer):
def __init__(self, config: Union[GatoConfig, Dict[str, Any]], trainable=True, name=None, *args, **kwargs):
"""
Appendix C.2. Embedding Function
"""
super(ResidualEmbedding, self).__init__(trainable=trainable, name=name, *args, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.root_conv = self.conv_proj = None
self.residual_units = None
self.num_patches = None
def build(self, input_shape):
if self.config.input_dim != self.config.layer_width:
self.conv_proj = layers.Conv2D(filters=self.config.layer_width,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
use_bias=False,
name='conv_proj')
self.root_conv = models.Sequential([
layers.Conv2D(filters=96, kernel_size=(7, 7), strides=(2, 2),
use_bias=False, padding='same', name='conv_root'),
layers.GroupNormalization(groups=self.config.num_group_norm_groups, name='gn_root'),
layers.Activation('gelu', name='act_root')
])
self.residual_units = [ResidualUnit(num_groups=self.config.num_group_norm_groups,
filters=96 * 2 ** (i + 1),
name='residual_unit_{}'.format(i + 1))
for i in range(3)]
def call(self, inputs, *args, **kwargs):
# Section 2.1 Tokenization.
x = self.root_conv(inputs)
# NOTE: Page 3-4, Section 2.2 Embedding input tokens and setting output targets
# > Tokens belonging to image patches for any time-step are embedded
# > using a single ResNet block to obtain a vector per patch.
        # I don't think that transforming a single 16x16 patch into a feature map
        # with depth 768 at once gives the advantages that come from inductive bias.
        # This is currently being discussed in issue #2
for block in self.residual_units:
x = block(x)
if self.conv_proj is not None:
x = self.conv_proj(x)
x = tf.reshape(x, shape=(-1, inputs.shape[1], self.config.layer_width))
return x
def get_config(self):
config = super(ResidualEmbedding, self).get_config()
config.update({
'config': self.config.to_dict()
})
return config
class LocalPositionEncoding(layers.Layer):
def __init__(self, config: Union[GatoConfig, Dict[str, Any]], trainable=True, name=None, *args, **kwargs):
"""
Appendix C.3. Position Encodings > Local Observation Position Encodings
"""
super(LocalPositionEncoding, self).__init__(trainable=trainable, name=name, *args, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.embedding = None
def build(self, input_shape):
self.embedding = layers.Embedding(self.config.token_sequence_length, self.config.layer_width)
self.built = True
def call(self, inputs, *args, **kwargs):
# Appendix C.3. Position Encodings > Local Observation Position Encodings; Figure 18 | Local position encodings.
# > Note that no position encodings are added to action tokens.
# So I added `obs_mask` to mask the action token into zeros.
obs_pos, obs_mask = inputs
embed = self.embedding(obs_pos)
ones = tf.ones((embed.shape[0], 1, self.config.layer_width), dtype=tf.float32)
obs_mask = tf.cast(obs_mask, dtype=tf.float32)
obs_mask = tf.matmul(obs_mask, ones, transpose_a=True)
return embed * obs_mask
def get_config(self):
config = super(LocalPositionEncoding, self).get_config()
config.update({
'config': self.config.to_dict()
})
return config
class DiscreteEmbedding(layers.Layer):
def __init__(self, config: Union[GatoConfig, Dict[str, Any]], trainable=True, name=None, *args, **kwargs):
super(DiscreteEmbedding, self).__init__(trainable=trainable, name=name, *args, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.embedding = None
def build(self, input_shape):
# Appendix C.1. Transformer Hyperparameters
# Shared Embedding
with tf.name_scope('discrete_shared_embedding'):
self.embedding = layers.Embedding(self.config.embedding_input_size,
self.config.layer_width,
name='discrete_embedding')
self.built = True
def call(self, inputs, *args, **kwargs):
return self.embedding(inputs)
def get_config(self):
config = super(DiscreteEmbedding, self).get_config()
config.update({
'config': self.config.to_dict()
})
return config
| GATO-by-Deepmind-main | gato/models/embedding.py |
import tensorflow as tf
from gato.models.transformer import TransformerBlock
from gato.models.embedding import PatchPositionEncoding, ResidualEmbedding, LocalPositionEncoding, DiscreteEmbedding
from gato.models.tokenizers import ContinuousValueTokenizer
from tensorflow.keras import models
from gato import GatoConfig
from typing import Dict, Any, Union
class Gato(models.Model):
def __init__(self, config: Union[GatoConfig, Dict[str, Any]], trainable: bool = True, name: str = 'Gato', **kwargs):
super(Gato, self).__init__(trainable=trainable, name=name, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.image_embedding = PatchEmbedding(config, trainable=trainable, name='ImagePatchEmbedding')
self.discrete_embedding = DiscreteEmbedding(config, trainable=trainable, name='DiscreteEmbedding')
self.continuous_encoding = ContinuousValueTokenizer(config, name='ContinuousValueEncoding')
self.transformer = Transformer(config, trainable=trainable, name='Transformers')
self.local_pos_encoding = LocalPositionEncoding(config, trainable=trainable, name='LocalPositionEncoding')
def call(self, inputs, training=None, mask=None):
# input_ids with (B, L, 768)
# encoding with (B, L) or (B,)
# row_pos and col_pos with tuple of (pos_from, pos_to)
# obs_pos and obs_mask with (B, L) or (B,)
input_ids, (encoding, row_pos, col_pos), (obs_pos, obs_mask) = inputs
# Encoding flags for embed masks
# 0 - image
# 1 - continuous
# 2 - discrete (actions, texts)
encoding = tf.one_hot(encoding, depth=3, dtype=tf.float32)
ones = tf.ones((input_ids.shape[0], 1, self.config.layer_width), dtype=tf.float32)
image_embed = self.image_embedding((input_ids, (row_pos, col_pos)), training=training)
image_embed *= tf.matmul(encoding[..., 0], ones, transpose_a=True) # image patch masking
# continuous value takes from first value of input_ids
continuous_embed = self.continuous_encoding(input_ids[..., 0])
continuous_embed = self.discrete_embedding(continuous_embed)
continuous_embed *= tf.matmul(encoding[..., 1], ones, transpose_a=True) # continuous value masking
discrete_embed = self.discrete_embedding(input_ids[..., 0])
discrete_embed *= tf.matmul(encoding[..., 2], ones, transpose_a=True) # discrete value masking
# Appendix C.3. Position Encodings > Local Observation Position Encodings
# add local observation position encodings
embed = image_embed + continuous_embed + discrete_embed
embed += self.local_pos_encoding((obs_pos, obs_mask))
hidden_states = self.transformer(embed)
return hidden_states
def get_config(self):
return super(Gato, self).get_config()
class Transformer(models.Model):
def __init__(self,
config: Union[GatoConfig, Dict[str, Any]],
trainable: bool = True,
name: str = None,
**kwargs):
super(Transformer, self).__init__(trainable=trainable, name=name, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.encoders = [TransformerBlock(config=self.config, trainable=trainable, name='EncoderBlock{}'.format(idx))
for idx in range(self.config.num_transformer_blocks)]
def call(self, inputs, training=None, mask=None):
x = inputs
for encoder in self.encoders:
x = encoder(x)
return x
def get_config(self):
return super(Transformer, self).get_config()
class PatchEmbedding(models.Model):
def __init__(self,
config: Union[GatoConfig, Dict[str, Any]],
trainable: bool = True,
name: str = None,
**kwargs):
super(PatchEmbedding, self).__init__(trainable=trainable, name=name, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.residual_embedding = ResidualEmbedding(config, trainable=trainable, name='ResidualEmbedding')
self.pos_encoding = PatchPositionEncoding(config, trainable=trainable, name='PatchPositionEncoding')
def call(self, inputs, training=None, mask=None):
input_ids, (row_pos, col_pos) = inputs
patch_size = self.config.img_patch_size
depth = self.config.input_dim // (patch_size * patch_size)
x = tf.reshape(input_ids, (-1, input_ids.shape[1], patch_size, patch_size, depth))
x = self.residual_embedding(x)
x = self.pos_encoding((x, (row_pos, col_pos)))
return x
def get_config(self):
return super(PatchEmbedding, self).get_config()
| GATO-by-Deepmind-main | gato/models/__init__.py |
import tensorflow as tf
from tensorflow.keras import layers, models, activations
from gato import GatoConfig
from typing import Dict, Any, Union
class TransformerBlock(layers.Layer):
def __init__(self,
config: Union[GatoConfig, Dict[str, Any]],
trainable: bool = True,
name: str = None,
*args, **kwargs):
super(TransformerBlock, self).__init__(trainable, name, *args, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.attention = self.feed_forward = self.dropout = None
self.layer_norm1 = self.layer_norm2 = None
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
hidden_size = input_shape[-1]
self.attention = layers.MultiHeadAttention(num_heads=self.config.num_attention_heads,
key_dim=self.config.key_value_size,
value_dim=self.config.key_value_size,
dropout=self.config.dropout_rate,
name='attention')
self.dropout = layers.Dropout(self.config.dropout_rate, name='attention_dropout')
self.feed_forward = models.Sequential(layers=[
layers.Dense(units=self.config.feedforward_hidden_size,
activation='linear',
name='dense_intermediate'),
# Appendix C.1. Transformer Hyperparameters
            # Activation Function: GEGLU in the paper; approximated here with a plain GELU feed-forward
layers.Lambda(lambda x: activations.gelu(x, approximate=False), name='gelu'),
layers.Dropout(self.config.dropout_rate, name='dropout_intermediate'),
layers.Dense(units=hidden_size,
activation='linear',
name='dense'),
layers.Dropout(self.config.dropout_rate, name='dropout'),
], name='feed_forward')
self.layer_norm1 = layers.LayerNormalization(epsilon=1e-6, name='layer_norm1')
self.layer_norm2 = layers.LayerNormalization(epsilon=1e-6, name='layer_norm2')
def call(self, inputs, *args, **kwargs):
# Appendix C.1. Transformer Hyperparameters
# Layer Normalization: Pre-Norm
residual = inputs
x = self.layer_norm1(inputs)
x = self.attention(x, x, x)
x = self.dropout(x)
x = x + residual
residual = x
        x = self.layer_norm2(x)
x = self.feed_forward(x)
x = x + residual
return x
def get_config(self):
config = super(TransformerBlock, self).get_config()
config.update({
'config': self.config.to_dict(),
})
return config
| GATO-by-Deepmind-main | gato/models/transformer.py |
import tensorflow as tf
from gato import GatoConfig
from tensorflow.keras import models
from typing import Union, Dict, Any
def mu_law_encode(x, mu=100, m=256):
# Appendix B. Agent Data Tokenization Details
sign = tf.math.sign(x)
numerator = tf.math.log(tf.abs(x) * mu + 1.0)
denominator = tf.math.log(m * mu + 1.0)
return (numerator / denominator) * sign
def tokenize_continuous_values(x, mu=100, m=256, bins=1024, shift=None):
# Appendix B. Agent Data Tokenization Details
# > Finally, they are discretized using bins of uniform width on the domain [-1, 1].
c = mu_law_encode(x, mu, m)
# > We use 1024 bins and shift the resulting integers
# > so they are not overlapping with the ones used for text tokens.
c = (c + 1) * (bins / 2)
c = tf.cast(c, tf.int32)
if shift is not None:
c += shift
return c
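# --- Hedged usage sketch (not part of the original file) ---
# Tiny illustration of the tokenization path described above: mu-law encode to
# [-1, 1], discretize into `bins` integers, then shift past the text tokens.
# The shift of 32000 is an assumption matching GatoConfig's default vocabulary.
def _tokenize_example():
    values = tf.constant([-1.0, -0.1, 0.0, 0.1, 1.0])
    tokens = tokenize_continuous_values(values, mu=100, m=256, bins=1024, shift=32000)
    return tokens                             # int32 ids in [32000, 32000 + 1024]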
class ContinuousValueTokenizer(models.Model):
def __init__(self,
config: Union[GatoConfig, Dict[str, Any]],
mu=100, m=256, bins=1024,
trainable=False, name=None, **kwargs):
super(ContinuousValueTokenizer, self).__init__(trainable=trainable, name=name, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.mu = mu
self.m = m
self.bins = bins
def call(self, inputs, training=None, mask=None):
return tokenize_continuous_values(inputs, self.mu, self.m, self.bins, shift=self.config.vocabulary_size)
def get_config(self):
return super(ContinuousValueTokenizer, self).get_config()
| GATO-by-Deepmind-main | gato/models/tokenizers.py |
import os
from tiktokx.train import Trainer, parse_args
if __name__ == '__main__':
args = parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
data_config = {
'n_users': 12345,
'n_items': 67890
}
trainer = Trainer(data_config)
best_recall, run_time = trainer.train()
print(f'Best Recall@{args.Ks}: {best_recall}')
print(f'Total Running Time: {run_time}') | Tiktokx-main | example.py |
from tiktokx.utils import *
from tiktokx.model import Tiktok
from tiktokx.train import Trainer
| Tiktokx-main | tiktokx/__init__.py |
import os
import pickle
from time import time
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.sparse import csr_matrix
from torch.nn import init
from tiktokx.utils import build_knn_normalized_graph, build_sim, parse_args
args = parse_args()
class Tiktok(nn.Module):
def __init__(self,
n_users,
n_items,
embedding_dim,
weight_size,
dropout_list,
image_feats,
text_feats):
super().__init__()
self.n_users = n_users
self.n_items = n_items
self.embedding_size = embedding_dim
self.weight_size = weight_size
self.n_ui_layers = len(self.weight_size)
self.weight_size = [self.embedding_size] + self.weight_size
self.image_trans = nn.Linear(image_feats.shape[1], args.embed_size)
self.text_trans = nn.Linear(text_feats.shape[1], args.embed_size)
        nn.init.xavier_uniform_(self.image_trans.weight)
nn.init.xavier_uniform_(self.text_trans.weight)
self.encoder = nn.ModuleDict()
self.encoder["image_encoder"] = self.image_trans
self.encoder["text_encoder"] = self.text_trans
self.common_trans = nn.Linear(args.embed_size, args.embed_size)
nn.init.xavier_uniform_(self.common_trans.weight)
self.align = nn.ModuleDict()
self.align["common_trans"] = self.common_trans
        self.user_id_embedding = nn.Embedding(n_users, self.embedding_size)
        self.item_id_embedding = nn.Embedding(n_items, self.embedding_size)
nn.init.xavier_uniform_(self.user_id_embedding.weight)
nn.init.xavier_uniform_(self.item_id_embedding.weight)
self.image_feats = torch.tensor(image_feats).float().cuda()
self.text_feats = torch.tensor(text_feats).float().cuda()
self.image_embedding = nn.Embedding.from_pretrained(torch.Tensor(image_feats), freeze=False)
self.text_embedding = nn.Embedding.from_pretrained(torch.Tensor(text_feats), freeze=False)
self.softmax = nn.Softmax(dim=-1)
self.act = nn.Sigmoid()
self.sigmoid = nn.Sigmoid()
self.dropout = nn.Dropout(p=args.drop_rate)
self.batch_norm = nn.BatchNorm1d(args.embed_size)
self.tau = 0.5
initializer = nn.init.xavier_uniform_
self.weight_dict = nn.ParameterDict({
'w_q': nn.Parameter(initializer(torch.empty([args.embed_size, args.embed_size]))),
'w_k': nn.Parameter(initializer(torch.empty([args.embed_size, args.embed_size]))),
'w_v': nn.Parameter(initializer(torch.empty([args.embed_size, args.embed_size]))),
'w_self_attention_item': nn.Parameter(initializer(torch.empty([args.embed_size, args.embed_size]))),
'w_self_attention_user': nn.Parameter(initializer(torch.empty([args.embed_size, args.embed_size]))),
'w_self_attention_cat': nn.Parameter(initializer(torch.empty([args.head_num*args.embed_size, args.embed_size]))),
})
        self.embedding_dict = {'user': {}, 'item': {}}
def mm(self, x, y):
if args.sparse:
return torch.sparse.mm(x, y)
else:
return torch.mm(x, y)
def sim(self, z1, z2):
z1 = F.normalize(z1)
z2 = F.normalize(z2)
return torch.mm(z1, z2.t())
def batched_contrastive_loss(self, z1, z2, batch_size=4096):
device = z1.device
num_nodes = z1.size(0)
num_batches = (num_nodes - 1) // batch_size + 1
f = lambda x: torch.exp(x / self.tau)
indices = torch.arange(0, num_nodes).to(device)
losses = []
for i in range(num_batches):
mask = indices[i * batch_size:(i + 1) * batch_size]
refl_sim = f(self.sim(z1[mask], z1))
between_sim = f(self.sim(z1[mask], z2))
losses.append(-torch.log(
between_sim[:, i * batch_size:(i + 1) * batch_size].diag()
/ (refl_sim.sum(1) + between_sim.sum(1)
- refl_sim[:, i * batch_size:(i + 1) * batch_size].diag())))
loss_vec = torch.cat(losses)
        return loss_vec.mean()
def csr_norm(self,
csr_mat,
mean_flag=False):
rowsum = np.array(csr_mat.sum(1))
rowsum = np.power(rowsum+1e-8, -0.5).flatten()
rowsum[np.isinf(rowsum)] = 0.
rowsum_diag = sp.diags(rowsum)
colsum = np.array(csr_mat.sum(0))
colsum = np.power(colsum+1e-8, -0.5).flatten()
colsum[np.isinf(colsum)] = 0.
colsum_diag = sp.diags(colsum)
if mean_flag is False:
return rowsum_diag*csr_mat*colsum_diag
else:
return rowsum_diag*csr_mat
def matrix_to_tensor(self, cur_matrix):
if type(cur_matrix) != sp.coo_matrix:
cur_matrix = cur_matrix.tocoo()
indices = torch.from_numpy(np.vstack((cur_matrix.row, cur_matrix.col)).astype(np.int64))
values = torch.from_numpy(cur_matrix.data)
shape = torch.Size(cur_matrix.shape)
return torch.sparse.FloatTensor(indices, values, shape).to(torch.float32).cuda()
def para_dict_to_tensor(self, para_dict):
tensors = []
for beh in para_dict.keys():
tensors.append(para_dict[beh])
tensors = torch.stack(tensors, dim=0)
return tensors
def multihead_attention(self, trans_w, embedding_t_1, embedding_t):
q = self.para_dict_to_tensor(embedding_t)
v = k = self.para_dict_to_tensor(embedding_t_1)
beh, N, d_h = q.shape[0], q.shape[1], args.embed_size/args.head_num
Q = torch.matmul(q, trans_w["w_q"])
K = torch.matmul(k, trans_w["w_k"])
V = v
Q = Q.reshape(beh, N, args.head_num, int(d_h)).permute(2, 0, 1, 3)
        K = K.reshape(beh, N, args.head_num, int(d_h)).permute(2, 0, 1, 3)
Q = torch.unsqueeze(Q, 2)
K = torch.unsqueeze(K, 1)
V = torch.unsqueeze(V, 1)
att = torch.mul(Q, K) / torch.sqrt(torch.tensor(d_h))
att = torch.sum(att, dim=-1)
att = torch.unsqueeze(att, dim=-1)
att = F.softmax(att, dim=2)
Z = torch.mul(att, V)
Z = torch.sum(Z, dim=2)
Z_list = [value for value in Z]
Z = torch.cat(Z_list, -1)
Z = torch.matmul(Z, self.weight_dict['w_self_attention_cat'])
        Z = args.model_cat_rate * F.normalize(Z, p=2, dim=2)
return Z, att.detach()
def forward(self,
ui_graph,
iu_graph,
image_ui_graph,
image_iu_graph,
text_ui_graph,
text_iu_graph):
image_feats = image_item_feats = self.dropout(self.image_trans(self.image_feats))
text_feats = text_item_feats = self.dropout(self.text_trans(self.text_feats))
for i in range(args.layers):
image_user_feats = self.mm(ui_graph, image_feats)
image_item_feats = self.mm(iu_graph, image_user_feats)
image_user_id = self.mm(image_ui_graph, self.item_id_embedding.weight)
image_item_id = self.mm(image_iu_graph, self.user_id_embedding.weight)
text_user_feats = self.mm(ui_graph, text_feats)
text_item_feats = self.mm(iu_graph, text_user_feats)
text_user_id = self.mm(text_ui_graph, self.item_id_embedding.weight)
text_item_id = self.mm(text_iu_graph, self.user_id_embedding.weight)
self.embedding_dict['user']['image'] = image_user_id
self.embedding_dict['user']['text'] = text_user_id
self.embedding_dict['item']['image'] = image_item_id
self.embedding_dict['item']['text'] = text_item_id
user_z, _ = self.multihead_attention(self.weight_dict,
self.embedding_dict['user'],
self.embedding_dict['user'])
item_z, _ = self.multihead_attention(self.weight_dict,
self.embedding_dict['item'], self.embedding_dict['item'])
user_emb = user_z.mean(0)
item_emb = item_z.mean(0)
u_g_embeddings = self.user_id_embedding.weight + args.id_cat_rate*F.normalize(user_emb, p=2, dim=1)
i_g_embeddings = self.item_id_embedding.weight + args.id_cat_rate*F.normalize(item_emb, p=2, dim=1)
user_emb_list = [u_g_embeddings]
item_emb_list = [i_g_embeddings]
        for i in range(self.n_ui_layers):
if i == (self.n_ui_layers-1):
u_g_embeddings = self.softmax(torch.mm(ui_graph, i_g_embeddings))
i_g_embeddings = self.softmax(torch.mm(iu_graph, u_g_embeddings))
else:
u_g_embeddings = torch.mm(ui_graph, i_g_embeddings)
i_g_embeddings = torch.mm(iu_graph, u_g_embeddings)
            user_emb_list.append(u_g_embeddings)
item_emb_list.append(i_g_embeddings)
u_g_embeddings = torch.mean(torch.stack(user_emb_list), dim=0)
i_g_embeddings = torch.mean(torch.stack(item_emb_list), dim=0)
u_g_embeddings = u_g_embeddings + args.model_cat_rate*F.normalize(image_user_feats, p=2, dim=1) \
+ args.model_cat_rate*F.normalize(text_user_feats,p=2, dim=1 )
i_g_embeddings = i_g_embeddings + args.model_cat_rate*F.normalize(image_item_feats, p=2, dim=1) \
+ args.model_cat_rate*F.normalize(text_item_feats, p=2, dim=1)
return u_g_embeddings, i_g_embeddings, image_item_feats, text_item_feats, \
image_user_feats, text_user_feats, u_g_embeddings, i_g_embeddings, image_user_id, \
text_user_id, image_item_id, text_item_id
class Discriminator(nn.Module):
def __init__(self, dim):
super(Discriminator, self).__init__()
self.net = nn.Sequential(
nn.Linear(dim, int(dim/4)),
nn.LeakyReLU(True),
nn.BatchNorm1d(int(dim/4)),
nn.Dropout(args.G_drop1),
nn.Linear(int(dim/4), int(dim/8)),
nn.LeakyReLU(True),
nn.BatchNorm1d(int(dim/8)),
nn.Dropout(args.G_drop2),
nn.Linear(int(dim/8), 1),
nn.Sigmoid()
)
def forward(self, x):
output = 100*self.net(x.float())
return output.view(-1) | Tiktokx-main | tiktokx/model.py |
import argparse
import json
import os
import random as rd
from datetime import datetime
from time import time
import numpy as np
import scipy.sparse as sp
import torch
from scipy.sparse import csr_matrix
from sklearn.metrics import roc_auc_score
from tiktokx.utils import parse_args
args = parse_args()
def build_sim(context):
context_norm = context.div(torch.norm(context, p=2, dim=-1, keepdim=True))
    sim = torch.mm(context_norm, context_norm.transpose(1, 0))
return sim
def build_knn_normalized_graph(adj, topk, is_sparse, norm_type):
device = adj.device
knn_val, knn_ind, = torch.topk(adj, topk, dim=1) #[7050, 10][7050, 10]
n_item = knn_val.shape[0]
n_data = knn_val.shape[0]*knn_val.shape[1]
data = np.ones(n_data)
if is_sparse:
tuple_list = [[row, int(col)] for row in range(len(knn_ind)) for col in knn_ind[row]]
row = [i[0] for i in tuple_list]
        col = [i[1] for i in tuple_list]
ll_graph = csr_matrix((data, (row, col)), shape=(n_item, n_item))
return ll_graph
else:
weighted_adjacency_matrix = (torch.zeros_like(adj)).scatter_(-1, knn_ind, knn_val)
return get_dense_laplacian(weighted_adjacency_matrix, normalization=norm_type)
def get_sparse_laplacian(edge_index, edge_weight, num_nodes, normalization='none'): #[2, 70500], [70500]
from torch_scatter import scatter_add
row, col = edge_index[0], edge_index[1] #[70500] [70500]
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes) #[7050]
if normalization == 'sym':
deg_inv_sqrt = deg.pow_(-0.5)
deg_inv_sqrt.masked_fill_(deg_inv_sqrt == float('inf'), 0)
edge_weight = deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
elif normalization == 'rw':
deg_inv = 1.0 / deg
deg_inv.masked_fill_(deg_inv == float('inf'), 0)
edge_weight = deg_inv[row] * edge_weight
return edge_index, edge_weight
def get_dense_laplacian(adj, normalization='none'):
if normalization == 'sym':
rowsum = torch.sum(adj, -1)
d_inv_sqrt = torch.pow(rowsum, -0.5)
d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = torch.diagflat(d_inv_sqrt)
L_norm = torch.mm(torch.mm(d_mat_inv_sqrt, adj), d_mat_inv_sqrt)
elif normalization == 'rw':
rowsum = torch.sum(adj, -1)
d_inv = torch.pow(rowsum, -1)
d_inv[torch.isinf(d_inv)] = 0.
d_mat_inv = torch.diagflat(d_inv)
L_norm = torch.mm(d_mat_inv, adj)
elif normalization == 'none':
L_norm = adj
return L_norm
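# --- Hedged usage sketch (not part of the original file) ---
# Example of the k-NN graph pipeline above: cosine similarities from build_sim,
# then a top-k sparsified, symmetrically normalized dense adjacency matrix.
# Feature sizes and topk are illustrative assumptions.
def _knn_graph_example():
    feats = torch.rand(6, 16)                 # 6 items with non-negative features
    sim = build_sim(feats)                    # (6, 6) cosine similarity
    adj = build_knn_normalized_graph(sim, topk=3, is_sparse=False, norm_type='sym')
    return adj.shape                          # (6, 6)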
######## Metrics
def recall(rank, ground_truth, N):
return len(set(rank[:N]) & set(ground_truth)) / float(len(set(ground_truth)))
def precision_at_k(r, k):
assert k >= 1
r = np.asarray(r)[:k]
return np.mean(r)
def average_precision(r, cut):
r = np.asarray(r)
out = [precision_at_k(r, k + 1) for k in range(cut) if r[k]]
if not out:
return 0.
return np.sum(out)/float(min(cut, np.sum(r)))
def mean_average_precision(rs):
    return np.mean([average_precision(r, len(r)) for r in rs])
def dcg_at_k(r, k, method=1):
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)) )
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
            raise ValueError('method must be 0 or 1')
return 0.
def ndcg_at_k(r, k, method=1):
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
def recall_at_k(r, k, all_pos_num):
r = np.asfarray(r)[:k]
if all_pos_num == 0:
return 0
else:
return np.sum(r) / all_pos_num
def hit_at_k(r, k):
r = np.array(r)[:k]
if np.sum(r) > 0:
return 1.
else:
return 0.
def F1(pre, rec):
if pre + rec > 0:
return (2.0 * pre * rec) / (pre + rec)
else:
return 0.
def auc(ground_truth, prediction):
try:
res = roc_auc_score(y_true=ground_truth, y_score=prediction)
except Exception:
res = 0.
return res
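# --- Hedged usage sketch (not part of the original file) ---
# The ranking-metric helpers above on a toy relevance vector, where r marks for
# each ranked position whether the item appears in the ground truth set.
def _metrics_example():
    r = [1, 0, 1, 0, 0]                           # hits at ranks 1 and 3
    k = 5
    rec = recall_at_k(r, k, all_pos_num=3)        # 2 of 3 positives retrieved
    ndcg = ndcg_at_k(r, k)                        # DCG normalized by the ideal DCG
    hit = hit_at_k(r, k)                          # 1.0 because at least one hit
    return rec, ndcg, hit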
######## Logger
class Logger():
def __init__(self,
filename,
is_debug,
path="tiktok/logs"):
self.filename = filename
self.path = path
self.log_ = not is_debug
def logging(self, s):
s = str(s)
print(datetime.now().strftime('%Y-%m-%d %H:%M: '), s)
if self.log_:
with open(os.path.join(os.path.join(self.path, self.filename)), 'a+') as f_log:
f_log.write(str(datetime.now().strftime('%Y-%m-%d %H:%M: ')) + s + '\n')
############
class Data(object):
def __init__(self, path, batch_size):
self.path = path #+ '/%d-core' % args.core
self.batch_size = batch_size
train_file = path + '/train.json'#+ '/%d-core/train.json' % (args.core)
val_file = path + '/val.json' #+ '/%d-core/val.json' % (args.core)
test_file = path + '/test.json' #+ '/%d-core/test.json' % (args.core)
#get number of users and items
self.n_users, self.n_items = 0, 0
        self.n_train, self.n_test, self.n_val = 0, 0, 0
self.neg_pools = {}
self.exist_users = []
train = json.load(open(train_file))
test = json.load(open(test_file))
val = json.load(open(val_file))
for uid, items in train.items():
if len(items) == 0:
continue
uid = int(uid)
self.exist_users.append(uid)
self.n_items = max(self.n_items, max(items))
self.n_users = max(self.n_users, uid)
self.n_train += len(items)
for uid, items in test.items():
uid = int(uid)
try:
self.n_items = max(self.n_items, max(items))
self.n_test += len(items)
except:
continue
for uid, items in val.items():
uid = int(uid)
try:
self.n_items = max(self.n_items, max(items))
self.n_val += len(items)
except:
continue
self.n_items += 1
self.n_users += 1
self.print_statistics()
self.R = sp.dok_matrix((self.n_users, self.n_items), dtype=np.float32)
self.R_Item_Interacts = sp.dok_matrix((self.n_items, self.n_items), dtype=np.float32)
self.train_items, self.test_set, self.val_set = {}, {}, {}
for uid, train_items in train.items():
if len(train_items) == 0:
continue
uid = int(uid)
for idx, i in enumerate(train_items):
self.R[uid, i] = 1.
self.train_items[uid] = train_items
for uid, test_items in test.items():
uid = int(uid)
if len(test_items) == 0:
continue
try:
self.test_set[uid] = test_items
except:
continue
for uid, val_items in val.items():
uid = int(uid)
if len(val_items) == 0:
continue
try:
self.val_set[uid] = val_items
except:
continue
def get_adj_mat(self):
try:
t1 = time()
adj_mat = sp.load_npz(self.path + '/s_adj_mat.npz')
norm_adj_mat = sp.load_npz(self.path + '/s_norm_adj_mat.npz')
mean_adj_mat = sp.load_npz(self.path + '/s_mean_adj_mat.npz')
print('already load adj matrix', adj_mat.shape, time() - t1)
except Exception:
adj_mat, norm_adj_mat, mean_adj_mat = self.create_adj_mat()
sp.save_npz(self.path + '/s_adj_mat.npz', adj_mat)
sp.save_npz(self.path + '/s_norm_adj_mat.npz', norm_adj_mat)
sp.save_npz(self.path + '/s_mean_adj_mat.npz', mean_adj_mat)
return adj_mat, norm_adj_mat, mean_adj_mat
def create_adj_mat(self):
t1 = time()
adj_mat = sp.dok_matrix((self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32)
adj_mat = adj_mat.tolil()
R = self.R.tolil()
adj_mat[:self.n_users, self.n_users:] = R
adj_mat[self.n_users:, :self.n_users] = R.T
adj_mat = adj_mat.todok()
print('already create adjacency matrix', adj_mat.shape, time() - t1)
t2 = time()
def normalized_adj_single(adj):
rowsum = np.array(adj.sum(1))
d_inv = np.power(rowsum, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
d_mat_inv = sp.diags(d_inv)
norm_adj = d_mat_inv.dot(adj)
# norm_adj = adj.dot(d_mat_inv)
print('generate single-normalized adjacency matrix.')
return norm_adj.tocoo()
def get_D_inv(adj):
rowsum = np.array(adj.sum(1))
d_inv = np.power(rowsum, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
d_mat_inv = sp.diags(d_inv)
return d_mat_inv
def check_adj_if_equal(adj):
dense_A = np.array(adj.todense())
degree = np.sum(dense_A, axis=1, keepdims=False)
temp = np.dot(np.diag(np.power(degree, -1)), dense_A)
print('check normalized adjacency matrix whether equal to this laplacian matrix.')
return temp
norm_adj_mat = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0]))
mean_adj_mat = normalized_adj_single(adj_mat)
print('already normalize adjacency matrix', time() - t2)
return adj_mat.tocsr(), norm_adj_mat.tocsr(), mean_adj_mat.tocsr()
def sample(self):
if self.batch_size <= self.n_users:
users = rd.sample(self.exist_users, self.batch_size)
else:
users = [rd.choice(self.exist_users) for _ in range(self.batch_size)]
# users = self.exist_users[:]
def sample_pos_items_for_u(u, num):
pos_items = self.train_items[u]
n_pos_items = len(pos_items)
pos_batch = []
while True:
if len(pos_batch) == num: break
pos_id = np.random.randint(low=0, high=n_pos_items, size=1)[0]
pos_i_id = pos_items[pos_id]
if pos_i_id not in pos_batch:
pos_batch.append(pos_i_id)
return pos_batch
def sample_neg_items_for_u(u, num):
neg_items = []
while True:
if len(neg_items) == num: break
neg_id = np.random.randint(low=0, high=self.n_items, size=1)[0]
if neg_id not in self.train_items[u] and neg_id not in neg_items:
neg_items.append(neg_id)
return neg_items
def sample_neg_items_for_u_from_pools(u, num):
neg_items = list(set(self.neg_pools[u]) - set(self.train_items[u]))
return rd.sample(neg_items, num)
pos_items, neg_items = [], []
for u in users:
pos_items += sample_pos_items_for_u(u, 1)
neg_items += sample_neg_items_for_u(u, 1)
# neg_items += sample_neg_items_for_u(u, 3)
return users, pos_items, neg_items
def print_statistics(self):
print('n_users=%d, n_items=%d' % (self.n_users, self.n_items))
print('n_interactions=%d' % (self.n_train + self.n_test))
print('n_train=%d, n_test=%d, sparsity=%.5f' % (self.n_train, self.n_test, (self.n_train + self.n_test)/(self.n_users * self.n_items)))
def parse_args():
parser = argparse.ArgumentParser(description="")
#useless
parser.add_argument('--verbose', type=int, default=5, help='Interval of evaluation.')
parser.add_argument('--core', type=int, default=5, help='5-core for warm-start; 0-core for cold start')
parser.add_argument('--lambda_coeff', type=float, default=0.9, help='Lambda value of skip connection')
parser.add_argument('--early_stopping_patience', type=int, default=7, help='')
parser.add_argument('--layers', type=int, default=1, help='Number of feature graph conv layers')
parser.add_argument('--mess_dropout', nargs='?', default='[0.1, 0.1]', help='Keep probability w.r.t. message dropout (i.e., 1-dropout_ratio) for each deep layer. 1: no dropout.')
parser.add_argument('--sparse', type=int, default=1, help='Sparse or dense adjacency matrix')
parser.add_argument('--test_flag', nargs='?', default='part', help='Specify the test type from {part, full}, indicating whether the reference is done in mini-batch')
parser.add_argument('--metapath_threshold', default=2, type=int, help='metapath_threshold')
parser.add_argument('--sc', type=float, default=1.0, help='GCN self connection')
parser.add_argument('--ssl_c_rate', type=float, default=1.3, help='ssl_c_rate')
parser.add_argument('--ssl_s_rate', type=float, default=0.8, help='ssl_s_rate')
parser.add_argument('--g_rate', type=float, default=0.000029, help='ssl_s_rate')
parser.add_argument('--sample_num', default=1, type=int, help='sample_num')
parser.add_argument('--sample_num_neg', default=1, type=int, help='sample_num')
parser.add_argument('--sample_num_ii', default=8, type=int, help='sample_num')
parser.add_argument('--sample_num_co', default=2, type=int, help='sample_num')
parser.add_argument('--mask_rate', default=0.75, type=float, help='sample_num')
parser.add_argument('--gss_rate', default=0.85, type=float, help='gene_self_subgraph_rate')
parser.add_argument('--anchor_rate', default=0.75, type=float, help='anchor_rate')
parser.add_argument('--feat_reg_decay', default=1e-5, type=float, help='feat_reg_decay')
parser.add_argument('--ad1_rate', default=0.2, type=float, help='ad1_rate')
parser.add_argument('--ad2_rate', default=0.2, type=float, help='ad1_rate')
parser.add_argument('--ad_sampNum', type=int, default=1, help='ad topk')
parser.add_argument('--ad_topk_multi_num', type=int, default=100, help='ad topk')
parser.add_argument('--fake_gene_rate', default=0.0001, type=float, help='fake_gene_rate')
parser.add_argument('--ID_layers', type=int, default=1, help='Number of item graph conv layers')
parser.add_argument('--reward_rate', default=1, type=float, help='fake_gene_rate')
parser.add_argument('--G_embed_size', type=int, default=64, help='Embedding size.')
parser.add_argument('--model_num', default=2, type=float, help='fake_gene_rate')
parser.add_argument('--negrate', default=0.01, type=float, help='item_neg_sample_rate')
parser.add_argument('--cis', default=25, type=int, help='')
parser.add_argument('--confidence', default=0.5, type=float, help='')
parser.add_argument('--ii_it', default=15, type=int, help='')
parser.add_argument('--isload', default=False , type=bool, help='whether load model') #
parser.add_argument('--isJustTest', default=False , type=bool, help='whether load model')
parser.add_argument('--loadModelPath', default='/home/ww/Code/work3/BSTRec/Model/retailrocket/for_meta_hidden_dim_dim__8_retailrocket_2021_07_10__18_35_32_lr_0.0003_reg_0.01_batch_size_1024_gnn_layer_[16,16,16].pth', type=str, help='loadModelPath')
parser.add_argument('--title', default="try_to_draw_line", type=str, help='') #
#train
parser.add_argument('--data_path', nargs='?', default='/home/ww/Code/work5/MMSSL/data/', help='Input data path.')
parser.add_argument('--seed', type=int, default=2022, help='Random seed')
parser.add_argument('--dataset', nargs='?', default='', help='Choose a dataset from {sports, baby, clothing, tiktok, allrecipes}')
parser.add_argument('--epoch', type=int, default=1000, help='Number of epoch.') #default: 1000
parser.add_argument('--batch_size', type=int, default=1024, help='Batch size.')
parser.add_argument('--embed_size', type=int, default=64,help='Embedding size.')
parser.add_argument('--D_lr', type=float, default=3e-4, help='Learning rate.')
parser.add_argument('--topk', type=int, default=10, help='K value of k-NN sparsification')
parser.add_argument('--cf_model', nargs='?', default='slmrec', help='Downstream Collaborative Filtering model {mf}')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--cl_rate', type=float, default=0.03, help='Control the effect of the contrastive auxiliary task')
parser.add_argument('--norm_type', nargs='?', default='sym', help='Adjacency matrix normalization operation')
parser.add_argument('--gpu_id', type=int, default=0, help='GPU id')
parser.add_argument('--Ks', nargs='?', default='[10, 20, 50]', help='K value of ndcg/recall @ k')
parser.add_argument('--regs', nargs='?', default='[1e-5,1e-5,1e-2]', help='for emb_loss.') #default: '[1e-5,1e-5,1e-2]'
parser.add_argument('--lr', type=float, default=0.00055, help='Learning rate.')
parser.add_argument('--emm', default=1e-3, type=float, help='for feature embedding bpr') #
parser.add_argument('--L2_alpha', default=1e-3, type=float, help='') #
parser.add_argument('--weight_decay', default=1e-4, type=float, help='for opt_D') #
#GNN
parser.add_argument('--drop_rate', type=float, default=0.2, help='dropout rate')
parser.add_argument('--model_cat_rate', type=float, default=0.55, help='model_cat_rate')
parser.add_argument('--gnn_cat_rate', type=float, default=0.55, help='gnn_cat_rate')
parser.add_argument('--id_cat_rate', type=float, default=0.36, help='before GNNs')
parser.add_argument('--id_cat_rate1', type=float, default=0.36, help='id_cat_rate')
parser.add_argument('--head_num', default=4, type=int, help='head_num_of_multihead_attention. For multi-model relation.') #
parser.add_argument('--dgl_nei_num', default=8, type=int, help='dgl_nei_num') #
#GAN
parser.add_argument('--weight_size', nargs='?', default='[64, 64]', help='Output sizes of every layer') #default: '[64, 64]'
    parser.add_argument('--G_rate', default=0.0001, type=float, help='weight of the generator adversarial loss') #
parser.add_argument('--G_drop1', default=0.31, type=float, help='for D model2') #
parser.add_argument('--G_drop2', default=0.5, type=float, help='') #
parser.add_argument('--gp_rate', default=1, type=float, help='gradient penal') #
parser.add_argument('--real_data_tau', default=0.005, type=float, help='for real_data soft') #
parser.add_argument('--ui_pre_scale', default=100, type=int, help='ui_pre_scale')
#cl
parser.add_argument('--T', default=1, type=int, help='it for ui update')
parser.add_argument('--tau', default=0.5, type=float, help='') #
parser.add_argument('--geneGraph_rate', default=0.1, type=float, help='') #
parser.add_argument('--geneGraph_rate_pos', default=2, type=float, help='') #
parser.add_argument('--geneGraph_rate_neg', default=-1, type=float, help='') #
parser.add_argument('--m_topk_rate', default=0.0001, type=float, help='for reconstruct')
    parser.add_argument('--log_log_scale', default=0.00001, type=float, help='log_log_scale')
parser.add_argument('--point', default='', type=str, help='point')
return parser.parse_args()
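# Illustrative command line (hypothetical invocation; the flags are the ones defined above):
#   python -m tiktokx.train --dataset tiktok --batch_size 1024 --lr 0.00055 --cl_rate 0.03
# parse_args() returns an argparse.Namespace, so downstream code reads e.g. args.batch_size.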
| Tiktokx-main | tiktokx/utils.py |
import copy
import math
import os
import pickle
import random
import sys
from datetime import datetime
from time import time
import dgl
import numpy as np
import scipy.sparse as sp
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.sparse as sparse
import visdom
from Models import MMSSL, Discriminator
from scipy.sparse import csr_matrix
from torch import autograd
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from tiktokx.utils import *
from tiktokx.utils import Logger, build_knn_normalized_graph, build_sim, parse_args
args = parse_args()
class Trainer(object):
def __init__(self, data_config):
self.task_name = "%s_%s_%s" % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), args.dataset, args.cf_model,)
self.logger = Logger(filename=self.task_name, is_debug=args.debug)
self.logger.logging("PID: %d" % os.getpid())
self.logger.logging(str(args))
self.mess_dropout = eval(args.mess_dropout)
self.lr = args.lr
self.emb_dim = args.embed_size
self.batch_size = args.batch_size
self.weight_size = eval(args.weight_size)
self.n_layers = len(self.weight_size)
self.regs = eval(args.regs)
self.decay = self.regs[0]
self.image_feats = np.load(args.data_path + '{}/image_feat.npy'.format(args.dataset))
self.text_feats = np.load(args.data_path + '{}/text_feat.npy'.format(args.dataset))
self.image_feat_dim = self.image_feats.shape[-1]
self.text_feat_dim = self.text_feats.shape[-1]
self.ui_graph = self.ui_graph_raw = pickle.load(open(args.data_path + args.dataset + '/train_mat','rb'))
self.image_ui_graph_tmp = self.text_ui_graph_tmp = torch.tensor(self.ui_graph_raw.todense()).cuda()
self.image_iu_graph_tmp = self.text_iu_graph_tmp = torch.tensor(self.ui_graph_raw.T.todense()).cuda()
self.image_ui_index = {'x':[], 'y':[]}
self.text_ui_index = {'x':[], 'y':[]}
self.n_users = self.ui_graph.shape[0]
self.n_items = self.ui_graph.shape[1]
self.iu_graph = self.ui_graph.T
self.ui_graph = self.matrix_to_tensor(self.csr_norm(self.ui_graph, mean_flag=True))
self.iu_graph = self.matrix_to_tensor(self.csr_norm(self.iu_graph, mean_flag=True))
self.image_ui_graph = self.text_ui_graph = self.ui_graph
self.image_iu_graph = self.text_iu_graph = self.iu_graph
self.model = MMSSL(self.n_users, self.n_items, self.emb_dim, self.weight_size, self.mess_dropout, self.image_feats, self.text_feats)
self.model = self.model.cuda()
self.D = Discriminator(self.n_items).cuda()
self.D.apply(self.weights_init)
self.optim_D = optim.Adam(self.D.parameters(), lr=args.D_lr, betas=(0.5, 0.9))
self.optimizer_D = optim.AdamW(
[
{'params':self.model.parameters()},
]
, lr=self.lr)
self.scheduler_D = self.set_lr_scheduler()
def set_lr_scheduler(self):
fac = lambda epoch: 0.96 ** (epoch / 50)
scheduler_D = optim.lr_scheduler.LambdaLR(self.optimizer_D, lr_lambda=fac)
return scheduler_D
def csr_norm(self, csr_mat, mean_flag=False):
rowsum = np.array(csr_mat.sum(1))
rowsum = np.power(rowsum+1e-8, -0.5).flatten()
rowsum[np.isinf(rowsum)] = 0.
rowsum_diag = sp.diags(rowsum)
colsum = np.array(csr_mat.sum(0))
colsum = np.power(colsum+1e-8, -0.5).flatten()
colsum[np.isinf(colsum)] = 0.
colsum_diag = sp.diags(colsum)
if mean_flag == False:
return rowsum_diag*csr_mat*colsum_diag
else:
return rowsum_diag*csr_mat
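        # For reference: with mean_flag=False this yields the symmetrically normalized
        # adjacency D_r^{-1/2} A D_c^{-1/2}; with mean_flag=True only the row factor
        # D_r^{-1/2} A is applied (the 1e-8 offset guards against zero-degree rows/columns).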
def matrix_to_tensor(self, cur_matrix):
if type(cur_matrix) != sp.coo_matrix:
cur_matrix = cur_matrix.tocoo() #
indices = torch.from_numpy(np.vstack((cur_matrix.row, cur_matrix.col)).astype(np.int64)) #
values = torch.from_numpy(cur_matrix.data) #
shape = torch.Size(cur_matrix.shape)
return torch.sparse.FloatTensor(indices, values, shape).to(torch.float32).cuda() #
def innerProduct(self, u_pos, i_pos, u_neg, j_neg):
pred_i = torch.sum(torch.mul(u_pos,i_pos), dim=-1)
pred_j = torch.sum(torch.mul(u_neg,j_neg), dim=-1)
return pred_i, pred_j
def sampleTrainBatch_dgl(self, batIds, pos_id=None, g=None, g_neg=None, sample_num=None, sample_num_neg=None):
sub_g = dgl.sampling.sample_neighbors(g.cpu(), {'user':batIds}, sample_num, edge_dir='out', replace=True)
row, col = sub_g.edges()
row = row.reshape(len(batIds), sample_num)
col = col.reshape(len(batIds), sample_num)
if g_neg==None:
return row, col
else:
sub_g_neg = dgl.sampling.sample_neighbors(g_neg, {'user':batIds}, sample_num_neg, edge_dir='out', replace=True)
row_neg, col_neg = sub_g_neg.edges()
row_neg = row_neg.reshape(len(batIds), sample_num_neg)
col_neg = col_neg.reshape(len(batIds), sample_num_neg)
return row, col, col_neg
def weights_init(self, m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight)
m.bias.data.fill_(0)
def gradient_penalty(self, D, xr, xf):
LAMBDA = 0.3
xf = xf.detach()
xr = xr.detach()
alpha = torch.rand(args.batch_size*2, 1).cuda()
alpha = alpha.expand_as(xr)
interpolates = alpha * xr + ((1 - alpha) * xf)
interpolates.requires_grad_()
disc_interpolates = D(interpolates)
gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones_like(disc_interpolates),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gp = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
return gp
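        # For reference, this is the WGAN-GP gradient penalty (Gulrajani et al., 2017):
        #   gp = LAMBDA * E[(||grad_{x_hat} D(x_hat)||_2 - 1)^2],
        # with x_hat = alpha * x_real + (1 - alpha) * x_fake interpolated between the
        # real and fake discriminator inputs.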
def weighted_sum(self, anchor, nei, co):
ac = torch.multiply(anchor, co).sum(-1).sum(-1)
nc = torch.multiply(nei, co).sum(-1).sum(-1)
an = (anchor.permute(1, 0, 2)[0])
ne = (nei.permute(1, 0, 2)[0])
an_w = an*(ac.unsqueeze(-1).repeat(1, args.embed_size))
ne_w = ne*(nc.unsqueeze(-1).repeat(1, args.embed_size))
res = (args.anchor_rate*an_w + (1-args.anchor_rate)*ne_w).reshape(-1, args.sample_num_ii, args.embed_size).sum(1)
return res
def sample_topk(self, u_sim, users, emb_type=None):
        topk_p, topk_id = torch.topk(u_sim, args.ad_topk*args.ad_topk_multi_num, dim=-1)
topk_data = topk_p.reshape(-1).cpu()
topk_col = topk_id.reshape(-1).cpu().int()
topk_row = torch.tensor(np.array(users)).unsqueeze(1).repeat(1, args.ad_topk*args.ad_topk_multi_num).reshape(-1).int() #
topk_csr = csr_matrix((topk_data.detach().numpy(), (topk_row.detach().numpy(), topk_col.detach().numpy())), shape=(self.n_users, self.n_items))
topk_g = dgl.heterograph({('user','ui','item'):topk_csr.nonzero()})
_, topk_id = self.sampleTrainBatch_dgl(users, g=topk_g, sample_num=args.ad_topk, pos_id=None, g_neg=None, sample_num_neg=None)
self.gene_fake[emb_type] = topk_id
topk_id_u = torch.arange(len(users)).unsqueeze(1).repeat(1, args.ad_topk)
topk_p = u_sim[topk_id_u, topk_id]
return topk_p, topk_id
def ssl_loss_calculation(self, ssl_image_logit, ssl_text_logit, ssl_common_logit):
ssl_label_1_s2 = torch.ones(1, self.n_items).cuda()
ssl_label_0_s2 = torch.zeros(1, self.n_items).cuda()
ssl_label_s2 = torch.cat((ssl_label_1_s2, ssl_label_0_s2), 1)
ssl_image_s2 = self.bce(ssl_image_logit, ssl_label_s2)
ssl_text_s2 = self.bce(ssl_text_logit, ssl_label_s2)
ssl_loss_s2 = ssl_image_s2 + ssl_text_s2
ssl_label_1_c2 = torch.ones(1, self.n_items*2).cuda()
ssl_label_0_c2 = torch.zeros(1, self.n_items*2).cuda()
ssl_label_c2 = torch.cat((ssl_label_1_c2, ssl_label_0_c2), 1)
ssl_result_c2 = self.bce(ssl_common_logit, ssl_label_c2)
ssl_loss_c2 = ssl_result_c2
ssl_loss2 = args.ssl_s_rate*ssl_loss_s2 + args.ssl_c_rate*ssl_loss_c2
return ssl_loss2
def sim(self, z1, z2):
z1 = F.normalize(z1)
z2 = F.normalize(z2)
# z1 = z1/((z1**2).sum(-1) + 1e-8)
# z2 = z2/((z2**2).sum(-1) + 1e-8)
return torch.mm(z1, z2.t())
def batched_contrastive_loss(self, z1, z2, batch_size=1024):
device = z1.device
num_nodes = z1.size(0)
num_batches = (num_nodes - 1) // batch_size + 1
f = lambda x: torch.exp(x / args.tau) #
indices = torch.arange(0, num_nodes).to(device)
losses = []
for i in range(num_batches):
tmp_i = indices[i * batch_size:(i + 1) * batch_size]
tmp_refl_sim_list = []
tmp_between_sim_list = []
for j in range(num_batches):
tmp_j = indices[j * batch_size:(j + 1) * batch_size]
tmp_refl_sim = f(self.sim(z1[tmp_i], z1[tmp_j]))
tmp_between_sim = f(self.sim(z1[tmp_i], z2[tmp_j]))
tmp_refl_sim_list.append(tmp_refl_sim)
tmp_between_sim_list.append(tmp_between_sim)
refl_sim = torch.cat(tmp_refl_sim_list, dim=-1)
between_sim = torch.cat(tmp_between_sim_list, dim=-1)
losses.append(-torch.log(between_sim[:, i * batch_size:(i + 1) * batch_size].diag()/ (refl_sim.sum(1) + between_sim.sum(1) - refl_sim[:, i * batch_size:(i + 1) * batch_size].diag())+1e-8))
del refl_sim, between_sim, tmp_refl_sim_list, tmp_between_sim_list
loss_vec = torch.cat(losses)
return loss_vec.mean()
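        # For reference, each entry of `losses` is an InfoNCE term computed block-wise:
        #   -log( exp(sim(z1_i, z2_i)/tau) /
        #         (sum_j exp(sim(z1_i, z1_j)/tau) + sum_j exp(sim(z1_i, z2_j)/tau)
        #          - exp(sim(z1_i, z1_i)/tau)) )
        # so the full n x n similarity matrix never has to be materialized at once.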
def feat_reg_loss_calculation(self, g_item_image, g_item_text, g_user_image, g_user_text):
feat_reg = 1./2*(g_item_image**2).sum() + 1./2*(g_item_text**2).sum() \
+ 1./2*(g_user_image**2).sum() + 1./2*(g_user_text**2).sum()
feat_reg = feat_reg / self.n_items
feat_emb_loss = args.feat_reg_decay * feat_reg
return feat_emb_loss
def fake_gene_loss_calculation(self, u_emb, i_emb, emb_type=None):
if self.gene_u!=None:
gene_real_loss = (-F.logsigmoid((u_emb[self.gene_u]*i_emb[self.gene_real]).sum(-1)+1e-8)).mean()
gene_fake_loss = (1-(-F.logsigmoid((u_emb[self.gene_u]*i_emb[self.gene_fake[emb_type]]).sum(-1)+1e-8))).mean()
gene_loss = gene_real_loss + gene_fake_loss
else:
gene_loss = 0
return gene_loss
def reward_loss_calculation(self, users, re_u, re_i, topk_id, topk_p):
self.gene_u = torch.tensor(np.array(users)).unsqueeze(1).repeat(1, args.ad_topk)
reward_u = re_u[self.gene_u]
reward_i = re_i[topk_id]
reward_value = (reward_u*reward_i).sum(-1)
reward_loss = -(((topk_p*reward_value).sum(-1)).mean()+1e-8).log()
return reward_loss
def u_sim_calculation(self, users, user_final, item_final):
topk_u = user_final[users]
u_ui = torch.tensor(self.ui_graph_raw[users].todense()).cuda()
num_batches = (self.n_items - 1) // args.batch_size + 1
indices = torch.arange(0, self.n_items).cuda()
u_sim_list = []
for i_b in range(num_batches):
index = indices[i_b * args.batch_size:(i_b + 1) * args.batch_size]
sim = torch.mm(topk_u, item_final[index].T)
sim_gt = torch.multiply(sim, (1-u_ui[:, index]))
u_sim_list.append(sim_gt)
u_sim = F.normalize(torch.cat(u_sim_list, dim=-1), p=2, dim=1)
return u_sim
def test(self, users_to_test, is_val):
self.model.eval()
with torch.no_grad():
ua_embeddings, ia_embeddings, *rest = self.model(self.ui_graph, self.iu_graph, self.image_ui_graph, self.image_iu_graph, self.text_ui_graph, self.text_iu_graph)
result = test_torch(ua_embeddings, ia_embeddings, users_to_test, is_val)
return result
def train(self):
now_time = datetime.now()
run_time = datetime.strftime(now_time,'%Y_%m_%d__%H_%M_%S')
training_time_list = []
loss_loger, pre_loger, rec_loger, ndcg_loger, hit_loger = [], [], [], [], []
line_var_loss, line_g_loss, line_d_loss, line_cl_loss, line_var_recall, line_var_precision, line_var_ndcg = [], [], [], [], [], [], []
stopping_step = 0
should_stop = False
cur_best_pre_0 = 0.
# tb_writer = SummaryWriter(log_dir="/home/ww/Code/work5/MICRO2Ours/tensorboard/")
# tensorboard_cnt = 0
n_batch = data_generator.n_train // args.batch_size + 1
best_recall = 0
for epoch in range(args.epoch):
t1 = time()
loss, mf_loss, emb_loss, reg_loss = 0., 0., 0., 0.
contrastive_loss = 0.
n_batch = data_generator.n_train // args.batch_size + 1
sample_time = 0.
self.gene_u, self.gene_real, self.gene_fake = None, None, {}
self.topk_p_dict, self.topk_id_dict = {}, {}
for idx in tqdm(range(n_batch)):
self.model.train()
sample_t1 = time()
users, pos_items, neg_items = data_generator.sample()
sample_time += time() - sample_t1
with torch.no_grad():
ua_embeddings, ia_embeddings, image_item_embeds, text_item_embeds, image_user_embeds, text_user_embeds \
, _, _, _, _, _, _ \
= self.model(self.ui_graph, self.iu_graph, self.image_ui_graph, self.image_iu_graph, self.text_ui_graph, self.text_iu_graph)
ui_u_sim_detach = self.u_sim_calculation(users, ua_embeddings, ia_embeddings).detach()
image_u_sim_detach = self.u_sim_calculation(users, image_user_embeds, image_item_embeds).detach()
text_u_sim_detach = self.u_sim_calculation(users, text_user_embeds, text_item_embeds).detach()
inputf = torch.cat((image_u_sim_detach, text_u_sim_detach), dim=0)
predf = (self.D(inputf))
lossf = (predf.mean())
u_ui = torch.tensor(self.ui_graph_raw[users].todense()).cuda()
u_ui = F.softmax(u_ui - args.log_log_scale*torch.log(-torch.log(torch.empty((u_ui.shape[0], u_ui.shape[1]), dtype=torch.float32).uniform_(0,1).cuda()+1e-8)+1e-8)/args.real_data_tau, dim=1) #0.002
u_ui += ui_u_sim_detach*args.ui_pre_scale
u_ui = F.normalize(u_ui, dim=1)
inputr = torch.cat((u_ui, u_ui), dim=0)
predr = (self.D(inputr))
lossr = - (predr.mean())
gp = self.gradient_penalty(self.D, inputr, inputf.detach())
loss_D = lossr + lossf + args.gp_rate*gp
self.optim_D.zero_grad()
loss_D.backward()
self.optim_D.step()
line_d_loss.append(loss_D.detach().data)
G_ua_embeddings, G_ia_embeddings, G_image_item_embeds, G_text_item_embeds, G_image_user_embeds, G_text_user_embeds \
, G_user_emb, _, G_image_user_id, G_text_user_id, _, _ \
= self.model(self.ui_graph, self.iu_graph, self.image_ui_graph, self.image_iu_graph, self.text_ui_graph, self.text_iu_graph)
G_u_g_embeddings = G_ua_embeddings[users]
G_pos_i_g_embeddings = G_ia_embeddings[pos_items]
G_neg_i_g_embeddings = G_ia_embeddings[neg_items]
G_batch_mf_loss, G_batch_emb_loss, G_batch_reg_loss = self.bpr_loss(G_u_g_embeddings, G_pos_i_g_embeddings, G_neg_i_g_embeddings)
G_image_u_sim = self.u_sim_calculation(users, G_image_user_embeds, G_image_item_embeds)
G_text_u_sim = self.u_sim_calculation(users, G_text_user_embeds, G_text_item_embeds)
G_image_u_sim_detach = G_image_u_sim.detach()
G_text_u_sim_detach = G_text_u_sim.detach()
if idx%args.T==0 and idx!=0:
self.image_ui_graph_tmp = csr_matrix((torch.ones(len(self.image_ui_index['x'])),(self.image_ui_index['x'], self.image_ui_index['y'])), shape=(self.n_users, self.n_items))
self.text_ui_graph_tmp = csr_matrix((torch.ones(len(self.text_ui_index['x'])),(self.text_ui_index['x'], self.text_ui_index['y'])), shape=(self.n_users, self.n_items))
self.image_iu_graph_tmp = self.image_ui_graph_tmp.T
self.text_iu_graph_tmp = self.text_ui_graph_tmp.T
self.image_ui_graph = self.sparse_mx_to_torch_sparse_tensor( \
self.csr_norm(self.image_ui_graph_tmp, mean_flag=True)
).cuda()
self.text_ui_graph = self.sparse_mx_to_torch_sparse_tensor(
self.csr_norm(self.text_ui_graph_tmp, mean_flag=True)
).cuda()
self.image_iu_graph = self.sparse_mx_to_torch_sparse_tensor(
self.csr_norm(self.image_iu_graph_tmp, mean_flag=True)
).cuda()
self.text_iu_graph = self.sparse_mx_to_torch_sparse_tensor(
self.csr_norm(self.text_iu_graph_tmp, mean_flag=True)
).cuda()
self.image_ui_index = {'x':[], 'y':[]}
self.text_ui_index = {'x':[], 'y':[]}
else:
_, image_ui_id = torch.topk(G_image_u_sim_detach, int(self.n_items*args.m_topk_rate), dim=-1)
self.image_ui_index['x'] += np.array(torch.tensor(users).repeat(1, int(self.n_items*args.m_topk_rate)).view(-1)).tolist()
self.image_ui_index['y'] += np.array(image_ui_id.cpu().view(-1)).tolist()
_, text_ui_id = torch.topk(G_text_u_sim_detach, int(self.n_items*args.m_topk_rate), dim=-1)
self.text_ui_index['x'] += np.array(torch.tensor(users).repeat(1, int(self.n_items*args.m_topk_rate)).view(-1)).tolist()
self.text_ui_index['y'] += np.array(text_ui_id.cpu().view(-1)).tolist()
feat_emb_loss = self.feat_reg_loss_calculation(G_image_item_embeds, G_text_item_embeds, G_image_user_embeds, G_text_user_embeds)
batch_contrastive_loss = 0
batch_contrastive_loss1 = self.batched_contrastive_loss(G_image_user_id[users],G_user_emb[users])
batch_contrastive_loss2 = self.batched_contrastive_loss(G_text_user_id[users],G_user_emb[users])
batch_contrastive_loss = batch_contrastive_loss1 + batch_contrastive_loss2
G_inputf = torch.cat((G_image_u_sim, G_text_u_sim), dim=0)
G_predf = (self.D(G_inputf))
G_lossf = -(G_predf.mean())
batch_loss = G_batch_mf_loss + G_batch_emb_loss + G_batch_reg_loss + feat_emb_loss + args.cl_rate*batch_contrastive_loss + args.G_rate*G_lossf #feat_emb_loss
line_var_loss.append(batch_loss.detach().data)
line_g_loss.append(G_lossf.detach().data)
line_cl_loss.append(batch_contrastive_loss.detach().data)
self.optimizer_D.zero_grad()
batch_loss.backward(retain_graph=False)
self.optimizer_D.step()
loss += float(batch_loss)
mf_loss += float(G_batch_mf_loss)
emb_loss += float(G_batch_emb_loss)
reg_loss += float(G_batch_reg_loss)
del ua_embeddings, ia_embeddings, G_ua_embeddings, G_ia_embeddings, G_u_g_embeddings, G_neg_i_g_embeddings, G_pos_i_g_embeddings
if math.isnan(loss) == True:
self.logger.logging('ERROR: loss is nan.')
sys.exit()
if (epoch + 1) % args.verbose != 0:
perf_str = 'Epoch %d [%.1fs]: train==[%.5f=%.5f + %.5f + %.5f + %.5f]' % (
epoch, time() - t1, loss, mf_loss, emb_loss, reg_loss, contrastive_loss)
training_time_list.append(time() - t1)
self.logger.logging(perf_str)
t2 = time()
users_to_test = list(data_generator.test_set.keys())
users_to_val = list(data_generator.val_set.keys())
ret = self.test(users_to_val, is_val=True)
training_time_list.append(t2 - t1)
t3 = time()
loss_loger.append(loss)
rec_loger.append(ret['recall'].data)
pre_loger.append(ret['precision'].data)
ndcg_loger.append(ret['ndcg'].data)
hit_loger.append(ret['hit_ratio'].data)
line_var_recall.append(ret['recall'][1])
line_var_precision.append(ret['precision'][1])
line_var_ndcg.append(ret['ndcg'][1])
tags = ["recall", "precision", "ndcg"]
# tb_writer.add_scalar(tags[0], ret['recall'][1], epoch)
# tb_writer.add_scalar(tags[1], ret['precision'][1], epoch)
# tb_writer.add_scalar(tags[2], ret['ndcg'][1], epoch)
if args.verbose > 0:
perf_str = 'Epoch %d [%.1fs + %.1fs]: train==[%.5f=%.5f + %.5f + %.5f], recall=[%.5f, %.5f, %.5f, %.5f], ' \
'precision=[%.5f, %.5f, %.5f, %.5f], hit=[%.5f, %.5f, %.5f, %.5f], ndcg=[%.5f, %.5f, %.5f, %.5f]' % \
(epoch, t2 - t1, t3 - t2, loss, mf_loss, emb_loss, reg_loss, ret['recall'][0], ret['recall'][1], ret['recall'][2],
ret['recall'][-1],
ret['precision'][0], ret['precision'][1], ret['precision'][2], ret['precision'][-1], ret['hit_ratio'][0], ret['hit_ratio'][1], ret['hit_ratio'][2], ret['hit_ratio'][-1],
ret['ndcg'][0], ret['ndcg'][1], ret['ndcg'][2], ret['ndcg'][-1])
self.logger.logging(perf_str)
if ret['recall'][1] > best_recall:
best_recall = ret['recall'][1]
test_ret = self.test(users_to_test, is_val=False)
self.logger.logging("Test_Recall@%d: %.5f, precision=[%.5f], ndcg=[%.5f]" % (eval(args.Ks)[1], test_ret['recall'][1], test_ret['precision'][1], test_ret['ndcg'][1]))
stopping_step = 0
elif stopping_step < args.early_stopping_patience:
stopping_step += 1
self.logger.logging('#####Early stopping steps: %d #####' % stopping_step)
else:
self.logger.logging('#####Early stop! #####')
break
self.logger.logging(str(test_ret))
return best_recall, run_time
def bpr_loss(self, users, pos_items, neg_items):
pos_scores = torch.sum(torch.mul(users, pos_items), dim=1)
neg_scores = torch.sum(torch.mul(users, neg_items), dim=1)
regularizer = 1./2*(users**2).sum() + 1./2*(pos_items**2).sum() + 1./2*(neg_items**2).sum()
regularizer = regularizer / self.batch_size
maxi = F.logsigmoid(pos_scores - neg_scores)
mf_loss = -torch.mean(maxi)
emb_loss = self.decay * regularizer
reg_loss = 0.0
return mf_loss, emb_loss, reg_loss
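        # For reference, this is standard Bayesian Personalized Ranking:
        #   mf_loss = -E[log sigmoid(<u, i_pos> - <u, i_neg>)]
        # plus an L2 embedding penalty scaled by self.decay (regs[0]).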
def sparse_mx_to_torch_sparse_tensor(self, sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
def set_seed(seed):
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
set_seed(args.seed)
config = dict()
config['n_users'] = data_generator.n_users
config['n_items'] = data_generator.n_items
trainer = Trainer(data_config=config)
trainer.train() | Tiktokx-main | tiktokx/train.py |
from setuptools import setup, find_packages
setup(
name = 'PaLM-rlhf-pytorch',
packages = find_packages(exclude=[]),
version = '0.2.1',
license='MIT',
description = 'PaLM + Reinforcement Learning with Human Feedback - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/PaLM-rlhf-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'reinforcement learning',
'human feedback'
],
install_requires=[
'accelerate',
'beartype',
'einops>=0.6',
'lion-pytorch',
'torch>=1.6',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| PaLM-rlhf-pytorch-main | setup.py |
import gzip
import random
import tqdm
import numpy as np
import torch
from lion_pytorch import Lion
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from palm_rlhf_pytorch import PaLM
from accelerate import Accelerator
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
PRIME_LENGTH = 128
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# accelerator
accelerator = Accelerator()
device = accelerator.device
# instantiate palm
model = PaLM(
num_tokens=256,
dim=512,
depth=8,
flash_attn=True
).to(device)
# prepare enwik8 data
with gzip.open("./data/enwik8.gz") as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
np_train, np_valid = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(np_train), torch.from_numpy(np_valid)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq.to(device)
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size=BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size=BATCH_SIZE))
# optimizer
optim = Lion(model.palm_parameters(), lr = LEARNING_RATE)
model, optim, train_loader, val_loader = accelerator.prepare(
model, optim, train_loader, val_loader
)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10.0, desc="training"):
model.train()
for _ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader), return_loss = True)
accelerator.backward(loss / GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"training loss: {loss.item()}")
accelerator.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
accelerator.print(f"validation loss: {loss.item()}")
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:PRIME_LENGTH]
prime = decode_tokens(inp)
accelerator.print(f"%s \n\n %s", (prime, "*" * 100))
sample = model.generate(GENERATE_LENGTH, inp[None, ...])
output_str = decode_tokens(sample[0])
accelerator.print(output_str, "\n")
| PaLM-rlhf-pytorch-main | train.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from einops import rearrange
# constants
Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attention(nn.Module):
def __init__(
self,
dropout = 0.,
causal = False,
use_flash_attn = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.use_flash_attn = use_flash_attn
assert not (use_flash_attn and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = Config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not use_flash_attn:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = Config(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = Config(False, True, True)
def get_mask(self, n, device):
if exists(self.mask) and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = self.causal
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.use_flash_attn:
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
return out
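# Illustrative usage sketch (shapes are hypothetical, not taken from a caller in this file):
#   attn = Attention(causal = True, use_flash_attn = False)
#   q = torch.randn(2, 8, 1024, 64)      # (batch, heads, seq, dim_head)
#   k = v = torch.randn(2, 1024, 64)     # single shared key/value head (multi-query)
#   out = attn(q, k, v)                  # -> (2, 8, 1024, 64)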
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/attention.py |
import math
import copy
from pathlib import Path
from collections import namedtuple
from functools import wraps
from itertools import zip_longest
from tqdm import tqdm
from beartype import beartype
from beartype.typing import Tuple, Optional
import torch
from torch import einsum, nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange, Reduce
from palm_rlhf_pytorch.attention import Attention
from palm_rlhf_pytorch.utils import top_p, top_k, masked_mean, gumbel_sample, eval_decorator
from palm_rlhf_pytorch.lora import LoRA
# functions and decorators
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def identity(t, *args, **kwargs):
return t
def l2norm(t):
return F.normalize(t, dim = -1)
# normalization
# they use layernorm without bias, something that pytorch does not offer
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# residual
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
y = self.fn(x, **kwargs)
if not any([t.requires_grad for t in (x, y)]):
return x.add_(y)
return y + x
# rotary positional embedding w/ xpos
# https://arxiv.org/abs/2104.09864
# https://arxiv.org/abs/2212.10554v1
class RotaryEmbedding(nn.Module):
def __init__(self, dim, scale_base = 512, use_xpos = True):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
self.use_xpos = use_xpos
self.scale_base = scale_base
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not self.use_xpos:
return freqs, torch.ones(1, device = device)
power = (t - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t, scale = 1.):
return (t * pos.cos() * scale) + (rotate_half(t) * pos.sin() * scale)
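# For reference: per feature pair (x1, x2) this applies the RoPE rotation
#   (x1 * cos(theta) - x2 * sin(theta), x2 * cos(theta) + x1 * sin(theta)),
# and the xpos trick scales queries by `scale` and keys by `scale ** -1`, so the
# factors cancel inside the q.k dot product while damping long-range contributions.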
# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward
# https://arxiv.org/abs/2002.05202
class SwiGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
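# For reference: SwiGLU(x) = SiLU(x W_gate) * (x W); here both projections come from
# one fused linear layer, so the incoming tensor is simply chunked into value and gate.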
# parallel attention and feedforward with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelTransformerBlock(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
causal = True,
heads = 8,
qk_rmsnorm = False,
qk_scale = 8,
ff_mult = 4,
attn_dropout = 0.,
ff_dropout = 0.,
use_xpos = True,
xpos_scale_base = 512,
flash_attn = False,
):
super().__init__()
self.norm = LayerNorm(dim)
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2))
self.qk_rmsnorm = qk_rmsnorm
if qk_rmsnorm:
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.attend = Attention(
causal = causal,
dropout = attn_dropout,
use_flash_attn = flash_attn
)
self.heads = heads
self.scale = (dim_head ** -0.5) if not qk_rmsnorm else qk_scale
self.causal = causal
self.rotary_emb = RotaryEmbedding(dim_head, scale_base = xpos_scale_base, use_xpos = use_xpos and causal)
self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
self.flash_attn = flash_attn
self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)
self.attn_dropout = nn.Dropout(attn_dropout)
self.flash_attn_dropout = attn_dropout
# parallel feedforward tail
self.ff_out = nn.Sequential(
SwiGLU(),
nn.Dropout(ff_dropout),
nn.Linear(ff_inner_dim, dim, bias=False)
)
# for caching causal mask and rotary embeddings
self.register_buffer("pos_emb", None, persistent=False)
self.register_buffer("pos_emb_scale", None, persistent=False)
def get_rotary_embedding(self, n, device):
if exists(self.pos_emb) and self.pos_emb.shape[-2] >= n:
return self.pos_emb[:n], self.pos_emb_scale[:n]
pos_emb, scale = self.rotary_emb(n, device=device)
self.register_buffer("pos_emb", pos_emb, persistent=False)
self.register_buffer("pos_emb_scale", scale, persistent=False)
return pos_emb, scale
def forward(
self,
x,
mask = None,
finetune_modules = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device, h = x.shape[1], x.device, self.heads
# pre layernorm
x = self.norm(x)
# attention queries, keys, values, and feedforward inner
q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)
# finetune loras
lora_q = lora_k = lora_v = lora_o = None
if exists(finetune_modules):
lora_q, lora_k, lora_v, lora_o = finetune_modules
q = q + lora_q(x)
k = k + lora_k(x)
v = v + lora_v(x)
# split heads
# they use multi-query single-key-value attention, yet another Noam Shazeer paper
# they found no performance loss past a certain scale, and more efficient decoding obviously
# https://arxiv.org/abs/1911.02150
q = rearrange(q, "b n (h d) -> b h n d", h=h)
# qk rmsnorm
if self.qk_rmsnorm:
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# rotary embeddings with xpos decay for better length extrapolation
positions, scale = self.get_rotary_embedding(n, device)
q = apply_rotary_pos_emb(positions, q, scale)
k = apply_rotary_pos_emb(positions, k, scale ** -1)
# attention function, either regular or flash
out = self.attend(q, k, v, mask = mask)
# merge heads
out = rearrange(out, "b h n d -> b n (h d)")
attn_out = self.attn_out(out)
ff_out = self.ff_out(ff)
if exists(lora_o):
attn_out = attn_out + lora_o(out)
return attn_out + ff_out
# transformer
@beartype
class PaLM(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
depth,
causal = True,
dim_head = 64,
heads = 8,
ff_mult = 4,
attn_dropout = 0.,
ff_dropout = 0.,
qk_rmsnorm = False,
lora_r = 8,
rotary_xpos_scale_base = 512,
flash_attn = False,
finetune_scopes = tuple(),
cross_entropy_ignore_index = 0
):
super().__init__()
self.dim = dim
self.dim_head = dim_head
self.heads = heads
self.causal = causal
self.num_tokens = num_tokens
self.token_emb = nn.Embedding(num_tokens, dim)
self.layers = nn.ModuleList([])
for _ in range(depth):
block = Residual(ParallelTransformerBlock(
dim = dim,
causal = causal,
dim_head = dim_head,
heads = heads,
qk_rmsnorm = qk_rmsnorm,
ff_mult = ff_mult,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
xpos_scale_base = rotary_xpos_scale_base,
flash_attn = flash_attn
))
self.layers.append(block)
self.norm = LayerNorm(dim)
self.to_logits = nn.Linear(dim, num_tokens, bias=False)
self.to_logits.weight = self.token_emb.weight
nn.init.normal_(self.token_emb.weight, std=0.02)
# fine tuning related
self.lora_r = lora_r
self.finetune_modules = nn.ModuleDict({})
for scope in finetune_scopes:
self.add_finetune_params(scope)
# loss related
self.cross_entropy_ignore_index = cross_entropy_ignore_index
@property
def device(self):
return next(self.parameters()).device
def load(self, path):
path = Path(path)
assert path.exists()
self.load_state_dict(torch.load(str(path)))
def set_dropout(self, dropout):
for module in self.layers.modules():
if isinstance(module, nn.Dropout):
module.p = dropout
return self
def add_finetune_params(self, scope, lora_r = None):
assert scope not in self.finetune_modules, f'finetune scope {scope} already found'
dim, dim_head, heads, r, device = self.dim, self.dim_head, self.heads, default(lora_r, self.lora_r), self.device
q_inner_dim = heads * dim_head
kv_inner_dim = dim_head
lora_modules = nn.ModuleList([])
for _ in range(len(self.layers)):
lora_modules.append(nn.ModuleList([
LoRA(dim, q_inner_dim, r = r), # queries
LoRA(dim, kv_inner_dim, r = r), # keys
LoRA(dim, kv_inner_dim, r = r), # values
LoRA(q_inner_dim, dim, r = r) # wo
]))
self.finetune_modules[scope] = lora_modules.to(device)
def remove_finetune_params(self, scope):
assert scope in self.finetune_modules, f'finetune scope {scope} not found'
return self.finetune_modules.pop(scope)
@torch.no_grad()
def merge_finetune_params(self, scope):
""" in the case one wants to merge the fine-tuned actor LORA parameters and do multiple rounds of fine tuning off different reward models """
assert scope in self.finetune_modules, f'finetune scope {scope} not found'
lora_modules = self.finetune_modules.pop(scope)
for layer, (lora_q, lora_k, lora_v, lora_o) in zip(self.layers, lora_modules):
block = layer.fn
fused_attn_ff_weight = block.fused_attn_ff_proj.weight
attn_out_weight = block.attn_out.weight
fused_proj_out_dim = fused_attn_ff_weight.shape[0]
lora_qkv_weight, _ = pack([lora_q.weight, lora_k.weight, lora_v.weight], 'i *')
lora_qkv_weight = F.pad(lora_qkv_weight, (0, fused_proj_out_dim - lora_qkv_weight.shape[1]))
lora_qkv_weight = rearrange(lora_qkv_weight, 'i o -> o i')
lora_o_weight = rearrange(lora_o.weight, 'i o -> o i')
fused_attn_ff_weight.add_(lora_qkv_weight)
attn_out_weight.add_(lora_o_weight)
# researcher train palm parameters first
# before finetuning
def palm_parameters(self):
return set(self.parameters()) - set(self.finetune_modules.parameters())
def finetune_parameters(self, scope = 'default'):
assert scope in self.finetune_modules, f'finetune parameters of scope {scope} not found'
return self.finetune_modules[scope].parameters()
# generate function
@torch.no_grad()
@eval_decorator
def generate(
self,
seq_len,
prompt = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
pad_value = 0.,
eos_token = None,
return_seq_without_prompt = True,
use_tqdm = False,
**kwargs
):
if not exists(prompt):
prompt = torch.randint(0, self.num_tokens, (1, 1))
prompt = prompt.to(self.device)
return_seq_without_prompt = False
prompt, leading_dims = pack([prompt], '* n')
n, out = prompt.shape[-1], prompt.clone()
wrapper_fn = identity if not use_tqdm else tqdm
sample_num_times = max(1, seq_len - prompt.shape[-1])
for _ in wrapper_fn(range(sample_num_times)):
logits, embeds = self.forward(out, return_logits_with_embedding = True, **kwargs)
logits, embeds = logits[:, -1], embeds[:, -1]
if exists(filter_logits_fn):
logits = filter_logits_fn(logits, thres = filter_thres)
sample = gumbel_sample(logits, temperature = temperature, dim = -1)
out, _ = pack([out, sample], 'b *')
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, pad_value)
break
out, = unpack(out, leading_dims, '* n')
if not return_seq_without_prompt:
return out
return out[..., n:]
def forward(
self,
x,
return_loss = False,
disable_lora = False,
finetune_scope = None,
extra_embed = None,
return_only_embedding = False,
return_logits_with_embedding = False
):
if return_loss:
x, labels = x[:, :-1], x[:, 1:]
# mask if encoder
# treat any token ids that are negative as tokens to mask out - only needed if not autoregressive
if not self.causal:
mask = x >= 0
x = x.masked_fill(~mask, 0)
else:
mask = None
# get token embedding
x = self.token_emb(x)
if exists(extra_embed):
x = x + extra_embed
# finetune modules
finetune_modules = tuple()
if exists(finetune_scope) and not disable_lora:
assert finetune_scope in self.finetune_modules
finetune_modules = self.finetune_modules[finetune_scope]
# parallel attention / ff blocks, passing in finetuning loras
        for layer, layer_finetune_modules in zip_longest(self.layers, finetune_modules):
            x = layer(x, mask = mask, finetune_modules = layer_finetune_modules)
# final norm
embeds = self.norm(x)
if return_only_embedding:
return embeds
# to logits
logits = self.to_logits(embeds)
ret = (logits, embeds) if return_logits_with_embedding else logits
if not return_loss:
return ret
logits = rearrange(logits, 'b n c -> b c n')
return F.cross_entropy(logits, labels, ignore_index = self.cross_entropy_ignore_index) | PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/palm.py |
from palm_rlhf_pytorch.palm import PaLM
from palm_rlhf_pytorch.reward import RewardModel
from palm_rlhf_pytorch.ppo import RLHFTrainer, ActorCritic
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/__init__.py |
import math
import torch
from torch import einsum, nn
import torch.nn.functional as F
from einops import rearrange
def exists(val):
return val is not None
# decorators
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
# tensor helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def masked_mean(seq, mask = None, dim = 1, keepdim = False):
if not exists(mask):
return seq.mean(dim = dim)
if seq.ndim == 3:
mask = rearrange(mask, 'b n -> b n 1')
masked_seq = seq.masked_fill(~mask, 0.)
numer = masked_seq.sum(dim = dim, keepdim = keepdim)
denom = mask.sum(dim = dim, keepdim = keepdim)
masked_mean = numer / denom.clamp(min = 1e-3)
masked_mean = masked_mean.masked_fill(denom == 0, 0.)
return masked_mean
# sampling helpers
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
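# For reference, this is the Gumbel-max trick: adding Gumbel(0, 1) noise to the
# temperature-scaled logits and taking the argmax draws a sample distributed as
# softmax(logits / temperature), without an explicit torch.multinomial call.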
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/utils.py |
from torch.optim import AdamW, Adam
from lion_pytorch import Lion
def separate_weight_decayable_params(params):
wd_params, no_wd_params = [], []
for param in params:
param_list = no_wd_params if param.ndim < 2 else wd_params
param_list.append(param)
return wd_params, no_wd_params
def get_optimizer(
params,
lr = 1e-4,
wd = 1e-2,
betas = (0.9, 0.99),
eps = 1e-8,
filter_by_requires_grad = False,
group_wd_params = True,
use_lion = True,
**kwargs
):
if filter_by_requires_grad:
params = list(filter(lambda t: t.requires_grad, params))
if group_wd_params and wd > 0:
wd_params, no_wd_params = separate_weight_decayable_params(params)
params = [
{'params': wd_params},
{'params': no_wd_params, 'weight_decay': 0},
]
if use_lion:
return Lion(params, lr = lr, betas = betas, weight_decay = wd)
if wd == 0:
return Adam(params, lr = lr, betas = betas, eps = eps)
return AdamW(params, lr = lr, weight_decay = wd, betas = betas, eps = eps)
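# Illustrative usage sketch (assuming `model` is a PaLM instance; values are arbitrary):
#   optim = get_optimizer(model.palm_parameters(), lr = 1e-4, wd = 1e-2, use_lion = False)
# With group_wd_params = True, parameters with ndim < 2 (biases, norm gains) are placed
# in a parameter group with weight_decay = 0.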
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/optimizer.py |
import torch
from torch import nn
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# LoRA - https://arxiv.org/abs/2106.09685
class LoRA(nn.Module):
def __init__(
self,
dim,
dim_out,
r = 8,
alpha = None
):
super().__init__()
alpha = default(alpha, r)
self.scale = alpha / r
self.A = nn.Parameter(torch.randn(dim, r))
self.B = nn.Parameter(torch.zeros(r, dim_out))
@property
def weight(self):
return (self.A @ self.B) * self.scale
def forward(self, x):
return x @ self.weight
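# Illustrative usage sketch: the adapter parameterizes a low-rank update
#   delta_W = (A @ B) * (alpha / r)
# so the effective projection becomes x @ (W_frozen + delta_W), e.g.
#   lora_q = LoRA(dim = 512, dim_out = 512, r = 8)
#   q = x @ frozen_q_weight.t() + lora_q(x)   # `frozen_q_weight` is a stand-in name, not from this module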
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/lora.py |
import math
from pathlib import Path
import copy
from tqdm import tqdm
from functools import partial
from collections import deque, namedtuple
from random import randrange
from beartype import beartype
from beartype.typing import List, Optional, Callable, Deque
import torch
from torch import nn
import torch.nn.functional as F
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
from palm_rlhf_pytorch.palm import PaLM
from palm_rlhf_pytorch.reward import RewardModel
from palm_rlhf_pytorch.optimizer import get_optimizer
from palm_rlhf_pytorch.utils import masked_mean, eval_decorator
from accelerate import Accelerator
# actor critic - PaLM with lora
PPOActionCriticReturn = namedtuple('PPOActionCriticReturn', [
'actions',
'sequence',
'mask',
'prompt_mask',
'action_logits',
'values'
])
@beartype
class ActorCritic(nn.Module):
def __init__(
self,
palm: PaLM,
critic_palm: Optional[PaLM] = None,
pooled_values = False,
actor_lora = True,
critic_lora = True,
actor_lora_r = 8,
critic_lora_r = 8,
actor_lora_scope = 'actor',
critic_lora_scope = 'critic',
actor_dropout = 0.,
critic_dropout = 0.
):
super().__init__()
self.actor_palm = palm
self.critic_palm = critic_palm
if not exists(self.critic_palm):
self.critic_palm = copy.deepcopy(palm)
self.actor_palm.set_dropout(actor_dropout)
self.critic_palm.set_dropout(critic_dropout)
self.actor_lora = actor_lora
self.critic_lora = critic_lora
self.actor_lora_scope = actor_lora_scope if actor_lora else None
self.critic_lora_scope = critic_lora_scope if critic_lora else None
if self.actor_lora:
self.actor_palm.add_finetune_params(actor_lora_scope, lora_r = actor_lora_r)
if self.critic_lora:
self.critic_palm.add_finetune_params(critic_lora_scope, lora_r = critic_lora_r)
self.pooled_values = pooled_values
self.value_head = nn.Sequential(
nn.Linear(palm.dim, 1),
Rearrange('... 1 -> ...')
)
nn.init.zeros_(self.value_head[0].bias)
nn.init.orthogonal_(self.value_head[0].weight, gain = math.sqrt(2))
def actor_parameters(self):
if not self.actor_lora:
return self.actor_palm.parameters()
return [
*self.actor_palm.finetune_parameters(self.actor_lora_scope)
]
def critic_parameters(self):
        if not self.critic_lora:
return [*self.critic_palm.parameters(), *self.value_head.parameters()]
return [
*self.critic_palm.finetune_parameters(self.critic_lora_scope),
*self.value_head.parameters()
]
@torch.no_grad()
@eval_decorator
def generate(
self,
state,
max_seq_len,
eos_token = None,
return_values = False,
**kwargs
):
actions = self.actor_palm.generate(
max_seq_len,
prompt = state,
eos_token = eos_token,
finetune_scope = self.actor_lora_scope,
use_tqdm = True,
**kwargs
)
sequence = torch.cat((state, actions), dim = -1)
action_len = actions.shape[-1]
state_len = state.shape[-1]
prompt_mask = torch.arange(sequence.shape[-1], device = state.device) < state_len
prompt_mask = repeat(prompt_mask, 'n -> b n', b = sequence.shape[0])
action_mask = ~prompt_mask
mask = None
if exists(eos_token):
mask = ((sequence == eos_token).cumsum(dim = -1) == 0)
mask = F.pad(mask, (1, -1), value = True) # include eos token
action_mask &= mask
action_logits, value = self.forward(
sequence,
mask = action_mask,
return_values = return_values
)
return PPOActionCriticReturn(
actions,
sequence,
mask,
prompt_mask,
action_logits,
value
)
def forward(
self,
x,
mask = None,
return_values = True
):
action_logits = self.actor_palm(
x,
finetune_scope = self.actor_lora_scope
)
if not return_values:
return action_logits, None
critic_embeds = self.critic_palm(
x,
return_only_embedding = True,
finetune_scope = self.critic_lora_scope
)
if self.pooled_values:
critic_embeds = shift(critic_embeds, shift = 1, dim = -2)
critic_embeds = masked_mean(critic_embeds, mask, dim = 1)
values = self.value_head(critic_embeds)
return action_logits, values
# data
Memory = namedtuple('Memory', [
'sequence',
'prompt_mask',
'mask',
'action_prob',
'action_log_prob',
'reward',
'value'
])
@beartype
class ExperienceDataset(Dataset):
def __init__(
self,
data: List[torch.Tensor],
device = None
):
super().__init__()
self.data = data
self.device = device
def __len__(self):
return self.data[0].shape[0]
def __getitem__(self, ind):
return tuple(map(lambda t: t[ind].to(self.device), self.data))
def create_dataloader(data, batch_size, shuffle = True, device = None, **kwargs):
ds = ExperienceDataset(data, device = device)
return DataLoader(ds, batch_size = batch_size, shuffle = shuffle, **kwargs)
# helper functions
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def masked_normalize(t, eps = 1e-5, mask = None, dim = None):
dim = default(dim, tuple(range(t.ndim)))
kwargs = dict(dim = dim, keepdim = True)
mean = masked_mean(t, mask = mask, **kwargs)
mean_centered = t - mean
var = masked_mean(mean_centered ** 2, mask = mask, **kwargs)
return mean_centered * var.clamp(min = eps).rsqrt()
def pad_sequence_fixed(sequences, *args, **kwargs):
first_el = sequences[0]
has_no_dimension = first_el.ndim == 0
# if no dimensions, add a single dimension
if has_no_dimension:
sequences = tuple(map(lambda t: t[None], sequences))
out = pad_sequence(sequences, *args, **kwargs)
if has_no_dimension:
out = rearrange(out, '... 1 -> ...')
return out
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def log_prob(prob, indices):
assert prob.shape[:2] == indices.shape, f'preceding shapes of prob {prob.shape[:2]} and indices {indices.shape} must match'
return log(prob.gather(-1, indices[..., None])).squeeze(-1)
def shift(t, value = 0, shift = 1, dim = -1):
zeros = (0, 0) * (-dim - 1)
return F.pad(t, (*zeros, shift, -shift), value = value)
def masked_entropy(prob, dim = -1, mask = None):
entropies = (prob * log(prob)).sum(dim = -1)
return masked_mean(entropies, mask = mask).mean()
def masked_kl_div(prob1, prob2, mask = None, reduce_batch = False):
"""
need to account for variable sequence lengths, therefore not using the built-in functional version
"""
kl_divs = (prob1 * (log(prob1) - log(prob2))).sum(dim = -1)
loss = masked_mean(kl_divs, mask)
if reduce_batch:
return loss.mean()
return loss
def clipped_value_loss(values, rewards, old_values, clip):
value_clipped = old_values + (values - old_values).clamp(-clip, clip)
value_loss_1 = (value_clipped.flatten() - rewards) ** 2
value_loss_2 = (values.flatten() - rewards) ** 2
return torch.mean(torch.max(value_loss_1, value_loss_2))
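# For reference, this is PPO-style value clipping:
#   L_V = E[max((clip(V, V_old - c, V_old + c) - R)^2, (V - R)^2)],
# which discourages the critic from drifting far from its rollout-time estimates.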
# rlhf trainer
@beartype
class RLHFTrainer(nn.Module):
def __init__(
self,
*,
prompts: Optional[List[str]] = None,
prompts_path: Optional[str] = None,
prompt_token_ids: Optional[torch.Tensor] = None,
tokenizer: Callable = None,
palm: PaLM,
reward_model: RewardModel,
critic_palm: Optional[PaLM] = None,
actor_critic: Optional[ActorCritic] = None,
actor_lr = 1e-4,
critic_lr = 1e-4,
actor_wd = 0.,
critic_wd = 0.,
actor_adam_eps = 1e-7,
critic_adam_eps = 1e-7,
actor_lora = True,
critic_lora = True,
actor_lora_r = 8,
critic_lora_r = 8,
critic_pooled_values = True,
actor_dropout = 0.,
critic_dropout = 0.,
betas = (0.9, 0.999),
max_norm = None,
eps_clip = 0.2,
value_clip = 0.4,
beta_s = .01,
pad_value = 0.,
minibatch_size = 16,
epochs = 1,
kl_div_loss_weight = 0.1, # between old action probs and new action probs - not sure what the right value is
accelerate_kwargs: dict = {},
use_lion = False
):
super().__init__()
self.accelerate = Accelerator(**accelerate_kwargs)
# take care of prompts -> token ids
assert (exists(prompts) + exists(prompts_path) + exists(prompt_token_ids)) == 1
if exists(prompts_path):
path = Path(prompts_path)
prompts = path.read_text().split('\n')
if exists(prompts):
assert len(prompts) > 0, 'no prompts'
assert exists(tokenizer), 'tokenizer must be passed in if raw text prompts are given'
prompt_token_ids = tokenizer(prompts)
self.pad_value = pad_value # token pad value
self.num_prompts = prompt_token_ids.shape[0]
self.register_buffer('prompt_token_ids', prompt_token_ids)
# models
self.palm = palm
if not exists(actor_critic):
actor_critic = ActorCritic(
palm = palm,
critic_palm = critic_palm,
actor_lora = actor_lora,
critic_lora = critic_lora,
actor_lora_r = actor_lora_r,
critic_lora_r = critic_lora_r,
pooled_values = critic_pooled_values,
actor_dropout = actor_dropout,
critic_dropout = critic_dropout
).to(palm.device)
self.actor_critic = actor_critic
self.reward_model = reward_model.eval()
# train hyperparameters
self.epochs = epochs
self.minibatch_size = minibatch_size
self.max_norm = max_norm
self.kl_div_loss_weight = kl_div_loss_weight
# optimizers
self.actor_optim = get_optimizer(actor_critic.actor_parameters(), lr = actor_lr, wd = actor_wd, betas = betas, eps = actor_adam_eps, use_lion = use_lion)
self.critic_optim = get_optimizer(actor_critic.critic_parameters(), lr = critic_lr, wd = critic_wd, betas = betas, eps = critic_adam_eps, use_lion = use_lion)
# ppo hyperparams
self.eps_clip = eps_clip
self.value_clip = value_clip
self.beta_s = beta_s
# prepare with accelerator
(
self.actor_critic,
self.reward_model,
self.actor_optim,
self.critic_optim
) = self.accelerate.prepare(
self.actor_critic,
self.reward_model,
self.actor_optim,
self.critic_optim
)
def print(self, msg):
return self.accelerate.print(msg)
def save(self, filepath = './checkpoint.pt'):
torch.save(self.actor_critic.state_dict(), filepath)
def load(self, filepath = './checkpoint.pt'):
state_dict = torch.load(filepath)
self.actor_critic.load_state_dict(state_dict)
@property
def device(self):
return self.accelerate.device
@torch.no_grad()
def generate(
self,
max_seq_len,
*args,
prompt,
num_samples = 4, # sample 4 per prompt and select the one with highest reward
**kwargs
):
assert prompt.ndim == 1, 'only one prompt allowed at a time for now'
prompt = repeat(prompt, 'n -> b n', b = num_samples)
actor_critic = self.accelerate.unwrap_model(self.actor_critic)
reward_model = self.accelerate.unwrap_model(self.reward_model)
actor_critic.eval()
(
actions,
sequences,
mask,
prompt_mask,
action_logits,
_
) = actor_critic.generate(
prompt,
*args,
max_seq_len = max_seq_len,
return_values = False,
**kwargs
)
rewards = reward_model(
sequences,
prompt_mask = prompt_mask,
mask = mask,
sample = True
)
best_sequence_index = rewards.topk(1, dim = -1).indices
best_sequence = sequences[best_sequence_index]
best_sequence = rearrange(best_sequence, '1 ... -> ...')
return best_sequence
def learn(
self,
memories: Deque[Memory]
):
# stack all data stored in the memories
all_memories_stacked_and_padded = list(map(partial(pad_sequence_fixed, batch_first = True), zip(*memories)))
# prepare dataloader for policy phase training
dl = create_dataloader(all_memories_stacked_and_padded, self.minibatch_size, device = self.device)
self.actor_critic.train()
# PPO training
for _ in range(self.epochs):
for (
sequences,
prompt_masks,
masks,
old_action_probs,
old_log_probs,
rewards,
old_values
) in dl:
action_masks = ~prompt_masks & masks
action_logits, values = self.actor_critic(
sequences,
mask = action_masks
)
action_logits = shift(action_logits, shift = 1, dim = -2) # need to shift along sequence dimension by 1, since actions start from the last prompt (state) token
action_len = old_log_probs.shape[-1]
action_probs = action_logits.softmax(dim = -1)
action_log_probs = log_prob(action_probs, sequences)
action_log_probs = action_log_probs[:, -action_len:]
# calculate entropies, taking into account which part of the sequence is actually an action
entropies = masked_entropy(action_probs, mask = action_masks)
# calculate kl div between old action probs and new ones, taking into account which part of the sequence is action or not
kl_penalty = 0.
if self.kl_div_loss_weight > 0:
kl_penalty = masked_kl_div(old_action_probs, action_probs, mask = action_masks) * self.kl_div_loss_weight
# subtract the kl penalty from the rewards
rewards = rewards - kl_penalty
# handle non-pooled values
normalize_kwargs = dict()
if old_values.ndim == 2:
old_values, values = map(lambda t: shift(t, shift = 1, dim = -2), (old_values, values))
old_values = old_values[:, -action_len:]
values = values[:, -action_len:]
rewards = rearrange(rewards, 'b -> b 1')
normalize_kwargs = dict(dim = -1, mask = action_masks[:, -action_len:])
if values.ndim < rewards.ndim:
values = rearrange(values, '... -> ... 1')
# calculate clipped surrogate objective, classic PPO loss
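# editor note: the ratio below is r = exp(log pi_new(a|s) - log pi_old(a|s)); PPO keeps
# min(r * A, clip(r, 1 - eps_clip, 1 + eps_clip) * A) and adds an entropy bonus weighted by beta_s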
ratios = (action_log_probs - old_log_probs).exp()
advantages = masked_normalize(rewards - old_values, **normalize_kwargs)
if advantages.ndim == 1:
advantages = rearrange(advantages, 'b -> b 1')
surr1 = ratios * advantages
surr2 = ratios.clamp(1 - self.eps_clip, 1 + self.eps_clip) * advantages
policy_loss = - torch.min(surr1, surr2) - self.beta_s * entropies
# combine losses
loss = policy_loss.mean()
# update actor
self.accelerate.backward(loss)
self.print(f'policy_loss: {loss.item():.3f}')
if exists(self.max_norm):
self.accelerate.clip_grad_norm_(self.actor_critic.actor_parameters(), self.max_norm)
self.actor_optim.step()
self.actor_optim.zero_grad()
# calculate value loss and update value network separate from policy network
value_loss = clipped_value_loss(values, rewards.detach(), old_values, self.value_clip)
value_loss = value_loss.mean()
self.print(f'critic_loss: {value_loss.item():.3f}')
self.accelerate.backward(value_loss)
if exists(self.max_norm):
self.accelerate.clip_grad_norm_(self.actor_critic.critic_parameters(), self.max_norm)
self.critic_optim.step()
self.critic_optim.zero_grad()
def train(
self,
num_episodes = 50000,
max_timesteps = 500,
update_timesteps = 5000,
max_batch_size = 16,
max_seq_len = 2048,
eos_token = None,
temperature = 1.
):
device = self.device
time = 0
memories = deque([])
for eps in tqdm(range(num_episodes), desc = 'episodes'):
for timestep in range(max_timesteps):
time += 1
# select a bunch of random states (prompts)
# and get the action (sampled sequence from palm as well as the action probs)
# also calculate the reward using reward model and store
rand_prompt_index = randrange(0, self.num_prompts)
state = self.prompt_token_ids[rand_prompt_index]
# remove padding from state
state_mask = state != self.pad_value
state = state[state_mask]
# get predicted sequence
(
actions,
sequence,
mask,
prompt_mask,
action_logits,
value
) = self.actor_critic.generate(
rearrange(state, 'n -> 1 n'),
max_seq_len = max_seq_len,
eos_token = eos_token,
temperature = temperature,
return_values = True
)
action_logits = shift(action_logits, shift = 1, dim = -2) # need to shift along sequence dimension by 1, since actions start from the last prompt (state) token
action_prob = action_logits.softmax(dim = -1)
action_len = actions.shape[-1]
action_log_prob = log_prob(action_prob, sequence)
action_log_prob = action_log_prob[:, -action_len:]
actions = rearrange(actions, '1 ... -> ...')
# get reward as given by supervised trained reward model
sequence = torch.cat((state, actions), dim = 0)
prompt_length = len(state)
prompt_mask = torch.arange(sequence.shape[-1], device = device) < prompt_length
sequence = rearrange(sequence, 'n -> 1 n')
prompt_mask = rearrange(prompt_mask, 'n -> 1 n')
mask = default(mask, lambda: torch.ones(sequence.shape, dtype = torch.bool, device = device))
reward = self.reward_model(
sequence,
prompt_mask = prompt_mask,
mask = mask,
sample = True
)
detach_to_cpu_ = lambda t: rearrange(t.detach().cpu(), '1 ... -> ...')
# store memory for learning
memories.append(Memory(*map(detach_to_cpu_, (
sequence,
prompt_mask,
mask,
action_prob,
action_log_prob,
reward,
value
))))
# learn from the stored memories
if time % update_timesteps == 0:
self.learn(memories)
memories.clear()
print('rlhf training complete')
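
# ---------------------------------------------------------------------------
# Editor-added usage sketch (not part of the original file). A minimal example
# of how this trainer is typically wired together, mirroring the package's
# README-style usage; the PaLM hyperparameters, vocabulary size and training
# settings below are illustrative assumptions, not values from this repository.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from palm_rlhf_pytorch import PaLM, RewardModel, RLHFTrainer

    palm = PaLM(num_tokens = 256, dim = 128, depth = 2)        # tiny model for illustration
    reward_model = RewardModel(palm)                           # would normally be trained on human feedback first
    prompt_token_ids = torch.randint(0, 256, (8, 32))          # dummy tokenized prompts

    trainer = RLHFTrainer(
        palm = palm,
        reward_model = reward_model,
        prompt_token_ids = prompt_token_ids
    )

    trainer.train(num_episodes = 1, max_timesteps = 4, update_timesteps = 4, max_seq_len = 64)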
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/ppo.py |
import copy
from pathlib import Path
from tqdm import tqdm
from beartype import beartype
from beartype.typing import Tuple, Optional
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange, Reduce
from palm_rlhf_pytorch.utils import masked_mean, gumbel_sample
from palm_rlhf_pytorch.palm import PaLM
# helper functions
def exists(val):
return val is not None
# Reward Model - PaLM with a scalar head
@beartype
class RewardModel(nn.Module):
def __init__(
self,
palm: PaLM,
dropout = 0.1,
num_binned_output = 0,  # integer; a value > 1 switches to a binned (classification) reward head
use_lora = True,
lora_r = 8,
reward_lora_scope = 'reward',
):
super().__init__()
self.palm = copy.deepcopy(palm)
self.palm.set_dropout(dropout)
self.reward_lora_scope = reward_lora_scope if use_lora else None
if exists(self.reward_lora_scope):
self.palm.add_finetune_params(reward_lora_scope, lora_r = lora_r)
dim = palm.dim
self.binned_output = num_binned_output > 1
self.prompt_embed = nn.Parameter(torch.zeros(1, 1, dim))
self.response_embed = nn.Parameter(torch.zeros(1, 1, dim))
if self.binned_output:
self.to_pred = nn.Linear(dim, num_binned_output)
else:
self.to_pred = nn.Sequential(
nn.Linear(dim, 1, bias = False),
Rearrange('... 1 -> ...')
)
def load(self, path):
path = Path(path)
assert path.exists()
self.load_state_dict(torch.load(str(path)))
def finetune_parameters(self):
return [
*self.to_pred.parameters(),
*(self.palm.finetune_parameters(self.reward_lora_scope) if exists(self.reward_lora_scope) else self.palm.parameters())
]
def forward(
self,
x,
mask = None,
prompt_mask = None,
prompt_lengths = None,
labels = None,
sample = False,
sample_temperature = 1.,
disable_lora = False
):
assert not (exists(prompt_mask) and exists(prompt_lengths))
# derive prompt mask from prompt lengths
if exists(prompt_lengths):
batch, seq_len = x.shape
arange = torch.arange(seq_len, device = x.device)
prompt_mask = repeat(arange, 'n -> b n', b = batch) < rearrange(prompt_lengths, 'b -> b 1')
# reward model should have an understanding of which section is prompt, and which section is response
extra_embed = None
if exists(prompt_mask):
extra_embed = torch.where(
rearrange(prompt_mask, 'b n -> b n 1'),
self.prompt_embed,
self.response_embed
)
# get embeddings from palm
embeds = self.palm(
x,
extra_embed = extra_embed,
return_only_embedding = True,
disable_lora = disable_lora,
finetune_scope = self.reward_lora_scope
)
pooled = masked_mean(embeds, mask, dim = 1)
pred = self.to_pred(pooled)
if sample and self.binned_output:
assert not exists(labels)
pred = gumbel_sample(pred, temperature = sample_temperature, dim = -1)
if not exists(labels):
return pred
if not self.binned_output:
return F.mse_loss(pred, labels)
return F.cross_entropy(pred, labels)
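
# ---------------------------------------------------------------------------
# Editor-added usage sketch (not part of the original file). Illustrates the two
# ways this reward model is called: with labels to get a training loss, and
# without labels to get a scalar reward. PaLM, torch and rearrange are already
# imported above; the vocabulary size and sequence lengths are assumptions.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    palm = PaLM(num_tokens = 256, dim = 128, depth = 2)
    reward_model = RewardModel(palm)

    seq = torch.randint(0, 256, (1, 64))
    prompt_mask = torch.arange(64) < 16                   # first 16 tokens are the prompt
    prompt_mask = rearrange(prompt_mask, 'n -> 1 n')
    labels = torch.randn(1)                               # scalar regression target (non-binned head)

    loss = reward_model(seq, prompt_mask = prompt_mask, labels = labels)   # MSE loss for training
    reward = reward_model(seq, prompt_mask = prompt_mask)                  # scalar reward at inference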
| PaLM-rlhf-pytorch-main | palm_rlhf_pytorch/reward.py |
# --------------------------------------------------------
# SEEM -- Segment Everything Everywhere All At Once
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Xueyan Zou ([email protected]), Jianwei Yang ([email protected])
# --------------------------------------------------------
import os
import warnings
import PIL
from PIL import Image
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
import gradio as gr
import torch
import argparse
import whisper
import numpy as np
from gradio import processing_utils
from xdecoder.BaseModel import BaseModel
from xdecoder import build_model
from utils.distributed import init_distributed
from utils.arguments import load_opt_from_config_files
from utils.constants import COCO_PANOPTIC_CLASSES
from tasks import *
def parse_option():
parser = argparse.ArgumentParser('SEEM Demo', add_help=False)
parser.add_argument('--conf_files', default="configs/seem/seem_focall_lang.yaml", metavar="FILE", help='path to config file', )
args = parser.parse_args()
return args
'''
build args
'''
args = parse_option()
opt = load_opt_from_config_files(args.conf_files)
opt = init_distributed(opt)
# META DATA
cur_model = 'None'
if 'focalt' in args.conf_files:
pretrained_pth = os.path.join("seem_focalt_v2.pt")
if not os.path.exists(pretrained_pth):
os.system("wget {}".format("https://huggingface.co/xdecoder/SEEM/resolve/main/seem_focalt_v2.pt"))
cur_model = 'Focal-T'
elif 'focal' in args.conf_files:
pretrained_pth = os.path.join("seem_focall_v1.pt")
if not os.path.exists(pretrained_pth):
os.system("wget {}".format("https://huggingface.co/xdecoder/SEEM/resolve/main/seem_focall_v1.pt"))
cur_model = 'Focal-L'
'''
build model
'''
model = BaseModel(opt, build_model(opt)).from_pretrained(pretrained_pth).eval().cuda()
with torch.no_grad():
model.model.sem_seg_head.predictor.lang_encoder.get_text_embeddings(COCO_PANOPTIC_CLASSES + ["background"], is_eval=True)
'''
audio
'''
audio = whisper.load_model("base")
@torch.no_grad()
def inference(image, task, *args, **kwargs):
with torch.autocast(device_type='cuda', dtype=torch.float16):
if 'Video' in task:
return interactive_infer_video(model, audio, image, task, *args, **kwargs)
else:
return interactive_infer_image(model, audio, image, task, *args, **kwargs)
class ImageMask(gr.components.Image):
"""
Sets: source="upload", tool="sketch"
"""
is_template = True
def __init__(self, **kwargs):
super().__init__(source="upload", tool="sketch", interactive=True, **kwargs)
def preprocess(self, x):
return super().preprocess(x)
class Video(gr.components.Video):
"""
Sets: source="upload"
"""
is_template = True
def __init__(self, **kwargs):
super().__init__(source="upload", **kwargs)
def preprocess(self, x):
return super().preprocess(x)
'''
launch app
'''
title = "SEEM: Segment Everything Everywhere All At Once"
description = """
<div style="text-align: center; font-weight: bold;">
<span style="font-size: 18px" id="paper-info">
[<a href="https://github.com/UX-Decoder/Segment-Everything-Everywhere-All-At-Once" target="_blank">GitHub</a>]
[<a href="https://arxiv.org/pdf/2304.06718.pdf" target="_blank">arXiv</a>]
</span>
</div>
<div style="text-align: left; font-weight: bold;">
<br>
🌪 Note: The current model is run on <span style="color:blue;">SEEM {}</span>, for <span style="color:blue;">best performance</span> refer to <a href="https://huggingface.co/spaces/xdecoder/SEEM" target="_blank"><span style="color:red;">our demo</span></a>.
</div>
""".format(cur_model)
'''Usage
Instructions:
🎈 Try our default examples first (the sketch is not automatically drawn on the input and example images);
🎈 For the video demo, processing takes about 30-60s; please refresh if you hit an error while uploading;
🎈 Upload an image/video (if you want to use a referred region of another image, check "Example" and upload that image in the referring image panel);
🎈 Select at least one type of prompt (if you want to use a referred region of another image, check "Example");
🎈 Remember to provide the actual prompt for each prompt type you select, otherwise you will get an error (e.g., remember to draw on the referring image);
🎈 Our model supports the 133 COCO categories by default; everything else will be classified as 'others' or misclassified.
'''
article = "The Demo is Run on SEEM-Tiny."
inputs = [
    ImageMask(label="[Stroke] Draw on Image", type="pil"),
    gr.inputs.CheckboxGroup(choices=["Stroke", "Example", "Text", "Audio", "Video", "Panoptic"], type="value", label="Interactive Mode"),
    ImageMask(label="[Example] Draw on Referring Image", type="pil"),
    gr.Textbox(label="[Text] Referring Text"),
    gr.Audio(label="[Audio] Referring Audio", source="microphone", type="filepath"),
    gr.Video(label="[Video] Referring Video Segmentation", format="mp4", interactive=True),
]
gr.Interface(
fn=inference,
inputs=inputs,
outputs=[
gr.outputs.Image(
type="pil",
label="Segmentation Results (COCO classes as label)"),
gr.Video(
label="Video Segmentation Results (COCO classes as label)", format="mp4"
),
],
examples=[
["examples/corgi1.webp", ["Text"], "examples/corgi2.jpg", "The corgi.", None, None],
["examples/river1.png", ["Text", "Audio"], "examples/river2.png", "The green trees.", "examples/river1.wav", None],
["examples/zebras1.jpg", ["Example"], "examples/zebras2.jpg", "", None, None],
["examples/fries1.png", ["Example"], "examples/fries2.png", "", None, None],
["examples/placeholder.png", ["Video"], "examples/ref_vase.JPG", "", None, "examples/vasedeck.mp4"],
],
title=title,
description=description,
article=article,
allow_flagging='never',
cache_examples=False,
).launch()
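
# Editor note: example invocation (the config path is this script's own default):
#   python app.py --conf_files configs/seem/seem_focall_lang.yaml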
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/app.py |
from .interactive import interactive_infer_video, interactive_infer_image | Segment-Everything-Everywhere-All-At-Once-main | demo_code/tasks/__init__.py |
# --------------------------------------------------------
# SEEM -- Segment Everything Everywhere All At Once
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Xueyan Zou ([email protected])
# --------------------------------------------------------
import torch
import numpy as np
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms
from utils.visualizer import Visualizer
from detectron2.utils.colormap import random_color
from detectron2.data import MetadataCatalog
from detectron2.structures import BitMasks
from xdecoder.language.loss import vl_similarity
from utils.constants import COCO_PANOPTIC_CLASSES
from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
import cv2
import os
import glob
import subprocess
from PIL import Image
import random
t = []
t.append(transforms.Resize(512, interpolation=Image.BICUBIC))
transform = transforms.Compose(t)
metadata = MetadataCatalog.get('coco_2017_train_panoptic')
all_classes = [name.replace('-other','').replace('-merged','') for name in COCO_PANOPTIC_CLASSES] + ["others"]
colors_list = [(np.array(color['color'])/255).tolist() for color in COCO_CATEGORIES] + [[1, 1, 1]]
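# Editor note: `image` and `refimg` are gradio sketch-tool payloads, i.e. dicts with PIL images
# under the 'image' and 'mask' keys (see ImageMask in app.py); `tasks` is the list of checked
# prompt modes ("Stroke", "Example", "Text", "Audio", "Video", "Panoptic").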
def interactive_infer_image(model, audio_model, image, tasks, refimg=None, reftxt=None, audio_pth=None, video_pth=None):
image_ori = transform(image['image'])
mask_ori = image['mask']
width = image_ori.size[0]
height = image_ori.size[1]
image_ori = np.asarray(image_ori)
visual = Visualizer(image_ori, metadata=metadata)
images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()
# stroke_inimg = None
# stroke_refimg = None
data = {"image": images, "height": height, "width": width}
if len(tasks) == 0:
tasks = ["Panoptic"]
# initialize task switches
model.model.task_switch['spatial'] = False
model.model.task_switch['visual'] = False
model.model.task_switch['grounding'] = False
model.model.task_switch['audio'] = False
example = None
if 'Example' in tasks:
model.model.task_switch['visual'] = True
model.model.task_switch['spatial'] = True
refimg_ori, refimg_mask = refimg['image'], refimg['mask']
refimg_ori = transform(refimg_ori)
_width = refimg_ori.size[0]
_height = refimg_ori.size[1]
refimg_ori = np.asarray(refimg_ori)
refimg_ori_np = refimg_ori.copy()
images = torch.from_numpy(refimg_ori.copy()).permute(2,0,1).cuda()
batched_inputs = [{'image': images, 'height': _height, 'width': _width, 'spatial_query':{}}]
refimg_mask = np.asarray(refimg_mask)[:,:,0:1].copy()
refimg_mask = torch.from_numpy(refimg_mask).permute(2,0,1)[None,]
refimg_mask = (F.interpolate(refimg_mask, (_height, _width), mode='bilinear') > 0)
batched_inputs[0]['spatial_query']['rand_shape'] = refimg_mask
outputs_refimg, img_shape = model.model.evaluate_referring_image(batched_inputs)
model.model.task_switch['spatial'] = False
data['visual'] = outputs_refimg
# overlay = refimg_mask[0,0].float().numpy()[:,:,None] * np.array([0,0,255])
# x = refimg_ori_np
# stroke_refimg = x * (1 - refimg_mask[0,0].float().numpy()[:,:,None]) + (x * refimg_mask[0,0].numpy()[:,:,None] * 0.2 + overlay * 0.8)
# stroke_refimg = Image.fromarray(stroke_refimg.astype(np.uint8))
stroke = None
if 'Stroke' in tasks:
model.model.task_switch['spatial'] = True
mask_ori = np.asarray(mask_ori)[:,:,0:1].copy()
mask_ori = torch.from_numpy(mask_ori).permute(2,0,1)[None,]
mask_ori = (F.interpolate(mask_ori, (height, width), mode='bilinear') > 0)
data['stroke'] = mask_ori
# overlay = mask_ori[0,0].float().numpy()[:,:,None] * np.array([0,255,0])
# x = image_ori
# stroke_inimg = x * (1 - mask_ori[0,0].float().numpy()[:,:,None]) + (x * mask_ori[0,0].numpy()[:,:,None] * 0.2 + overlay * 0.8)
# stroke_inimg = Image.fromarray(stroke_inimg.astype(np.uint8))
text = None
if 'Text' in tasks:
model.model.task_switch['grounding'] = True
data['text'] = [reftxt]
audio = None
if 'Audio' in tasks:
model.model.task_switch['audio'] = True
audio_result = audio_model.transcribe(audio_pth)
data['audio'] = [audio_result['text']]
batch_inputs = [data]
if 'Panoptic' in tasks:
model.model.metadata = metadata
results = model.model.evaluate(batch_inputs)
pano_seg = results[-1]['panoptic_seg'][0]
pano_seg_info = results[-1]['panoptic_seg'][1]
demo = visual.draw_panoptic_seg(pano_seg.cpu(), pano_seg_info) # rgb Image
res = demo.get_image()
return Image.fromarray(res), None
else:
results,image_size,extra = model.model.evaluate_demo(batch_inputs)
# If the request contains a spatial (stroke) prompt, use it:
if 'Stroke' in tasks:
v_emb = results['pred_maskembs']
s_emb = results['pred_pspatials']
pred_masks = results['pred_masks']
pred_logits = v_emb @ s_emb.transpose(1,2)
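# editor note: pred_logits scores each predicted mask embedding against the stroke (spatial)
# query; the argmax over queries below selects the single best-matching mask per image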
logits_idx_y = pred_logits[:,:,0].max(dim=1)[1]
logits_idx_x = torch.arange(len(logits_idx_y), device=logits_idx_y.device)
logits_idx = torch.stack([logits_idx_x, logits_idx_y]).tolist()
pred_masks_pos = pred_masks[logits_idx]
pred_class = results['pred_logits'][logits_idx].max(dim=-1)[1]
elif 'Example' in tasks:
v_emb = results['pred_maskembs']
s_emb = results['pred_pvisuals']
pred_masks = results['pred_masks']
pred_logits = v_emb @ s_emb.transpose(1,2)
logits_idx_y = pred_logits[:,:,0].max(dim=1)[1]
logits_idx_x = torch.arange(len(logits_idx_y), device=logits_idx_y.device)
logits_idx = torch.stack([logits_idx_x, logits_idx_y]).tolist()
pred_masks_pos = pred_masks[logits_idx]
pred_class = results['pred_logits'][logits_idx].max(dim=-1)[1]
elif 'Text' in tasks:
pred_masks = results['pred_masks'][0]
v_emb = results['pred_captions'][0]
t_emb = extra['grounding_class']
t_emb = t_emb / (t_emb.norm(dim=-1, keepdim=True) + 1e-7)
v_emb = v_emb / (v_emb.norm(dim=-1, keepdim=True) + 1e-7)
temperature = model.model.sem_seg_head.predictor.lang_encoder.logit_scale
out_prob = vl_similarity(v_emb, t_emb, temperature=temperature)
matched_id = out_prob.max(0)[1]
pred_masks_pos = pred_masks[matched_id,:,:]
pred_class = results['pred_logits'][0][matched_id].max(dim=-1)[1]
elif 'Audio' in tasks:
pred_masks = results['pred_masks'][0]
v_emb = results['pred_captions'][0]
t_emb = extra['audio_class']
t_emb = t_emb / (t_emb.norm(dim=-1, keepdim=True) + 1e-7)
v_emb = v_emb / (v_emb.norm(dim=-1, keepdim=True) + 1e-7)
temperature = model.model.sem_seg_head.predictor.lang_encoder.logit_scale
out_prob = vl_similarity(v_emb, t_emb, temperature=temperature)
matched_id = out_prob.max(0)[1]
pred_masks_pos = pred_masks[matched_id,:,:]
pred_class = results['pred_logits'][0][matched_id].max(dim=-1)[1]
# interpolate mask to ori size
pred_masks_pos = (F.interpolate(pred_masks_pos[None,], image_size[-2:], mode='bilinear')[0,:,:data['height'],:data['width']] > 0.0).float().cpu().numpy()
texts = [all_classes[pred_class[0]]]
for idx, mask in enumerate(pred_masks_pos):
# color = random_color(rgb=True, maximum=1).astype(np.int32).tolist()
out_txt = texts[idx] if 'Text' not in tasks else reftxt
demo = visual.draw_binary_mask(mask, color=colors_list[pred_class[0]%133], text=out_txt)
res = demo.get_image()
torch.cuda.empty_cache()
# return Image.fromarray(res), stroke_inimg, stroke_refimg
return Image.fromarray(res), None
def interactive_infer_video(model, audio_model, image, tasks, refimg=None, reftxt=None, audio_pth=None, video_pth=None):
if 'Video' in tasks:
input_dir = video_pth.replace('.mp4', '')
input_name = input_dir.split('/')[-1]
random_number = str(random.randint(10000, 99999))
output_dir = input_dir + '_output'
output_name = output_dir.split('/')[-1]
output_file = video_pth.replace('.mp4', '_{}_output.mp4'.format(random_number))
frame_interval = 10
# Ensure output directory exists
if not os.path.exists(input_dir):
os.makedirs(input_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Build the FFmpeg command
ffmpeg_cmd = "ffmpeg -i {} -vf \"fps=5\" {}/%04d.png".format(video_pth, input_dir)
os.system(ffmpeg_cmd)
data = {}
model.model.task_switch['visual'] = True
model.model.task_switch['spatial'] = True
refimg_ori, refimg_mask = refimg['image'], refimg['mask']
refimg_ori = transform(refimg_ori)
_width = refimg_ori.size[0]
_height = refimg_ori.size[1]
refimg_ori = np.asarray(refimg_ori)
refimg_ori_np = refimg_ori.copy()
images = torch.from_numpy(refimg_ori.copy()).permute(2,0,1).cuda()
batched_inputs = [{'image': images, 'height': _height, 'width': _width, 'spatial_query':{}}]
refimg_mask = np.asarray(refimg_mask)[:,:,0:1].copy()
refimg_mask = torch.from_numpy(refimg_mask).permute(2,0,1)[None,]
refimg_mask = (F.interpolate(refimg_mask, (_height, _width), mode='bilinear') > 0)
batched_inputs[0]['spatial_query']['rand_shape'] = refimg_mask
outputs_refimg, img_shape = model.model.evaluate_referring_image(batched_inputs)
model.model.task_switch['visual'] = False
model.model.task_switch['spatial'] = False
data['visual'] = outputs_refimg
model.model.task_switch['visual'] = True
frame_pths = sorted(glob.glob(os.path.join(input_dir, '*.png')))
for frame_pth in frame_pths:
image_ori = transform(Image.open(frame_pth))
width = image_ori.size[0]
height = image_ori.size[1]
image_ori = np.asarray(image_ori)
visual = Visualizer(image_ori[:,:,::-1], metadata=metadata)
images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()
data.update({"image": images, "height": height, "width": width})
batch_inputs = [data]
results,image_size,extra = model.model.evaluate_demo(batch_inputs)
v_emb = results['pred_maskembs']
s_emb = results['pred_pvisuals']
pred_masks = results['pred_masks']
pred_logits = v_emb @ s_emb.transpose(1,2)
logits_idx_y = pred_logits[:,:,0].max(dim=1)[1]
logits_idx_x = torch.arange(len(logits_idx_y), device=logits_idx_y.device)
logits_idx = torch.stack([logits_idx_x, logits_idx_y]).tolist()
pred_masks_pos = pred_masks[logits_idx]
pred_class = results['pred_logits'][logits_idx].max(dim=-1)[1]
pred_masks_pos = (F.interpolate(pred_masks_pos[None,], image_size[-2:], mode='bilinear')[0,:,:data['height'],:data['width']] > 0.0).float().cpu().numpy()
texts = [all_classes[pred_class[0]]]
for idx, mask in enumerate(pred_masks_pos):
out_txt = texts[idx]
demo = visual.draw_binary_mask(mask, color=colors_list[pred_class[0]%133], text=out_txt)
res = demo.get_image()
output_pth = frame_pth.replace(input_name, output_name)
cv2.imwrite(output_pth, res)
ffmpeg_cmd = "ffmpeg -framerate 5 -pattern_type glob -i '{}/*.png' -c:v libx264 {}".format(output_dir, output_file)
os.system(ffmpeg_cmd)
return None, output_file
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/tasks/interactive.py |
# --------------------------------------------------------
# X-Decoder -- Generalized Decoding for Pixel, Image, and Language
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Xueyan Zou ([email protected])
# --------------------------------------------------------
import os
import logging
import torch
import torch.nn as nn
from utils.model_loading import align_and_update_state_dicts
logger = logging.getLogger(__name__)
class BaseModel(nn.Module):
def __init__(self, opt, module: nn.Module):
super(BaseModel, self).__init__()
self.opt = opt
self.model = module
def forward(self, *inputs, **kwargs):
outputs = self.model(*inputs, **kwargs)
return outputs
def save_pretrained(self, save_dir):
save_path = os.path.join(save_dir, 'model_state_dict.pt')
torch.save(self.model.state_dict(), save_path)
def from_pretrained(self, load_path):
state_dict = torch.load(load_path, map_location=self.opt['device'])
state_dict = align_and_update_state_dicts(self.model.state_dict(), state_dict)
self.model.load_state_dict(state_dict, strict=False)
return self | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/BaseModel.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .architectures import build_model | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/__init__.py |
from .registry import model_entrypoints
from .registry import is_model
from .xdecoder_head import *
def build_xdecoder_head(config, *args, **kwargs):
model_name = config['MODEL']['HEAD']
if not is_model(model_name):
raise ValueError(f'Unknown model: {model_name}')
body = model_entrypoints(model_name)(config, *args, **kwargs)
return body | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/build.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# --------------------------------------------------------
# X-Decoder -- Generalized Decoding for Pixel, Image, and Language
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Xueyan Zou ([email protected]), Jianwei Yang ([email protected])
# --------------------------------------------------------
from typing import Dict
from torch import nn
from detectron2.layers import ShapeSpec
from .registry import register_body
from .encoder import build_encoder
from .decoder import build_decoder
from ..utils import configurable
class XDecoderHead(nn.Module):
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
num_classes: int,
pixel_decoder: nn.Module,
loss_weight: float = 1.0,
ignore_value: int = -1,
# extra parameters
transformer_predictor: nn.Module,
transformer_in_feature: str,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
num_classes: number of classes to predict
pixel_decoder: the pixel decoder module
loss_weight: loss weight
ignore_value: category id to be ignored during training.
transformer_predictor: the transformer decoder that makes prediction
transformer_in_feature: input feature name to the transformer_predictor
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape]
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
self.ignore_value = ignore_value
self.common_stride = 4
self.loss_weight = loss_weight
self.pixel_decoder = pixel_decoder
self.predictor = transformer_predictor
self.transformer_in_feature = transformer_in_feature
self.num_classes = num_classes
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec], lang_encoder: nn.Module, extra: dict):
in_features_type = cfg['MODEL']['DECODER']['TRANSFORMER_IN_FEATURE']
enc_cfg = cfg['MODEL']['ENCODER']
dec_cfg = cfg['MODEL']['DECODER']
# figure out in_channels to transformer predictor
if in_features_type == "transformer_encoder":
transformer_predictor_in_channels = enc_cfg['CONVS_DIM']
elif in_features_type == "pixel_embedding":
transformer_predictor_in_channels = enc_cfg['MASK_DIM']
elif in_features_type == "multi_scale_pixel_decoder": # for maskformer2
transformer_predictor_in_channels = enc_cfg['CONVS_DIM']
else:
transformer_predictor_in_channels = input_shape[dec_cfg['TRANSFORMER_IN_FEATURE']].channels
return {
"input_shape": {
k: v for k, v in input_shape.items() if k in enc_cfg['IN_FEATURES']
},
"ignore_value": enc_cfg['IGNORE_VALUE'],
"num_classes": enc_cfg.get('NUM_CLASSES', None),
"pixel_decoder": build_encoder(cfg, input_shape),
"loss_weight": enc_cfg['LOSS_WEIGHT'],
"transformer_in_feature": dec_cfg['TRANSFORMER_IN_FEATURE'],
"transformer_predictor": build_decoder(
cfg,
transformer_predictor_in_channels,
lang_encoder,
mask_classification=True,
extra=extra,
),
}
def forward(self, features, mask=None, target_queries=None, target_vlp=None, task='seg', extra={}):
return self.layers(features, mask, target_queries, target_vlp, task, extra)
def layers(self, features, mask=None, target_queries=None, target_vlp=None, task='seg', extra={}):
mask_features, transformer_encoder_features, multi_scale_features = self.pixel_decoder.forward_features(features)
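# editor note: forward_features returns (1) a high-resolution per-pixel embedding map used to
# produce masks, (2) the (optional) transformer-encoder feature map, and (3) a list of
# multi-scale features consumed by the transformer decoder below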
if self.transformer_in_feature == "multi_scale_pixel_decoder":
predictions = self.predictor(multi_scale_features, mask_features, mask, target_queries, target_vlp, task, extra)
else:
if self.transformer_in_feature == "transformer_encoder":
assert (
transformer_encoder_features is not None
), "Please use the TransformerEncoderPixelDecoder."
predictions = self.predictor(transformer_encoder_features, mask_features, mask)
elif self.transformer_in_feature == "pixel_embedding":
predictions = self.predictor(mask_features, mask_features, mask)
else:
predictions = self.predictor(features[self.transformer_in_feature], mask_features, mask)
return predictions
@register_body
def get_xdecoder_head(cfg, input_shape, lang_encoder, extra):
return XDecoderHead(cfg, input_shape, lang_encoder, extra) | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/xdecoder_head.py |
_model_entrypoints = {}
def register_body(fn):
module_name_split = fn.__module__.split('.')
model_name = module_name_split[-1]
_model_entrypoints[model_name] = fn
return fn
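# Editor note (illustrative): a head defined in a module named `xdecoder_head` and decorated
# with @register_body is registered under that module name and later retrieved via
# model_entrypoints('xdecoder_head')(cfg, ...), which is what build.py does.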
def model_entrypoints(model_name):
return _model_entrypoints[model_name]
def is_model(model_name):
return model_name in _model_entrypoints | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/registry.py |
from .build import build_xdecoder_head | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/transformer.py
"""
Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import List, Optional
import torch
import torch.nn.functional as F
from torch import Tensor, nn
class Transformer(nn.Module):
def __init__(
self,
d_model=512,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
return_intermediate_dec=False,
):
super().__init__()
encoder_layer = TransformerEncoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before
)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
decoder_layer = TransformerDecoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before
)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(
decoder_layer,
num_decoder_layers,
decoder_norm,
return_intermediate=return_intermediate_dec,
)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, mask, query_embed, pos_embed):
# flatten NxCxHxW to HWxNxC
bs, c, h, w = src.shape
src = src.flatten(2).permute(2, 0, 1)
pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
if mask is not None:
mask = mask.flatten(1)
tgt = torch.zeros_like(query_embed)
memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
hs = self.decoder(
tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed
)
return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(
self,
src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
output = src
for layer in self.layers:
output = layer(
output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos
)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(
output,
memory,
tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos,
query_pos=query_pos,
)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(
q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(
q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(
q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(
query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(
q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(
query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
if self.normalize_before:
return self.forward_pre(
tgt,
memory,
tgt_mask,
memory_mask,
tgt_key_padding_mask,
memory_key_padding_mask,
pos,
query_pos,
)
return self.forward_post(
tgt,
memory,
tgt_mask,
memory_mask,
tgt_key_padding_mask,
memory_key_padding_mask,
pos,
query_pos,
)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
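
# ---------------------------------------------------------------------------
# Editor-added usage sketch (not part of the original file). A minimal shape
# check for the DETR-style Transformer above; all sizes are illustrative.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    transformer = Transformer(d_model=256, nhead=8, num_encoder_layers=2, num_decoder_layers=2)
    src = torch.randn(2, 256, 16, 16)                 # backbone features, NxCxHxW
    mask = torch.zeros(2, 16, 16, dtype=torch.bool)   # padding mask, True = padded position
    query_embed = torch.randn(100, 256)               # learned object queries, QxC
    pos_embed = torch.randn(2, 256, 16, 16)           # positional encoding, same shape as src
    hs, memory = transformer(src, mask, query_embed, pos_embed)
    # hs: (1, 2, 100, 256) since return_intermediate_dec=False; memory: (2, 256, 16, 16)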
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/transformer_blocks.py |
from .registry import model_entrypoints
from .registry import is_model
from .transformer_encoder_fpn import *
# from .transformer_encoder_deform import *
def build_encoder(config, *args, **kwargs):
model_name = config['MODEL']['ENCODER']['NAME']
if not is_model(model_name):
raise ValueError(f'Unknown model: {model_name}')
return model_entrypoints(model_name)(config, *args, **kwargs) | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/encoder/build.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from typing import Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast
import fvcore.nn.weight_init as weight_init
from detectron2.layers import Conv2d, DeformConv, ShapeSpec, get_norm
from .registry import register_encoder
from ..transformer_blocks import TransformerEncoder, TransformerEncoderLayer, _get_clones, _get_activation_fn
from ...modules import PositionEmbeddingSine
from ...utils import configurable
# from ..layers import Conv2d, DeformConv, ShapeSpec, get_norm
# This is a modified FPN decoder.
class BasePixelDecoder(nn.Module):
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
conv_dim: int,
mask_dim: int,
mask_on: bool,
norm: Optional[Union[str, Callable]] = None,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
conv_dims: number of output channels for the intermediate conv layers.
mask_dim: number of output channels for the final conv layer.
norm (str or callable): normalization for all conv layers
"""
super().__init__()
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5"
feature_channels = [v.channels for k, v in input_shape]
lateral_convs = []
output_convs = []
use_bias = norm == ""
for idx, in_channels in enumerate(feature_channels):
if idx == len(self.in_features) - 1:
output_norm = get_norm(norm, conv_dim)
output_conv = Conv2d(
in_channels,
conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
activation=F.relu,
)
weight_init.c2_xavier_fill(output_conv)
self.add_module("layer_{}".format(idx + 1), output_conv)
lateral_convs.append(None)
output_convs.append(output_conv)
else:
lateral_norm = get_norm(norm, conv_dim)
output_norm = get_norm(norm, conv_dim)
lateral_conv = Conv2d(
in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm
)
output_conv = Conv2d(
conv_dim,
conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
activation=F.relu,
)
weight_init.c2_xavier_fill(lateral_conv)
weight_init.c2_xavier_fill(output_conv)
self.add_module("adapter_{}".format(idx + 1), lateral_conv)
self.add_module("layer_{}".format(idx + 1), output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
# Place convs into top-down order (from low to high resolution)
# to make the top-down computation in forward clearer.
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
self.mask_on = mask_on
if self.mask_on:
self.mask_dim = mask_dim
self.mask_features = Conv2d(
conv_dim,
mask_dim,
kernel_size=3,
stride=1,
padding=1,
)
weight_init.c2_xavier_fill(self.mask_features)
self.maskformer_num_feature_levels = 3 # always use 3 scales
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
enc_cfg = cfg['MODEL']['ENCODER']
ret = {}
ret["input_shape"] = {
k: v for k, v in input_shape.items() if k in enc_cfg['IN_FEATURES']
}
ret["conv_dim"] = enc_cfg['CONVS_DIM']
ret["mask_dim"] = enc_cfg['MASK_DIM']
ret["norm"] = enc_cfg['NORM']
return ret
def forward_features(self, features):
multi_scale_features = []
num_cur_levels = 0
# Reverse feature maps into top-down order (from low to high resolution)
for idx, f in enumerate(self.in_features[::-1]):
x = features[f]
lateral_conv = self.lateral_convs[idx]
output_conv = self.output_convs[idx]
if lateral_conv is None:
y = output_conv(x)
else:
cur_fpn = lateral_conv(x)
# Following FPN implementation, we use nearest upsampling here
y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest")
y = output_conv(y)
if num_cur_levels < self.maskformer_num_feature_levels:
multi_scale_features.append(y)
num_cur_levels += 1
mask_features = self.mask_features(y) if self.mask_on else None
return mask_features, None, multi_scale_features
def forward(self, features, targets=None):
logger = logging.getLogger(__name__)
logger.warning("Calling forward() may cause unpredicted behavior of PixelDecoder module.")
return self.forward_features(features)
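
# ---------------------------------------------------------------------------
# Editor-added usage sketch (not part of the original file). Shows the
# input_shape / features contract of BasePixelDecoder; the channel and stride
# values are illustrative assumptions for a generic "res2".."res5" backbone.
# ---------------------------------------------------------------------------
def _base_pixel_decoder_example():
    shapes = {
        "res2": ShapeSpec(channels=96, stride=4),
        "res3": ShapeSpec(channels=192, stride=8),
        "res4": ShapeSpec(channels=384, stride=16),
        "res5": ShapeSpec(channels=768, stride=32),
    }
    decoder = BasePixelDecoder(shapes, conv_dim=256, mask_dim=256, mask_on=True, norm="GN")
    feats = {k: torch.randn(1, v.channels, 256 // v.stride, 256 // v.stride) for k, v in shapes.items()}
    mask_features, _, multi_scale_features = decoder.forward_features(feats)
    return mask_features.shape, [f.shape for f in multi_scale_features]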
class TransformerEncoderOnly(nn.Module):
def __init__(
self,
d_model=512,
nhead=8,
num_encoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
encoder_layer = TransformerEncoderLayer(
d_model, nhead, dim_feedforward, dropout, activation, normalize_before
)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, mask, pos_embed):
# flatten NxCxHxW to HWxNxC
bs, c, h, w = src.shape
src = src.flatten(2).permute(2, 0, 1)
pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
if mask is not None:
mask = mask.flatten(1)
memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
return memory.permute(1, 2, 0).view(bs, c, h, w)
# This is a modified FPN decoder with extra Transformer encoder that processes the lowest-resolution feature map.
class TransformerEncoderPixelDecoder(BasePixelDecoder):
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
transformer_dropout: float,
transformer_nheads: int,
transformer_dim_feedforward: int,
transformer_enc_layers: int,
transformer_pre_norm: bool,
conv_dim: int,
mask_dim: int,
mask_on: int,
norm: Optional[Union[str, Callable]] = None,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
transformer_dropout: dropout probability in transformer
transformer_nheads: number of heads in transformer
transformer_dim_feedforward: dimension of feedforward network
transformer_enc_layers: number of transformer encoder layers
transformer_pre_norm: whether to use pre-layernorm or not
conv_dims: number of output channels for the intermediate conv layers.
mask_dim: number of output channels for the final conv layer.
norm (str or callable): normalization for all conv layers
"""
super().__init__(input_shape, conv_dim=conv_dim, mask_dim=mask_dim, norm=norm, mask_on=mask_on)
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5"
feature_strides = [v.stride for k, v in input_shape]
feature_channels = [v.channels for k, v in input_shape]
in_channels = feature_channels[len(self.in_features) - 1]
self.input_proj = Conv2d(in_channels, conv_dim, kernel_size=1)
weight_init.c2_xavier_fill(self.input_proj)
self.transformer = TransformerEncoderOnly(
d_model=conv_dim,
dropout=transformer_dropout,
nhead=transformer_nheads,
dim_feedforward=transformer_dim_feedforward,
num_encoder_layers=transformer_enc_layers,
normalize_before=transformer_pre_norm,
)
N_steps = conv_dim // 2
self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
# update layer
use_bias = norm == ""
output_norm = get_norm(norm, conv_dim)
output_conv = Conv2d(
conv_dim,
conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
activation=F.relu,
)
weight_init.c2_xavier_fill(output_conv)
delattr(self, "layer_{}".format(len(self.in_features)))
self.add_module("layer_{}".format(len(self.in_features)), output_conv)
self.output_convs[0] = output_conv
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
enc_cfg = cfg['MODEL']['ENCODER']
dec_cfg = cfg['MODEL']['DECODER']
ret = super().from_config(cfg, input_shape)
ret["transformer_dropout"] = dec_cfg['DROPOUT']
ret["transformer_nheads"] = dec_cfg['NHEADS']
ret["transformer_dim_feedforward"] = dec_cfg['DIM_FEEDFORWARD']
ret["transformer_enc_layers"] = enc_cfg['TRANSFORMER_ENC_LAYERS'] # a separate config
ret["transformer_pre_norm"] = dec_cfg['PRE_NORM']
ret['mask_on'] = cfg['MODEL']['DECODER']['MASK']
return ret
def forward_features(self, features):
multi_scale_features = []
num_cur_levels = 0
# Reverse feature maps into top-down order (from low to high resolution)
for idx, f in enumerate(self.in_features[::-1]):
x = features[f]
lateral_conv = self.lateral_convs[idx]
output_conv = self.output_convs[idx]
if lateral_conv is None:
transformer = self.input_proj(x)
pos = self.pe_layer(x)
transformer = self.transformer(transformer, None, pos)
y = output_conv(transformer)
# save intermediate feature as input to Transformer decoder
transformer_encoder_features = transformer
else:
cur_fpn = lateral_conv(x)
# Following FPN implementation, we use nearest upsampling here
y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest")
y = output_conv(y)
if num_cur_levels < self.maskformer_num_feature_levels:
multi_scale_features.append(y)
num_cur_levels += 1
mask_features = self.mask_features(y) if self.mask_on else None
return mask_features, transformer_encoder_features, multi_scale_features
def forward(self, features, targets=None):
logger = logging.getLogger(__name__)
logger.warning("Calling forward() may cause unpredicted behavior of PixelDecoder module.")
return self.forward_features(features)
@register_encoder
def get_transformer_encoder_fpn(cfg, input_shape):
"""
Build a pixel decoder from `MODEL.ENCODER.NAME` in the config.
"""
model = TransformerEncoderPixelDecoder(cfg, input_shape)
forward_features = getattr(model, "forward_features", None)
if not callable(forward_features):
raise ValueError(
"Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. "
f"Please implement forward_features for {name} to only return mask features."
)
return model | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/encoder/transformer_encoder_fpn.py |
_model_entrypoints = {}
def register_encoder(fn):
module_name_split = fn.__module__.split('.')
model_name = module_name_split[-1]
_model_entrypoints[model_name] = fn
return fn
def model_entrypoints(model_name):
return _model_entrypoints[model_name]
def is_model(model_name):
return model_name in _model_entrypoints
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/encoder/registry.py |
from .build import build_encoder | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/encoder/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from typing import Callable, Dict, List, Optional, Tuple, Union
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from detectron2.modeling import SEM_SEG_HEADS_REGISTRY
from .ops.modules import MSDeformAttn
from .registry import register_encoder
from ...utils import configurable
from ...modules import PositionEmbeddingSine
from ..transformer_blocks import _get_clones, _get_activation_fn
# MSDeformAttn Transformer encoder in deformable detr
class MSDeformAttnTransformerEncoderOnly(nn.Module):
def __init__(self, d_model=256, nhead=8,
num_encoder_layers=6, dim_feedforward=1024, dropout=0.1,
activation="relu",
num_feature_levels=4, enc_n_points=4,
):
super().__init__()
self.d_model = d_model
self.nhead = nhead
encoder_layer = MSDeformAttnTransformerEncoderLayer(d_model, dim_feedforward,
dropout, activation,
num_feature_levels, nhead, enc_n_points)
self.encoder = MSDeformAttnTransformerEncoder(encoder_layer, num_encoder_layers)
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
if isinstance(m, MSDeformAttn):
m._reset_parameters()
normal_(self.level_embed)
def get_valid_ratio(self, mask):
_, H, W = mask.shape
valid_H = torch.sum(~mask[:, :, 0], 1)
valid_W = torch.sum(~mask[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
def forward(self, srcs, pos_embeds):
masks = [torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool) for x in srcs]
# prepare input for encoder
src_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
bs, c, h, w = src.shape
spatial_shape = (h, w)
spatial_shapes.append(spatial_shape)
src = src.flatten(2).transpose(1, 2)
mask = mask.flatten(1)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
src_flatten.append(src)
mask_flatten.append(mask)
src_flatten = torch.cat(src_flatten, 1)
mask_flatten = torch.cat(mask_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
# encoder
memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten)
return memory, spatial_shapes, level_start_index
class MSDeformAttnTransformerEncoderLayer(nn.Module):
def __init__(self,
d_model=256, d_ffn=1024,
dropout=0.1, activation="relu",
n_levels=4, n_heads=8, n_points=4):
super().__init__()
# self attention
self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation)
self.dropout2 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout3 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, src):
src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
src = src + self.dropout3(src2)
src = self.norm2(src)
return src
def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, padding_mask=None):
# self attention
src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, padding_mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.forward_ffn(src)
return src
class MSDeformAttnTransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
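# editor note: builds, for every feature level, a grid of pixel-centre coordinates normalised
# by the valid (non-padded) extent of each image, then scales them by valid_ratios so all
# levels attend with consistent normalised reference points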
reference_points_list = []
for lvl, (H_, W_) in enumerate(spatial_shapes):
ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None):
output = src
reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)
for _, layer in enumerate(self.layers):
output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)
return output
# @SEM_SEG_HEADS_REGISTRY.register()
class MSDeformAttnPixelDecoder(nn.Module):
@configurable
def __init__(
self,
input_shape: Dict[str, ShapeSpec],
*,
transformer_dropout: float,
transformer_nheads: int,
transformer_dim_feedforward: int,
transformer_enc_layers: int,
conv_dim: int,
mask_dim: int,
norm: Optional[Union[str, Callable]] = None,
# deformable transformer encoder args
transformer_in_features: List[str],
common_stride: int,
):
"""
NOTE: this interface is experimental.
Args:
input_shape: shapes (channels and stride) of the input features
transformer_dropout: dropout probability in transformer
transformer_nheads: number of heads in transformer
transformer_dim_feedforward: dimension of feedforward network
transformer_enc_layers: number of transformer encoder layers
conv_dim: number of output channels for the intermediate conv layers.
mask_dim: number of output channels for the final conv layer.
norm (str or callable): normalization for all conv layers
transformer_in_features: names of the backbone features fed to the deformable transformer encoder
common_stride: stride of the finest output feature map (used to set the number of extra FPN levels)
"""
super().__init__()
transformer_input_shape = {
k: v for k, v in input_shape.items() if k in transformer_in_features
}
# this is the input shape of pixel decoder
input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5"
self.feature_strides = [v.stride for k, v in input_shape]
self.feature_channels = [v.channels for k, v in input_shape]
# this is the input shape of the transformer encoder (it may use fewer features than the pixel decoder)
transformer_input_shape = sorted(transformer_input_shape.items(), key=lambda x: x[1].stride)
self.transformer_in_features = [k for k, v in transformer_input_shape] # starting from "res2" to "res5"
transformer_in_channels = [v.channels for k, v in transformer_input_shape]
self.transformer_feature_strides = [v.stride for k, v in transformer_input_shape] # to decide extra FPN layers
self.transformer_num_feature_levels = len(self.transformer_in_features)
if self.transformer_num_feature_levels > 1:
input_proj_list = []
# from low resolution to high resolution (res5 -> res2)
for in_channels in transformer_in_channels[::-1]:
input_proj_list.append(nn.Sequential(
nn.Conv2d(in_channels, conv_dim, kernel_size=1),
nn.GroupNorm(32, conv_dim),
))
self.input_proj = nn.ModuleList(input_proj_list)
else:
self.input_proj = nn.ModuleList([
nn.Sequential(
nn.Conv2d(transformer_in_channels[-1], conv_dim, kernel_size=1),
nn.GroupNorm(32, conv_dim),
)])
for proj in self.input_proj:
nn.init.xavier_uniform_(proj[0].weight, gain=1)
nn.init.constant_(proj[0].bias, 0)
self.transformer = MSDeformAttnTransformerEncoderOnly(
d_model=conv_dim,
dropout=transformer_dropout,
nhead=transformer_nheads,
dim_feedforward=transformer_dim_feedforward,
num_encoder_layers=transformer_enc_layers,
num_feature_levels=self.transformer_num_feature_levels,
)
N_steps = conv_dim // 2
self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
self.mask_dim = mask_dim
# use 1x1 conv instead
self.mask_features = Conv2d(
conv_dim,
mask_dim,
kernel_size=1,
stride=1,
padding=0,
)
weight_init.c2_xavier_fill(self.mask_features)
self.maskformer_num_feature_levels = 3 # always use 3 scales
self.common_stride = common_stride
# extra fpn levels
stride = min(self.transformer_feature_strides)
self.num_fpn_levels = int(np.log2(stride) - np.log2(self.common_stride))
lateral_convs = []
output_convs = []
use_bias = norm == ""
for idx, in_channels in enumerate(self.feature_channels[:self.num_fpn_levels]):
lateral_norm = get_norm(norm, conv_dim)
output_norm = get_norm(norm, conv_dim)
lateral_conv = Conv2d(
in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm
)
output_conv = Conv2d(
conv_dim,
conv_dim,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias,
norm=output_norm,
activation=F.relu,
)
weight_init.c2_xavier_fill(lateral_conv)
weight_init.c2_xavier_fill(output_conv)
self.add_module("adapter_{}".format(idx + 1), lateral_conv)
self.add_module("layer_{}".format(idx + 1), output_conv)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
# Place convs into top-down order (from low to high resolution)
# to make the top-down computation in forward clearer.
self.lateral_convs = lateral_convs[::-1]
self.output_convs = output_convs[::-1]
@classmethod
def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
ret = {}
enc_cfg = cfg['MODEL']['ENCODER']
dec_cfg = cfg['MODEL']['DECODER']
ret["input_shape"] = {
k: v for k, v in input_shape.items() if k in enc_cfg['IN_FEATURES']
}
ret["conv_dim"] = enc_cfg['CONVS_DIM']
ret["mask_dim"] = enc_cfg['MASK_DIM']
ret["norm"] = enc_cfg['NORM']
ret["transformer_dropout"] = dec_cfg['DROPOUT']
ret["transformer_nheads"] = dec_cfg['NHEADS']
# ret["transformer_dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
ret["transformer_dim_feedforward"] = 1024 # use 1024 for deformable transformer encoder
ret[
"transformer_enc_layers"
] = enc_cfg['TRANSFORMER_ENC_LAYERS'] # a separate config
ret["transformer_in_features"] = enc_cfg['DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES']
ret["common_stride"] = enc_cfg['COMMON_STRIDE']
return ret
@autocast(enabled=False)
def forward_features(self, features):
srcs = []
pos = []
# Reverse feature maps into top-down order (from low to high resolution)
for idx, f in enumerate(self.transformer_in_features[::-1]):
x = features[f].float() # deformable detr does not support half precision
srcs.append(self.input_proj[idx](x))
pos.append(self.pe_layer(x))
y, spatial_shapes, level_start_index = self.transformer(srcs, pos)
bs = y.shape[0]
split_size_or_sections = [None] * self.transformer_num_feature_levels
for i in range(self.transformer_num_feature_levels):
if i < self.transformer_num_feature_levels - 1:
split_size_or_sections[i] = level_start_index[i + 1] - level_start_index[i]
else:
split_size_or_sections[i] = y.shape[1] - level_start_index[i]
y = torch.split(y, split_size_or_sections, dim=1)
out = []
multi_scale_features = []
num_cur_levels = 0
for i, z in enumerate(y):
out.append(z.transpose(1, 2).view(bs, -1, spatial_shapes[i][0], spatial_shapes[i][1]))
# append `out` with extra FPN levels
# Reverse feature maps into top-down order (from low to high resolution)
for idx, f in enumerate(self.in_features[:self.num_fpn_levels][::-1]):
x = features[f].float()
lateral_conv = self.lateral_convs[idx]
output_conv = self.output_convs[idx]
cur_fpn = lateral_conv(x)
# Following the FPN top-down pathway: upsample the coarser map (bilinear interpolation here) and add the lateral feature
y = cur_fpn + F.interpolate(out[-1], size=cur_fpn.shape[-2:], mode="bilinear", align_corners=False)
y = output_conv(y)
out.append(y)
for o in out:
if num_cur_levels < self.maskformer_num_feature_levels:
multi_scale_features.append(o)
num_cur_levels += 1
return self.mask_features(out[-1]), out[0], multi_scale_features
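# Summary of the return values: `mask_features` is a 1x1-conv projection (mask_dim channels)
# of the finest FPN map, `out[0]` is the coarsest transformer-encoder feature map, and
# `multi_scale_features` holds the first `maskformer_num_feature_levels` (3) maps of `out`,
# ordered from low to high resolution, which are typically consumed by the transformer decoder.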
@register_encoder
def get_transformer_encoder_deform(cfg, input_shape):
"""
Build a pixel decoder from `cfg.MODEL.MASK_FORMER.PIXEL_DECODER_NAME`.
"""
model = MSDeformAttnPixelDecoder(cfg, input_shape)
forward_features = getattr(model, "forward_features", None)
if not callable(forward_features):
raise ValueError(
"Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. "
f"Please implement forward_features for {name} to only return mask features."
)
return model | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/encoder/transformer_encoder_deform.py |
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import gradcheck
from functions.ms_deform_attn_func import MSDeformAttnFunction, ms_deform_attn_core_pytorch
N, M, D = 1, 2, 2
Lq, L, P = 2, 2, 2
shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
level_start_index = torch.cat((shapes.new_zeros((1, )), shapes.prod(1).cumsum(0)[:-1]))
S = sum([(H*W).item() for H, W in shapes])
torch.manual_seed(3)
@torch.no_grad()
def check_forward_equal_with_pytorch_double():
value = torch.rand(N, S, M, D).cuda() * 0.01
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
im2col_step = 2
output_pytorch = ms_deform_attn_core_pytorch(value.double(), shapes, sampling_locations.double(), attention_weights.double()).detach().cpu()
output_cuda = MSDeformAttnFunction.apply(value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step).detach().cpu()
fwdok = torch.allclose(output_cuda, output_pytorch)
max_abs_err = (output_cuda - output_pytorch).abs().max()
max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max()
print(f'* {fwdok} check_forward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
@torch.no_grad()
def check_forward_equal_with_pytorch_float():
value = torch.rand(N, S, M, D).cuda() * 0.01
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
im2col_step = 2
output_pytorch = ms_deform_attn_core_pytorch(value, shapes, sampling_locations, attention_weights).detach().cpu()
output_cuda = MSDeformAttnFunction.apply(value, shapes, level_start_index, sampling_locations, attention_weights, im2col_step).detach().cpu()
fwdok = torch.allclose(output_cuda, output_pytorch, rtol=1e-2, atol=1e-3)
max_abs_err = (output_cuda - output_pytorch).abs().max()
max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max()
print(f'* {fwdok} check_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')
def check_gradient_numerical(channels=4, grad_value=True, grad_sampling_loc=True, grad_attn_weight=True):
value = torch.rand(N, S, M, channels).cuda() * 0.01
sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5
attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)
im2col_step = 2
func = MSDeformAttnFunction.apply
value.requires_grad = grad_value
sampling_locations.requires_grad = grad_sampling_loc
attention_weights.requires_grad = grad_attn_weight
gradok = gradcheck(func, (value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step))
print(f'* {gradok} check_gradient_numerical(D={channels})')
if __name__ == '__main__':
check_forward_equal_with_pytorch_double()
check_forward_equal_with_pytorch_float()
for channels in [30, 32, 64, 71, 1025, 2048, 3096]:
check_gradient_numerical(channels, True, True, True)
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/encoder/ops/test.py |
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
import os
import glob
import torch
from torch.utils.cpp_extension import CUDA_HOME
from torch.utils.cpp_extension import CppExtension
from torch.utils.cpp_extension import CUDAExtension
from setuptools import find_packages
from setuptools import setup
requirements = ["torch", "torchvision"]
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "src")
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sources = main_file + source_cpu
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
# Build the CUDA extension if FORCE_CUDA is set or a CUDA runtime is available; FORCE_CUDA allows building on machines without a visible GPU (e.g. in a Docker build).
if (os.environ.get('FORCE_CUDA') or torch.cuda.is_available()) and CUDA_HOME is not None:
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
else:
if CUDA_HOME is None:
raise NotImplementedError('CUDA_HOME is None. Please set environment variable CUDA_HOME.')
else:
raise NotImplementedError('No CUDA runtime was found. Set FORCE_CUDA=1 to build anyway, or check torch.cuda.is_available().')
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"MultiScaleDeformableAttention",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
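# A typical build, assuming the usual Deformable-DETR ops layout (script names may differ per repo):
# run `sh make.sh` (or `python setup.py build install`) from this `ops` directory, matching the hint
# printed in functions/ms_deform_attn_func.py when the compiled op is missing.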
setup(
name="MultiScaleDeformableAttention",
version="1.0",
author="Weijie Su",
url="https://github.com/fundamentalvision/Deformable-DETR",
description="PyTorch Wrapper for CUDA Functions of Multi-Scale Deformable Attention",
packages=find_packages(exclude=("configs", "tests",)),
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/encoder/ops/setup.py |
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import torch
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
try:
import MultiScaleDeformableAttention as MSDA
except ModuleNotFoundError as e:
info_string = (
"\n\nPlease compile MultiScaleDeformableAttention CUDA op with the following commands:\n"
"\t`cd mask2former/modeling/pixel_decoder/ops`\n"
"\t`sh make.sh`\n"
)
raise ModuleNotFoundError(info_string) from e
class MSDeformAttnFunction(Function):
@staticmethod
def forward(ctx, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step):
ctx.im2col_step = im2col_step
output = MSDA.ms_deform_attn_forward(
value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, ctx.im2col_step)
ctx.save_for_backward(value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights = ctx.saved_tensors
grad_value, grad_sampling_loc, grad_attn_weight = \
MSDA.ms_deform_attn_backward(
value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, ctx.im2col_step)
return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
def ms_deform_attn_core_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights):
# for debug and test only,
# need to use cuda version instead
N_, S_, M_, D_ = value.shape
_, Lq_, M_, L_, P_, _ = sampling_locations.shape
value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
sampling_grids = 2 * sampling_locations - 1
sampling_value_list = []
for lid_, (H_, W_) in enumerate(value_spatial_shapes):
# N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_
value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_*M_, D_, H_, W_)
# N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2
sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1)
# N_*M_, D_, Lq_, P_
sampling_value_l_ = F.grid_sample(value_l_, sampling_grid_l_,
mode='bilinear', padding_mode='zeros', align_corners=False)
sampling_value_list.append(sampling_value_l_)
# (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_)
attention_weights = attention_weights.transpose(1, 2).reshape(N_*M_, 1, Lq_, L_*P_)
output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_*D_, Lq_)
return output.transpose(1, 2).contiguous()
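# Illustrative shape walk-through for the pure-PyTorch path above, using the same sizes as
# ops/test.py (N, M, D = 1, 2, 2; Lq, L, P = 2, 2, 2; levels of size (6, 4) and (3, 2)):
#   value:                (1, 30, 2, 2)       # S = 6*4 + 3*2 = 30 keys
#   value_spatial_shapes: (2, 2)              # [[6, 4], [3, 2]]
#   sampling_locations:   (1, 2, 2, 2, 2, 2)  # normalized (x, y) in [0, 1]
#   attention_weights:    (1, 2, 2, 2, 2)     # normalized over levels x points
#   returned output:      (1, 2, 4)           # (N, Lq, M*D)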
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/encoder/ops/functions/ms_deform_attn_func.py |
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from .ms_deform_attn_func import MSDeformAttnFunction
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/encoder/ops/functions/__init__.py |
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import warnings
import math
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_, constant_
from ..functions import MSDeformAttnFunction
from ..functions.ms_deform_attn_func import ms_deform_attn_core_pytorch
def _is_power_of_2(n):
if (not isinstance(n, int)) or (n < 0):
raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n)))
return (n & (n-1) == 0) and n != 0
class MSDeformAttn(nn.Module):
def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
"""
Multi-Scale Deformable Attention Module
:param d_model hidden dimension
:param n_levels number of feature levels
:param n_heads number of attention heads
:param n_points number of sampling points per attention head per feature level
"""
super().__init__()
if d_model % n_heads != 0:
raise ValueError('d_model must be divisible by n_heads, but got {} and {}'.format(d_model, n_heads))
_d_per_head = d_model // n_heads
# _d_per_head should ideally be a power of 2, which is more efficient in the CUDA implementation
if not _is_power_of_2(_d_per_head):
warnings.warn("You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 "
"which is more efficient in our CUDA implementation.")
self.im2col_step = 128
self.d_model = d_model
self.n_levels = n_levels
self.n_heads = n_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
self.value_proj = nn.Linear(d_model, d_model)
self.output_proj = nn.Linear(d_model, d_model)
self._reset_parameters()
def _reset_parameters(self):
constant_(self.sampling_offsets.weight.data, 0.)
thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (grid_init / grid_init.abs().max(-1, keepdim=True)[0]).view(self.n_heads, 1, 1, 2).repeat(1, self.n_levels, self.n_points, 1)
for i in range(self.n_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.)
constant_(self.attention_weights.bias.data, 0.)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.)
def forward(self, query, reference_points, input_flatten, input_spatial_shapes, input_level_start_index, input_padding_mask=None):
"""
:param query (N, Length_{query}, C)
:param reference_points (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area
or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes
:param input_flatten (N, \sum_{l=0}^{L-1} H_l \cdot W_l, C)
:param input_spatial_shapes (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
:param input_level_start_index (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]
:param input_padding_mask (N, \sum_{l=0}^{L-1} H_l \cdot W_l), True for padding elements, False for non-padding elements
:return output (N, Length_{query}, C)
"""
N, Len_q, _ = query.shape
N, Len_in, _ = input_flatten.shape
assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in
value = self.value_proj(input_flatten)
if input_padding_mask is not None:
value = value.masked_fill(input_padding_mask[..., None], float(0))
value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(query).view(N, Len_q, self.n_heads, self.n_levels, self.n_points, 2)
attention_weights = self.attention_weights(query).view(N, Len_q, self.n_heads, self.n_levels * self.n_points)
attention_weights = F.softmax(attention_weights, -1).view(N, Len_q, self.n_heads, self.n_levels, self.n_points)
# N, Len_q, n_heads, n_levels, n_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1)
sampling_locations = reference_points[:, :, None, :, None, :] \
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
elif reference_points.shape[-1] == 4:
sampling_locations = reference_points[:, :, None, :, None, :2] \
+ sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
else:
raise ValueError(
'Last dim of reference_points must be 2 or 4, but get {} instead.'.format(reference_points.shape[-1]))
try:
output = MSDeformAttnFunction.apply(
value, input_spatial_shapes, input_level_start_index, sampling_locations, attention_weights, self.im2col_step)
except Exception:
# fall back to the pure-PyTorch implementation (e.g. on CPU or when the CUDA op is not compiled)
output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights)
# # For FLOPs calculation only
# output = ms_deform_attn_core_pytorch(value, input_spatial_shapes, sampling_locations, attention_weights)
output = self.output_proj(output)
return output
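# A minimal, hypothetical usage sketch (single 8x8 feature level, d_model=256); without the
# compiled CUDA op the forward above falls back to ms_deform_attn_core_pytorch:
#
#   attn = MSDeformAttn(d_model=256, n_levels=1, n_heads=8, n_points=4)
#   feats = torch.rand(2, 64, 256)                        # (N, H*W, C)
#   ref = torch.rand(2, 64, 1, 2)                         # (N, Len_q, n_levels, 2), in [0, 1]
#   shapes = torch.as_tensor([[8, 8]], dtype=torch.long)  # (n_levels, 2)
#   start_index = torch.as_tensor([0], dtype=torch.long)  # (n_levels,)
#   out = attn(feats, ref, feats, shapes, start_index)    # -> (2, 64, 256)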
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/encoder/ops/modules/ms_deform_attn.py |
# ------------------------------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
# ------------------------------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/fundamentalvision/Deformable-DETR
from .ms_deform_attn import MSDeformAttn
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/encoder/ops/modules/__init__.py |
from .registry import model_entrypoints
from .registry import is_model
from .seem import *
def build_decoder(config, *args, **kwargs):
model_name = config['MODEL']['DECODER']['NAME']
if not is_model(model_name):
raise ValueError(f'Unknown model: {model_name}')
return model_entrypoints(model_name)(config, *args, **kwargs) | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/decoder/build.py |
# --------------------------------------------------------
# SEEM -- Segment Everything Everywhere All At Once
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Xueyan Zou ([email protected]), Jianwei Yang ([email protected])
# --------------------------------------------------------
import logging
from typing import Optional
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from timm.models.layers import trunc_normal_
from detectron2.layers import Conv2d
import fvcore.nn.weight_init as weight_init
from .utils.utils import rand_sample, prepare_features
from .utils.attn import MultiheadAttention
from .utils.attention_data_struct import AttentionDataStruct
from .registry import register_decoder
from ...utils import configurable
from ...modules import PositionEmbeddingSine
from ...modules.point_features import point_sample
class SelfAttentionLayer(nn.Module):
def __init__(self, d_model, nhead, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.norm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt,
tgt_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, tgt_mask,
tgt_key_padding_mask, query_pos)
return self.forward_post(tgt, tgt_mask,
tgt_key_padding_mask, query_pos)
class CrossAttentionLayer(nn.Module):
def __init__(self, d_model, nhead, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.norm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2, avg_attn = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt, avg_attn
def forward_pre(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm(tgt)
tgt2, avg_attn = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)
tgt = tgt + self.dropout(tgt2)
return tgt, avg_attn
def forward(self, tgt, memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, memory_mask,
memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, memory_mask,
memory_key_padding_mask, pos, query_pos)
class FFNLayer(nn.Module):
def __init__(self, d_model, dim_feedforward=2048, dropout=0.0,
activation="relu", normalize_before=False):
super().__init__()
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm = nn.LayerNorm(d_model)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt):
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout(tgt2)
tgt = self.norm(tgt)
return tgt
def forward_pre(self, tgt):
tgt2 = self.norm(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout(tgt2)
return tgt
def forward(self, tgt):
if self.normalize_before:
return self.forward_pre(tgt)
return self.forward_post(tgt)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
class MultiScaleMaskedTransformerDecoder(nn.Module):
_version = 2
@configurable
def __init__(
self,
lang_encoder: nn.Module,
in_channels,
mask_classification=True,
*,
hidden_dim: int,
dim_proj: int,
num_queries: int,
contxt_len: int,
nheads: int,
dim_feedforward: int,
dec_layers: int,
pre_norm: bool,
mask_dim: int,
task_switch: dict,
enforce_input_project: bool,
max_spatial_len: int,
attn_arch: dict,
):
"""
NOTE: this interface is experimental.
Args:
lang_encoder: language encoder used to compute class/text similarities
in_channels: channels of the input features
mask_classification: whether to add a mask classifier or not
hidden_dim: Transformer feature dimension
dim_proj: dimension of the class/text projection space
num_queries: number of object queries
contxt_len: context length of the text encoder
nheads: number of attention heads
dim_feedforward: feature dimension in the feedforward network
dec_layers: number of Transformer decoder layers
pre_norm: whether to use pre-LayerNorm or not
mask_dim: mask feature dimension
task_switch: dict of task flags (mask/bbox/spatial/grounding/visual/audio)
enforce_input_project: add an input projection 1x1 conv even if input
channels and hidden dim are identical
max_spatial_len: maximum number of sampled spatial points per feature level
attn_arch: attention architecture config used to build AttentionDataStruct
"""
super().__init__()
assert mask_classification, "Only mask classification models are supported"
self.mask_classification = mask_classification
# positional encoding
N_steps = hidden_dim // 2
self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
# define Transformer decoder here
self.num_heads = nheads
self.num_layers = dec_layers
self.contxt_len = contxt_len
self.transformer_self_attention_layers = nn.ModuleList()
self.transformer_cross_attention_layers = nn.ModuleList()
self.transformer_ffn_layers = nn.ModuleList()
for _ in range(self.num_layers):
self.transformer_self_attention_layers.append(
SelfAttentionLayer(
d_model=hidden_dim,
nhead=nheads,
dropout=0.0,
normalize_before=pre_norm,
)
)
self.transformer_cross_attention_layers.append(
CrossAttentionLayer(
d_model=hidden_dim,
nhead=nheads,
dropout=0.0,
normalize_before=pre_norm,
)
)
self.transformer_ffn_layers.append(
FFNLayer(
d_model=hidden_dim,
dim_feedforward=dim_feedforward,
dropout=0.0,
normalize_before=pre_norm,
)
)
self.decoder_norm = nn.LayerNorm(hidden_dim)
self.num_queries = num_queries
# learnable query features
self.query_feat = nn.Embedding(num_queries, hidden_dim)
# learnable query p.e.
self.query_embed = nn.Embedding(num_queries, hidden_dim)
# learnable positive negative indicator
self.pn_indicator = nn.Embedding(2, hidden_dim)
# level embedding (we always use 3 scales)
self.num_feature_levels = 3
self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim)
self.input_proj = nn.ModuleList()
for _ in range(self.num_feature_levels):
if in_channels != hidden_dim or enforce_input_project:
self.input_proj.append(Conv2d(in_channels, hidden_dim, kernel_size=1))
weight_init.c2_xavier_fill(self.input_proj[-1])
else:
self.input_proj.append(nn.Sequential())
self.task_switch = task_switch
self.query_index = {}
# output FFNs
self.lang_encoder = lang_encoder
if self.task_switch['mask']:
self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)
self.class_embed = nn.Parameter(torch.empty(hidden_dim, dim_proj))
trunc_normal_(self.class_embed, std=.02)
if task_switch['bbox']:
self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
if task_switch['spatial']:
# spatial query
self.mask_sptial_embed = nn.ParameterList([nn.Parameter(torch.empty(hidden_dim, hidden_dim)) for x in range(3)])
trunc_normal_(self.mask_sptial_embed[0], std=.02)
trunc_normal_(self.mask_sptial_embed[1], std=.02)
trunc_normal_(self.mask_sptial_embed[2], std=.02)
self.max_spatial_len = max_spatial_len
# spatial memory
num_spatial_memories = attn_arch['SPATIAL_MEMORIES']
self.spatial_embed = nn.Embedding(num_spatial_memories, hidden_dim)
self.spatial_featured = nn.Embedding(num_spatial_memories, hidden_dim)
# build AttentionDataStruct
attn_arch['NUM_LAYERS'] = self.num_layers
self.attention_data = AttentionDataStruct(attn_arch, task_switch)
@classmethod
def from_config(cls, cfg, in_channels, lang_encoder, mask_classification, extra):
ret = {}
ret["lang_encoder"] = lang_encoder
ret["in_channels"] = in_channels
ret["mask_classification"] = mask_classification
enc_cfg = cfg['MODEL']['ENCODER']
dec_cfg = cfg['MODEL']['DECODER']
ret["hidden_dim"] = dec_cfg['HIDDEN_DIM']
ret["dim_proj"] = cfg['MODEL']['DIM_PROJ']
ret["num_queries"] = dec_cfg['NUM_OBJECT_QUERIES']
ret["contxt_len"] = cfg['MODEL']['TEXT']['CONTEXT_LENGTH']
# Transformer parameters:
ret["nheads"] = dec_cfg['NHEADS']
ret["dim_feedforward"] = dec_cfg['DIM_FEEDFORWARD']
# NOTE: because we add learnable query features which requires supervision,
# we add minus 1 to decoder layers to be consistent with our loss
# implementation: that is, number of auxiliary losses is always
# equal to number of decoder layers. With learnable query features, the number of
# auxiliary losses equals number of decoders plus 1.
assert dec_cfg['DEC_LAYERS'] >= 1
ret["dec_layers"] = dec_cfg['DEC_LAYERS'] - 1
ret["pre_norm"] = dec_cfg['PRE_NORM']
ret["enforce_input_project"] = dec_cfg['ENFORCE_INPUT_PROJ']
ret["mask_dim"] = enc_cfg['MASK_DIM']
ret["task_switch"] = extra['task_switch']
ret["max_spatial_len"] = dec_cfg['MAX_SPATIAL_LEN']
# attn data struct
ret["attn_arch"] = cfg['ATTENTION_ARCH']
return ret
def forward(self, x, mask_features, mask=None, target_queries=None, target_vlp=None, task='seg', extra={}):
# x is a list of multi-scale feature
assert len(x) == self.num_feature_levels
del mask  # the mask argument is unused
spatial_extra_flag = 'spatial_query_pos_mask' in extra.keys() or task == 'refimg'
grounding_extra_flag = 'grounding_tokens' in extra.keys()
visual_extra_flag = 'visual_query_pos' in extra.keys()
audio_extra_flag = 'audio_tokens' in extra.keys()
spatial_memory_flag = 'prev_mask' in extra.keys()
flags = {"spatial": spatial_extra_flag, "grounding": grounding_extra_flag, "memories_spatial": spatial_memory_flag, "visual": visual_extra_flag, "audio": audio_extra_flag}
self.attention_data.reset(flags, task, extra)
src, pos, size_list = prepare_features(x, self.num_feature_levels, self.pe_layer, self.input_proj, self.level_embed)
_, bs, _ = src[0].shape
# QxNxC
query_embed = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1)
output = self.query_feat.weight.unsqueeze(1).repeat(1, bs, 1)
self.attention_data.set('queries_object', 'queries', output, query_embed)
if self.task_switch['spatial'] and spatial_extra_flag:
# get divisor
_,h,w = extra['spatial_query_pos_mask'][0].shape
divisor = torch.tensor([h,w], device=output.device)[None,]
# Get mean pos spatial query
non_zero_pos_point = [rand_sample((m.nonzero()[:,1:]/divisor).t(), self.max_spatial_len[-1]).t() for m in extra['spatial_query_pos_mask']]
non_zero_pos_point = nn.utils.rnn.pad_sequence(non_zero_pos_point, padding_value=-1).permute(1,0,2)
non_zero_pos_mask = (non_zero_pos_point.sum(dim=-1) < 0)
spatial_query_pos = point_sample(mask_features, non_zero_pos_point.flip(dims=(2,)).type(mask_features.dtype), align_corners=True)
spatial_query_pos = torch.stack([x[m].mean(dim=0, keepdim=True) for x, m in zip(spatial_query_pos.transpose(1,2), ~non_zero_pos_mask)]).transpose(0,1).nan_to_num()
# Get mean neg spatial query
non_zero_neg_point = [rand_sample((m.nonzero()[:,1:]/divisor).t(), self.max_spatial_len[-1]).t() for m in extra['spatial_query_neg_mask']]
non_zero_neg_point = nn.utils.rnn.pad_sequence(non_zero_neg_point, padding_value=-1).permute(1,0,2)
non_zero_neg_mask = (non_zero_neg_point.sum(dim=-1) < 0)
spatial_query_neg = point_sample(mask_features, non_zero_neg_point.flip(dims=(2,)).type(mask_features.dtype), align_corners=True)
spatial_query_neg = torch.stack([x[m].mean(dim=0, keepdim=True) for x, m in zip(spatial_query_neg.transpose(1,2), ~non_zero_neg_mask)]).transpose(0,1).nan_to_num()
# merge positive and negative sample points for self attention
# Get layerwise spatial query
src_spatial_queries = []
src_spatial_maskings = []
for i in range(len(src)):
hw,_,dc = src[i].shape
src_mask_features = src[i].view(size_list[i][0],size_list[i][1],bs,dc)
src_mask_features = src_mask_features @ self.mask_sptial_embed[i]
non_zero_query_point_pos = [rand_sample((m.nonzero()[:,1:]/divisor).t(), self.max_spatial_len[i]).t() for m in extra['spatial_query_pos_mask']]
non_zero_query_point_neg = [rand_sample((m.nonzero()[:,1:]/divisor).t(), self.max_spatial_len[i]).t() for m in extra['spatial_query_neg_mask']]
non_zero_query_point = [torch.cat([x,y], dim=0) for x,y in zip(non_zero_query_point_pos, non_zero_query_point_neg)]
pos_neg_indicator = [torch.cat([torch.ones(x.shape[0], device=x.device), -torch.ones(y.shape[0], device=y.device)]) for x,y in zip(non_zero_query_point_pos, non_zero_query_point_neg)]
pos_neg_indicator = nn.utils.rnn.pad_sequence(pos_neg_indicator, padding_value=0)
non_zero_query_point = nn.utils.rnn.pad_sequence(non_zero_query_point, padding_value=-1).permute(1,0,2)
non_zero_query_mask = (non_zero_query_point.sum(dim=-1) < 0)
non_zero_query_point[non_zero_query_mask] = 0
spatial_tokens = point_sample(src_mask_features.permute(2,3,0,1), non_zero_query_point.flip(dims=(2,)).type(src_mask_features.dtype), align_corners=True).permute(2,0,1)
spatial_tokens[pos_neg_indicator==1] += self.pn_indicator.weight[0:1]
spatial_tokens[pos_neg_indicator==-1] += self.pn_indicator.weight[1:2]
src_spatial_queries += [spatial_tokens]
src_spatial_maskings += [non_zero_query_mask]
if 'refimg' in task:
output_refimg = {}
output_refimg['visual_query_pos'] = spatial_query_pos
output_refimg['visual_query_neg'] = spatial_query_neg
output_refimg['src_visual_queries'] = src_spatial_queries
output_refimg['src_visual_maskings'] = src_spatial_maskings
return output_refimg
if task != 'demo':
# Get object query for spatial index
self.attention_data.set('queries_spatial', 'queries')
if self.task_switch['visual'] and visual_extra_flag:
visual_query_pos = extra['visual_query_pos']
visual_query_neg = extra['visual_query_neg']
src_visual_queries = extra['src_visual_queries']
src_visual_maskings = extra['src_visual_maskings']
if self.task_switch['grounding'] and grounding_extra_flag:
# Get grounding tokens
grounding_tokens = extra['grounding_tokens']
_grounding_tokens = grounding_tokens.detach().clone()
self.attention_data.set('tokens_grounding', 'tokens', grounding_tokens, _grounding_tokens)
self.attention_data.set_maskings('tokens_grounding', extra['grounding_nonzero_mask'])
if self.task_switch['audio'] and audio_extra_flag:
# Get audio tokens
grounding_tokens = extra['audio_tokens']
_grounding_tokens = grounding_tokens.detach().clone()
self.attention_data.set('tokens_audio', 'tokens', grounding_tokens, _grounding_tokens)
self.attention_data.set_maskings('tokens_audio', extra['audio_nonzero_mask'])
output, query_embed = self.attention_data.cross_attn_variables()
# prediction heads on learnable query features
results = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[0])
results["predictions_pos_spatial"] = spatial_query_pos.transpose(0,1) if spatial_extra_flag else None
results["predictions_neg_spatial"] = spatial_query_neg.transpose(0,1) if spatial_extra_flag else None
results["predictions_pos_visual"] = visual_query_pos.transpose(0,1) if visual_extra_flag else None
results["predictions_neg_visual"] = visual_query_neg.transpose(0,1) if visual_extra_flag else None
self.attention_data.set_results(results)
for i in range(self.num_layers):
level_index = i % self.num_feature_levels
# CROSS ATTENTION
output, avg_attn = self.transformer_cross_attention_layers[i](
output, src[level_index],
memory_mask=self.attention_data.cross_attn_mask(size_list[level_index], self.num_heads),
memory_key_padding_mask=None, # here we do not apply masking on padded region
pos=pos[level_index], query_pos=query_embed
)
self.attention_data.update_variables(output, 'cross_attn')
# SELF ATTENTION
self_attn_mask = torch.zeros((bs, self.num_queries, self.num_queries), device=query_embed.device).bool() # Default False (attend oq)
if self.task_switch['spatial'] and spatial_extra_flag:
# get spatial tokens
spatial_tokens = src_spatial_queries[level_index]
_spatial_tokens = spatial_tokens.detach().clone()
self.attention_data.set('tokens_spatial', 'tokens', spatial_tokens, _spatial_tokens)
self.attention_data.set_maskings('tokens_spatial', src_spatial_maskings[level_index])
if self.task_switch['visual'] and visual_extra_flag:
# get visual tokens
visual_tokens = src_visual_queries[level_index]
_visual_tokens = visual_tokens.detach().clone()
self.attention_data.set('tokens_visual', 'tokens', visual_tokens, _visual_tokens)
self.attention_data.set_maskings('tokens_visual', src_visual_maskings[level_index])
output, query_embed, self_attn_mask = self.attention_data.self_attn(bs, self.num_heads)
output = self.transformer_self_attention_layers[i](
output, tgt_mask=self_attn_mask,
tgt_key_padding_mask=None,
query_pos=query_embed)
# FFN
output = self.transformer_ffn_layers[i](
output
)
self.attention_data.update_variables(output, 'self_attn')
output, query_embed = self.attention_data.cross_attn_variables()
results = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[(i + 1) % self.num_feature_levels], layer_id=i)
results["predictions_pos_spatial"] = spatial_query_pos.transpose(0,1) if spatial_extra_flag else None
results["predictions_neg_spatial"] = spatial_query_neg.transpose(0,1) if spatial_extra_flag else None
results["predictions_pos_visual"] = visual_query_pos.transpose(0,1) if visual_extra_flag else None
results["predictions_neg_visual"] = visual_query_neg.transpose(0,1) if visual_extra_flag else None
self.attention_data.set_results(results)
return self.attention_data.organize_output()
def forward_prediction_heads(self, output, mask_features, attn_mask_target_size, layer_id=-1):
decoder_output = self.decoder_norm(output)
decoder_output = decoder_output.transpose(0, 1)
class_embed = decoder_output @ self.class_embed
outputs_class = self.lang_encoder.compute_similarity(class_embed)
mask_embed = self.mask_embed(decoder_output)
outputs_mask = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features)
outputs_bbox = [None for i in range(len(outputs_mask))]
if self.task_switch['bbox']:
outputs_bbox = self.bbox_embed(decoder_output)
# NOTE: prediction is of higher-resolution
# [B, Q, H, W] -> [B, Q, H*W] -> [B, h, Q, H*W] -> [B*h, Q, HW]
attn_mask = F.interpolate(outputs_mask, size=attn_mask_target_size, mode="bilinear", align_corners=False)
# must use bool type
# If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged.
attn_mask = (attn_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5).bool()
attn_mask = attn_mask.detach()
outputs_caption = class_embed
results = {
"attn_mask": attn_mask,
"predictions_class": outputs_class,
"predictions_mask": outputs_mask,
"predictions_bbox": outputs_bbox,
"predictions_caption": outputs_caption,
"predictions_maskemb": mask_embed,
}
return results
@register_decoder
def get_masked_transformer_decoder(cfg, in_channels, lang_encoder, mask_classification, extra):
return MultiScaleMaskedTransformerDecoder(cfg, in_channels, lang_encoder, mask_classification, extra) | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/decoder/seem.py |
_model_entrypoints = {}
def register_decoder(fn):
module_name_split = fn.__module__.split('.')
model_name = module_name_split[-1]
_model_entrypoints[model_name] = fn
return fn
def model_entrypoints(model_name):
return _model_entrypoints[model_name]
def is_model(model_name):
return model_name in _model_entrypoints | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/decoder/registry.py |
from .build import build_decoder | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/decoder/__init__.py |
from .utils import *
from .attention_data_struct import *
from .attn import * | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/decoder/utils/__init__.py |
# --------------------------------------------------------
# X-Decoder -- Generalized Decoding for Pixel, Image, and Language
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Xueyan Zou ([email protected])
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
predict_name_matcher = {"predictions_class": ["pred_logits"],
"predictions_mask":["pred_masks", "pred_gmasks", "pred_smasks"],
"predictions_caption":["pred_captions", "pred_gtexts"],
"predictions_maskemb":["pred_maskembs", "pred_smaskembs"],
"predictions_pos_spatial":["pred_pspatials"],
"predictions_neg_spatial":["pred_nspatials"],
"predictions_pos_visual":["pred_pvisuals"],
"predictions_neg_visual":["pred_nvisuals"]}
predict_index_matcher = {"predictions_class": ["queries_object"],
"predictions_mask":["queries_object", "queries_grounding", "queries_spatial"],
"predictions_caption": ["queries_object", "queries_grounding"],
"predictions_maskemb":["queries_object", "queries_spatial"],
"predictions_pos_spatial":["all"],
"predictions_neg_spatial":["all"],
"predictions_pos_visual":["all"],
"predictions_neg_visual":["all"]}
class Variable(object):
'''
Store a variable used in cross/self attention
output: embedding that accumulates during cross/self attention
pos: positional embedding that is fixed during cross/self attention
name: name of the variable
type: type of the variable, e.g. queries, tokens
attn_mask: attention mask for cross attention
masking: masking for padding
'''
def __init__(self, output, name, _type, pos=None):
self.output = output
self.pos = pos
self.name = name
self.type = _type
self.attn_mask = None
self.masking = None
def copy(self,):
output = self.output.clone() if self.output is not None else None
pos = self.pos.clone() if self.pos is not None else None
return Variable(output, self.name, self.type, pos)
class AttentionDataStruct(nn.Module):
'''
Store the data structure used for cross/self attention
task_switch: switch for different tasks
p_attn_variables: prototype of variables that are used in cross/self attention
p_self_attn: prototype of variables that are used in self attention
p_cross_attn: prototype of variables that are used in cross attention
p_iter: prototype of iteration for different queries
p_masking: prototype of masking for different tokens
p_duplication: prototype of duplication for different queries
'''
def __init__(self, attn_arch, task_switch):
super(AttentionDataStruct, self).__init__()
self.task_switch = task_switch
# p stands for prototype
self.p_attn_variables = attn_arch['VARIABLE']
self.p_self_attn = attn_arch['SELF_ATTENTION']
self.p_cross_attn = attn_arch['CROSS_ATTENTION']
self.p_masking = attn_arch['MASKING']
self.p_duplication = attn_arch['DUPLICATION']
self.num_layers = attn_arch['NUM_LAYERS']
def reset(self, flags, task, extra):
# reset variables
self.attn_variables = {}
self.cross_attn_dict = {}
self.self_attn_dict = {}
self.duplication_dict = {}
self.query_index = {}
self.output = {}
self.flags = {}
self.spatial_memory = {}
# initialize duplication
for key, values in self.p_duplication.items():
for name in values:
self.duplication_dict["{}_{}".format(key, name)] = self.p_duplication[key][name]
# initialize flag
self.flags = {"object": True}
self.flags.update(flags)
# initialize task
self.task = task
# initialize output
if self.task_switch['mask']:
self.output['predictions_class'] = []
self.output['predictions_mask'] = []
self.output['predictions_maskemb'] = []
if self.task_switch['bbox']:
self.output['predictions_bbox'] = []
if self.task_switch['spatial'] and ('spatial' in self.flags and self.flags['spatial']==True):
self.output['predictions_pos_spatial'] = []
self.output['predictions_neg_spatial'] = []
if self.task_switch['spatial'] and ('memories_spatial' in self.flags and self.flags['memories_spatial']==True):
self.spatial_memory['prev_batch_mask'] = extra['prev_mask']
if (self.task_switch['grounding'] and ('grounding' in self.flags and self.flags['grounding']==True)) \
or (self.task_switch['audio'] and ('audio' in self.flags and self.flags['audio']==True)):
self.output['predictions_caption'] = []
if self.task_switch['visual']:
self.output['predictions_pos_visual'] = []
self.output['predictions_neg_visual'] = []
# initialize cross_attn, whether the variable is used in cross attention
for key, values in self.p_cross_attn.items():
for name in values:
self.cross_attn_dict["{}_{}".format(key, name)] = self.p_cross_attn[key][name]
# initialize self_attn, whether the variable is used in self attention, and the interactions between queries
for key, values in self.p_self_attn.items():
for name in values:
self.self_attn_dict["{}_{}".format(key, name)] = self.p_self_attn[key][name]
# initialize masking
self.masking = self.p_masking
# initialize query_index
self.query_index = {"all":[0, None]}
def set(self, name, _type, output=None, pos=None, var=None):
if var is not None:
self.attn_variables[name] = var
elif name in self.duplication_dict:
assert self.duplication_dict[name] in self.attn_variables, "Duplication variable {} is not initialized yet.".format(name)
self.attn_variables[name] = self.attn_variables[self.duplication_dict[name]].copy()
else:
var = Variable(output, name, _type, pos)
self.attn_variables[name] = var
def set_results(self, results):
for name in self.cross_attn_name:
self.attn_variables[name].attn_mask = results['attn_mask'][:,self.query_index[name][0]:self.query_index[name][1]]
for key in self.output:
self.output[key].append(results[key])
def set_maskings(self, name, masking):
self.attn_variables[name].masking = masking
def cross_attn_variables(self, ):
cross_attn_name = [key for key, value in self.cross_attn_dict.items()
if (value==True) and (key in self.attn_variables)
and ((key not in self.flags) or (key in self.flags and self.flags[key]==True))]
self.cross_attn_name = cross_attn_name
output = torch.cat([self.attn_variables[name].output for name in cross_attn_name])
pos_emb = torch.cat([self.attn_variables[name].pos for name in cross_attn_name])
index = 0
for name in cross_attn_name:
self.query_index[name] = [index, index + self.attn_variables[name].output.shape[0]]
index += self.attn_variables[name].output.shape[0]
return output, pos_emb
def cross_attn_mask(self, size, num_heads):
attn_mask = torch.cat([self.attn_variables[name].attn_mask for name in self.cross_attn_name], dim=1)
# hard code memories_spatial to previous selected mask
if 'memories_spatial' in self.cross_attn_name:
memory_attn_mask = self.spatial_memory['prev_batch_mask']
bs,c,_,_ = memory_attn_mask.shape
memory_attn_mask = F.interpolate(memory_attn_mask, size, mode='bilinear', align_corners=False)
memory_attn_mask = (memory_attn_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, num_heads, 1, 1).flatten(0, 1) < 0.5).bool().detach()
attn_mask[:,self.query_index['memories_spatial'][0]:self.query_index['memories_spatial'][1]] = memory_attn_mask
attn_mask[torch.where(attn_mask.sum(-1) == attn_mask.shape[-1])] = False  # un-mask rows that would attend to nothing, avoiding NaNs in the attention softmax
return attn_mask
def self_attn(self, bs, num_heads):
self_attn_name = [key for key, value in self.self_attn_dict.items()
if len(value)>0 and key in self.attn_variables
and ((key not in self.flags) or (key in self.flags and self.flags[key]==True))]
self.self_attn_name = self_attn_name
output = torch.cat([self.attn_variables[name].output for name in self_attn_name])
pos_emb = torch.cat([self.attn_variables[name].pos for name in self_attn_name])
index = 0
for name in self_attn_name:
self.query_index[name] = [index, index + self.attn_variables[name].output.shape[0]]
index += self.attn_variables[name].output.shape[0]
self_attn_mask = torch.ones((bs, output.shape[0], output.shape[0]), dtype=torch.bool, device=output.device)
self_attn_pair = []
# build self_attention mask by query interaction
for key1, value in self.self_attn_dict.items():
for key2 in value:
if key1 not in self_attn_name or key2 not in self_attn_name:
# exclude the variables that are not used in the current layer
continue
if (key1 in self.masking or key2 in self.masking) and (key1 != key2):
self_attn_pair += [[key1, key2]]
self_attn_mask[:,self.query_index[key1][0]:self.query_index[key1][1], self.query_index[key2][0]:self.query_index[key2][1]] = False
# build self_attention mask by masking, for bidirectional
for key in self.masking:
if key in self_attn_name:
self_attn_mask[:,self.query_index[key][0]:self.query_index[key][1],self.query_index[key][0]:self.query_index[key][1]][self.attn_variables[key].masking] = True
self_attn_mask[:,self.query_index[key][0]:self.query_index[key][1],self.query_index[key][0]:self.query_index[key][1]].transpose(1,2)[self.attn_variables[key].masking] = True
# build self_attention mask by masking, for uni-directional
for key1, key2 in self_attn_pair:
if key1 not in self_attn_name or key2 not in self_attn_name:
# exclude the variables that are not used in the current layer
continue
if key1 in self.masking:
self_attn_mask[:,self.query_index[key1][0]:self.query_index[key1][1],self.query_index[key2][0]:self.query_index[key2][1]][self.attn_variables[key1].masking] = True # HACK, not verified
if key2 in self.masking:
self_attn_mask[:,self.query_index[key1][0]:self.query_index[key1][1],self.query_index[key2][0]:self.query_index[key2][1]].transpose(1,2)[self.attn_variables[key2].masking] = True
self_attn_mask = self_attn_mask.repeat_interleave(num_heads, dim=0)
return output, pos_emb, self_attn_mask
def update_variables(self, output, mode):
name_set = self.self_attn_name if mode=='self_attn' else self.cross_attn_name
for key in name_set:
self.attn_variables[key].output = output[self.query_index[key][0]:self.query_index[key][1]]
def update_spatial_results(self, results):
v_emb = results['pred_smaskembs']
pred_smasks = results['pred_smasks']
s_emb = results['pred_pspatials']
pred_logits = v_emb @ s_emb.transpose(1,2)
logits_idx_y = pred_logits[:,:,0].max(dim=1)[1]
logits_idx_x = torch.arange(len(logits_idx_y), device=logits_idx_y.device)
logits_idx = torch.stack([logits_idx_x, logits_idx_y]).tolist()
pred_masks_pos = pred_smasks[logits_idx][:,None,]
extra = {"prev_mask": pred_masks_pos}
return extra
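    # Descriptive sketch of update_spatial_results above (shape names are inferred): the mask
    # embeddings 'pred_smaskembs' are scored against the positive spatial query embedding
    # 'pred_pspatials' with a dot product; for every image the highest-scoring query is picked
    # and its mask from 'pred_smasks' is returned as 'prev_mask' for the next interactive round.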
def organize_output(self, ):
outputs = {}
outputs['aux_outputs'] = [{} for i in range(self.num_layers)]
for key, values in self.output.items():
for _key, idx_name in zip(predict_name_matcher[key], predict_index_matcher[key]):
if idx_name not in self.query_index:
continue
outputs[_key] = self.output[key][-1][:,self.query_index[idx_name][0]:self.query_index[idx_name][1]]
for idx, aux_values in enumerate(self.output[key][:-1]):
outputs['aux_outputs'][idx][_key] = aux_values[:,self.query_index[idx_name][0]:self.query_index[idx_name][1]]
return outputs | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/decoder/utils/attention_data_struct.py |
import torch
import copy
from torch import nn, Tensor
import os
import math
import torch.nn.functional as F
from torch import nn
def rand_sample(x, max_len):
if x.shape[1] <= max_len:
return x
else:
rand_idx = torch.randperm(x.shape[1])[:max_len]
return x[:,rand_idx]
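# Usage sketch for rand_sample (hypothetical tensors): it subsamples at most `max_len` entries
# along dim=1 without replacement and leaves shorter inputs untouched.
#   >>> pts = torch.randn(2, 10000, 256)
#   >>> rand_sample(pts, max_len=512).shape
#   torch.Size([2, 512, 256])
#   >>> rand_sample(pts[:, :100], max_len=512).shape
#   torch.Size([2, 100, 256])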
def prepare_features(x, num_feature_levels, pe_layer, input_proj, level_embed):
src = []
pos = []
size_list = []
# disable mask, it does not affect performance
for i in range(num_feature_levels):
size_list.append(x[i].shape[-2:])
pos.append(pe_layer(x[i], None).flatten(2))
src.append(input_proj[i](x[i]).flatten(2) + level_embed.weight[i][None, :, None])
# flatten NxCxHxW to HWxNxC
pos[-1] = pos[-1].permute(2, 0, 1)
src[-1] = src[-1].permute(2, 0, 1)
return src, pos, size_list | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/decoder/utils/utils.py |
import warnings
from typing import Callable, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch.nn.functional import pad
from torch.nn import Parameter
from torch.nn.modules.linear import Linear
from torch.nn.init import xavier_uniform_, xavier_normal_, constant_
from torch.overrides import (
has_torch_function, has_torch_function_unary, has_torch_function_variadic,
handle_torch_function)
Tensor = torch.Tensor
class _LinearWithBias(Linear):
bias: Tensor # type: ignore
def __init__(self, in_features: int, out_features: int) -> None:
super().__init__(in_features, out_features, bias=True) # type: ignore
def multi_head_attention_forward(
query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
if has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward,
tens_ops,
query,
key,
value,
embed_dim_to_check,
num_heads,
in_proj_weight,
in_proj_bias,
bias_k,
bias_v,
add_zero_attn,
dropout_p,
out_proj_weight,
out_proj_bias,
training=training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight,
k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight,
static_k=static_k,
static_v=static_v,
)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if (query is key or torch.equal(query, key)) and (key is value or torch.equal(key, value)):
# self-attention
q, k, v = F.linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif key is value or torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = F.linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = F.linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = F.linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = F.linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = F.linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = F.linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim : (embed_dim * 2)])
v = F.linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2) :])
else:
q = F.linear(query, q_proj_weight_non_opt, in_proj_bias)
k = F.linear(key, k_proj_weight_non_opt, in_proj_bias)
v = F.linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert (
attn_mask.dtype == torch.float32
or attn_mask.dtype == torch.float64
or attn_mask.dtype == torch.float16
or attn_mask.dtype == torch.uint8
or attn_mask.dtype == torch.bool
), "Only float, byte, and bool types are supported for attn_mask, not {}".format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError("The size of the 2D attn_mask is not correct.")
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError("The size of the 3D attn_mask is not correct.")
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float("-inf"))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float("-inf"),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
attn_output_weights = F.softmax(attn_output_weights, dim=-1).nan_to_num()
attn_output_weights = F.dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = F.linear(attn_output, out_proj_weight, out_proj_bias)
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
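# Usage sketch for the boolean attn_mask convention used above: True entries are *blocked*
# from attending, False entries are kept; float masks are added to the attention logits
# instead (use -inf to block). A 3D mask must be (bsz * num_heads, tgt_len, src_len), e.g.
#   bsz, num_heads, tgt_len, src_len = 2, 8, 5, 7
#   attn_mask = torch.zeros(bsz * num_heads, tgt_len, src_len, dtype=torch.bool)
#   attn_mask[:, :, -1] = True   # no query may attend to the last key position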
class MultiheadAttention(torch.nn.Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
to :attr:`embed_dim` such that query, key, and value have the same
number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = _LinearWithBias(embed_dim, embed_dim)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query: Tensor, key: Tensor, value: Tensor, key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True, attn_mask: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shapes for inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
              If a ByteTensor is provided, the non-zero positions will be ignored while the zero
              positions will be unchanged. If a BoolTensor is provided, the positions with the
              value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- attn_mask: if a 2D mask: :math:`(L, S)` where L is the target sequence length, S is the
source sequence length.
If a 3D mask: :math:`(N\cdot\text{num\_heads}, L, S)` where N is the batch size, L is the target sequence
length, S is the source sequence length. ``attn_mask`` ensure that position i is allowed to attend
the unmasked positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
Shapes for outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not self._qkv_same_embed_dim:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
else:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask) | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/body/decoder/utils/attn.py |
from .registry import model_entrypoints
from .registry import is_model
def build_model(config, **kwargs):
model_name = config['MODEL']['NAME']
if not is_model(model_name):
        raise ValueError(f'Unknown model: {model_name}')
return model_entrypoints(model_name)(config, **kwargs) | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/architectures/build.py |
_model_entrypoints = {}
def register_model(fn):
module_name_split = fn.__module__.split('.')
model_name = module_name_split[-1]
_model_entrypoints[model_name] = fn
return fn
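# Usage sketch: a builder decorated in a module named e.g. `seem_model.py` is registered under
# the key 'seem_model' (the key comes from the module name, not the function name), and
# build.py then dispatches on config['MODEL']['NAME'].
#   @register_model
#   def get_segmentation_model(cfg, **kwargs):
#       return SEEM_Model(cfg)
#   # is_model('seem_model') -> True; model_entrypoints('seem_model') returns the builder.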
def model_entrypoints(model_name):
return _model_entrypoints[model_name]
def is_model(model_name):
return model_name in _model_entrypoints | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/architectures/registry.py |
# --------------------------------------------------------
# SEEM -- Segment Everything Everywhere All At Once
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Xueyan Zou ([email protected])
# --------------------------------------------------------
import random
from typing import Tuple
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from kornia.contrib import distance_transform
from .registry import register_model
from ..utils import configurable
from ..utils import get_iou
from ..backbone import build_backbone, Backbone
from ..body import build_xdecoder_head
from ..modules import sem_seg_postprocess, bbox_postprocess
from ..language import build_language_encoder
from ..language.loss import vl_similarity
from nltk.stem.lancaster import LancasterStemmer
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from detectron2.utils.memory import retry_if_cuda_oom
from detectron2.data import MetadataCatalog
st = LancasterStemmer()
class SEEM_Model(nn.Module):
"""
Main class for mask classification semantic segmentation architectures.
"""
@configurable
def __init__(
self,
*,
backbone: Backbone,
sem_seg_head: nn.Module,
criterion: nn.Module,
losses: dict,
num_queries: int,
object_mask_threshold: float,
overlap_threshold: float,
metadata,
task_switch: dict,
phrase_prob: float,
size_divisibility: int,
sem_seg_postprocess_before_inference: bool,
pixel_mean: Tuple[float],
pixel_std: Tuple[float],
# inference
semantic_on: bool,
panoptic_on: bool,
instance_on: bool,
test_topk_per_image: int,
train_dataset_name: str,
interactive_mode: str,
interactive_iter: str,
dilation_kernel: torch.Tensor,
):
super().__init__()
self.backbone = backbone
self.sem_seg_head = sem_seg_head
self.criterion = criterion
self.losses = losses
self.num_queries = num_queries
self.overlap_threshold = overlap_threshold
self.object_mask_threshold = object_mask_threshold
self.metadata = metadata
if size_divisibility < 0:
# use backbone size_divisibility if not set
size_divisibility = self.backbone.size_divisibility
self.size_divisibility = size_divisibility
self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
# additional args
self.semantic_on = semantic_on
self.instance_on = instance_on
self.panoptic_on = panoptic_on
# caption argument
self.task_switch = task_switch
self.phrase_prob = phrase_prob
self.test_topk_per_image = test_topk_per_image
self.train_class_names = None
self.interactive_mode = interactive_mode
self.interactive_iter = interactive_iter
if not self.semantic_on:
assert self.sem_seg_postprocess_before_inference
self.register_buffer("dilation_kernel", dilation_kernel)
@classmethod
def from_config(cls, cfg):
enc_cfg = cfg['MODEL']['ENCODER']
dec_cfg = cfg['MODEL']['DECODER']
openimage_switch = {'grounding': dec_cfg['OPENIMAGE']['GROUNDING'].get('ENABLED', False),
'mask': dec_cfg['OPENIMAGE'].get('ENABLED', False)}
task_switch = {'bbox': dec_cfg.get('DETECTION', False),
'mask': dec_cfg.get('MASK', True),
'spatial': dec_cfg['SPATIAL'].get('ENABLED', False),
'grounding': dec_cfg['GROUNDING'].get('ENABLED', False),
'openimage': openimage_switch,
'visual': dec_cfg['VISUAL'].get('ENABLED', False),
'audio': dec_cfg['AUDIO'].get('ENABLED', False)}
# build model
extra = {'task_switch': task_switch}
backbone = build_backbone(cfg)
lang_encoder = build_language_encoder(cfg)
sem_seg_head = build_xdecoder_head(cfg, backbone.output_shape(), lang_encoder, extra=extra)
# Training Settings.
loss_weights = {}
matcher = None
losses = {}
weight_dict = {}
grd_weight = {}
top_x_layers = {}
criterion = None
train_dataset_name = None
phrase_prob = None
# Loss parameters:
deep_supervision = None
no_object_weight = None
interactive_mode = 'best'
interactive_iter = 20
dilation = 3
dilation_kernel = torch.ones((1, 1, dilation, dilation), device=torch.cuda.current_device())
return {
"backbone": backbone,
"sem_seg_head": sem_seg_head,
"criterion": criterion,
"losses": losses,
"num_queries": dec_cfg['NUM_OBJECT_QUERIES'],
"object_mask_threshold": dec_cfg['TEST']['OBJECT_MASK_THRESHOLD'],
"overlap_threshold": dec_cfg['TEST']['OVERLAP_THRESHOLD'],
"metadata": None,
"size_divisibility": dec_cfg['SIZE_DIVISIBILITY'],
"sem_seg_postprocess_before_inference": (
dec_cfg['TEST']['SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE']
or dec_cfg['TEST']['PANOPTIC_ON']
or dec_cfg['TEST']['INSTANCE_ON']
),
"pixel_mean": cfg['INPUT']['PIXEL_MEAN'],
"pixel_std": cfg['INPUT']['PIXEL_STD'],
"task_switch": task_switch,
"phrase_prob": phrase_prob,
# inference
"semantic_on": dec_cfg['TEST']['SEMANTIC_ON'],
"instance_on": dec_cfg['TEST']['INSTANCE_ON'],
"panoptic_on": dec_cfg['TEST']['PANOPTIC_ON'],
"test_topk_per_image": cfg['MODEL']['DECODER']['TEST']['DETECTIONS_PER_IMAGE'],
"train_dataset_name": train_dataset_name,
"interactive_mode": interactive_mode,
"interactive_iter": interactive_iter,
"dilation_kernel": dilation_kernel,
}
@property
def device(self):
return self.pixel_mean.device
def forward(self, batched_inputs, mode='default'):
if self.training:
losses = {}
if self.task_switch['mask']:
losses_seg = self.forward_seg(batched_inputs)
losses.update(losses_seg)
if self.task_switch['openimage'] and self.task_switch['openimage']['mask']:
losses_openimage = self.forward_openimage(batched_inputs['openimage'])
losses_openimage = {key.replace('mask', 'openimage'):value for key, value in losses_openimage.items()}
losses_openimage = {key.replace('grounding', 'grounding_openimage'):value for key, value in losses_openimage.items()}
losses.update(losses_openimage)
for k in list(losses.keys()):
if k in self.criterion.weight_dict:
losses[k] *= self.criterion.weight_dict[k]
else: # remove this loss if not specified in `weight_dict`
losses.pop(k)
return losses
else:
if mode == 'interactive':
return self.evaluate_interactive(batched_inputs)
elif mode == 'grounding_spatial':
return self.evaluate_grounding_sptial(batched_inputs, mode)
elif mode in ['grounding_phrasecut', 'grounding_refcoco']:
return self.evaluate_grounding(batched_inputs, mode)
else:
return self.evaluate(batched_inputs)
def forward_seg(self, batched_inputs):
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.size_divisibility)
self.sem_seg_head.predictor.lang_encoder.get_text_embeddings(self.train_class_names, is_eval=False)
extra = {}
# mask classification target
if "instances" in batched_inputs[0]:
# input bounding box is checked to be correct.
targets = self.prepare_targets(batched_inputs, images)
if self.task_switch['grounding']:
grounding_tokens = [x['grounding_query_embs'] for x in targets] # need to pad for more than one grounding token
grounding_tokens = nn.utils.rnn.pad_sequence(grounding_tokens, padding_value=-1)
non_zero_query_mask = (grounding_tokens.sum(dim=-1) == -grounding_tokens.shape[-1])
grounding_tokens[non_zero_query_mask] = 0
extra['grounding_tokens'] = grounding_tokens
extra['grounding_nonzero_mask'] = non_zero_query_mask.t()
if self.task_switch['spatial']:
pos_masks = [x['spatial_query']['rand_shape'].to(self.device) for x in batched_inputs]
neg_masks = [(x['spatial_query']['rand_shape'].to(self.device) & False) for x in batched_inputs]
fp_masks = torch.stack([(x['spatial_query']['rand_shape'].to(self.device) & False) for x in batched_inputs])
extra.update({'spatial_query_pos_mask': pos_masks, 'spatial_query_neg_mask': neg_masks, 'false_positive_mask': fp_masks})
features = self.backbone(images.tensor)
mask_features, _, multi_scale_features = self.sem_seg_head.pixel_decoder.forward_features(features)
# forward spatial only without gradient
if self.task_switch['spatial']:
with torch.no_grad():
                # sample a random number of warm-up interactive iterations in [0, 2]
rand_iter_num = random.randint(0, 2)
for i in range(rand_iter_num):
outputs = self.sem_seg_head.predictor(multi_scale_features, mask_features, extra=extra, task='spatial')
extra.update(outputs)
extra.update(self.prepare_next_spaital_mask(extra, batched_inputs))
outputs = self.sem_seg_head.predictor(multi_scale_features, mask_features, extra=extra, task='seg')
extra = {'lang_logit': self.sem_seg_head.predictor.lang_encoder.logit_scale,
'class_embeddings': getattr(self.sem_seg_head.predictor.lang_encoder, '{}_text_embeddings'.format('default')),
'false_positive_mask': extra['false_positive_mask']}
# bipartite matching-based loss
self.criterion.losses = self.losses['seg'] # seg criterion losses
losses = self.criterion(outputs, targets, extra)
del outputs
return losses
def evaluate_demo(self, batched_inputs):
        assert len(batched_inputs) == 1, "only supports a batch size of 1"
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.size_divisibility)
img_bs = images.tensor.shape[0]
targets = targets_grounding = queries_grounding = None
features = self.backbone(images.tensor)
mask_features, transformer_encoder_features, multi_scale_features = self.sem_seg_head.pixel_decoder.forward_features(features)
image_sizes = [x["image"].shape[-2:] for x in batched_inputs]
extra = {}
if 'stroke' in batched_inputs[0]:
pos_masks = (batched_inputs[0]['stroke'].to(self.device)).unbind(0)
pos_masks = ImageList.from_tensors(pos_masks, self.size_divisibility).tensor.unbind(0)
neg_masks = (batched_inputs[0]['stroke'].to(self.device) & False).unbind(0)
neg_masks = ImageList.from_tensors(neg_masks, self.size_divisibility).tensor.unbind(0)
extra.update({'spatial_query_pos_mask': pos_masks, 'spatial_query_neg_mask': neg_masks})
if 'visual' in batched_inputs[0]:
extra.update(batched_inputs[0]['visual'])
if 'text' in batched_inputs[0]:
gtext = self.sem_seg_head.predictor.lang_encoder.get_text_token_embeddings(batched_inputs[0]['text'], name='grounding', token=False, norm=False)
token_emb = gtext['token_emb']
tokens = gtext['tokens']
query_emb = token_emb[tokens['attention_mask'].bool()]
non_zero_query_mask = torch.zeros(query_emb[:,None].shape[:-1], dtype=torch.bool, device=query_emb.device)
extra['grounding_tokens'] = query_emb[:,None]
extra['grounding_nonzero_mask'] = non_zero_query_mask.t()
extra['grounding_class'] = gtext['class_emb']
if 'audio' in batched_inputs[0]:
gtext = self.sem_seg_head.predictor.lang_encoder.get_text_token_embeddings(batched_inputs[0]['audio'], name='grounding', token=False, norm=False)
token_emb = gtext['token_emb']
tokens = gtext['tokens']
query_emb = token_emb[tokens['attention_mask'].bool()]
non_zero_query_mask = torch.zeros(query_emb[:,None].shape[:-1], dtype=torch.bool, device=query_emb.device)
extra['audio_tokens'] = query_emb[:,None]
extra['audio_nonzero_mask'] = non_zero_query_mask.t()
extra['audio_class'] = gtext['class_emb']
outputs = self.sem_seg_head.predictor(multi_scale_features, mask_features, target_queries=queries_grounding, extra=extra, task='demo')
return outputs, images.tensor.shape, extra
def evaluate(self, batched_inputs):
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.size_divisibility)
img_bs = images.tensor.shape[0]
targets = targets_grounding = queries_grounding = None
features = self.backbone(images.tensor)
outputs = self.sem_seg_head(features, target_queries=queries_grounding)
mask_cls_results = outputs["pred_logits"]
mask_pred_results = outputs["pred_masks"]
box_pred_results = outputs["pred_boxes"] if self.task_switch['bbox'] else [None for i in range(len(mask_pred_results))]
# upsample masks
mask_pred_results = F.interpolate(
mask_pred_results,
size=(images.tensor.shape[-2], images.tensor.shape[-1]),
mode="bilinear",
align_corners=False,
)
input_size = mask_pred_results.shape[-2:]
del outputs
processed_results = []
for mask_cls_result, mask_pred_result, box_pred_result, input_per_image, image_size in zip(
mask_cls_results, mask_pred_results, box_pred_results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
processed_results.append({})
if self.sem_seg_postprocess_before_inference:
mask_pred_result = retry_if_cuda_oom(sem_seg_postprocess)(
mask_pred_result, image_size, height, width
)
mask_cls_result = mask_cls_result.to(mask_pred_result)
# semantic segmentation inference
if self.semantic_on:
r = retry_if_cuda_oom(self.semantic_inference)(mask_cls_result, mask_pred_result)
if not self.sem_seg_postprocess_before_inference:
r = retry_if_cuda_oom(sem_seg_postprocess)(r, image_size, height, width)
processed_results[-1]["sem_seg"] = r
# panoptic segmentation inference
if self.panoptic_on:
panoptic_r = retry_if_cuda_oom(self.panoptic_inference)(mask_cls_result, mask_pred_result)
processed_results[-1]["panoptic_seg"] = panoptic_r
# instance segmentation inference
if self.instance_on:
if self.task_switch['bbox']:
box_pred_result = bbox_postprocess(box_pred_result, input_size, image_size, height, width)
instance_r = retry_if_cuda_oom(self.instance_inference)(mask_cls_result, mask_pred_result, box_pred_result)
processed_results[-1]["instances"] = instance_r
return processed_results
def evaluate_interactive(self, batched_inputs):
assert self.task_switch['spatial']
assert 'spatial_query' in batched_inputs[0]
        assert len(batched_inputs) == 1, "only supports a batch size of 1"
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.size_divisibility)
img_bs = images.tensor.shape[0]
targets = targets_grounding = queries_grounding = None
extra = {}
features = self.backbone(images.tensor)
mask_features, transformer_encoder_features, multi_scale_features = self.sem_seg_head.pixel_decoder.forward_features(features)
image_sizes = [x["image"].shape[-2:] for x in batched_inputs]
nm = len(batched_inputs[0]['spatial_query']['rand_shape'])
multi_scale_features = [m.repeat(nm,1,1,1) for m in multi_scale_features]
mask_features = mask_features.repeat(nm,1,1,1)
all_batch_shape_iou = []
pred_smask_pointer = None
prev_smask_pointer = None
pred_smask_all = None
query_index = self.sem_seg_head.predictor.query_index
assert self.interactive_mode == 'best'
pos_masks = (batched_inputs[0]['spatial_query']['rand_shape'].to(self.device)).unbind(0)
pos_masks = ImageList.from_tensors(pos_masks, self.size_divisibility).tensor.unbind(0)
neg_masks = (batched_inputs[0]['spatial_query']['rand_shape'].to(self.device) & False).unbind(0)
neg_masks = ImageList.from_tensors(neg_masks, self.size_divisibility).tensor.unbind(0)
extra.update({'spatial_query_pos_mask': pos_masks, 'spatial_query_neg_mask': neg_masks})
for i in range(self.interactive_iter):
outputs = self.sem_seg_head.predictor(multi_scale_features, mask_features, target_queries=queries_grounding, extra=extra, task='spatial')
extra.update(outputs)
pred_smask = F.interpolate(outputs['prev_mask'], images.tensor.shape[-2:], mode='bicubic')
s = image_sizes[0]
b = batched_inputs[0]
pred_smask_all = F.interpolate(pred_smask[:,:,:s[0],:s[1]], (b['height'], b['width']), mode='bicubic')[:,0].sigmoid() > 0.5
gt_smask = b['gt_masks_orisize']
all_batch_shape_iou += [get_iou(gt_smask, pred_smask_all)]
extra.update(self.prepare_next_spaital_mask(extra, batched_inputs))
all_batch_shape_iou = torch.stack(all_batch_shape_iou)
processed_results = [{"mask_iou": all_batch_shape_iou[:,i]} for i in range(len(all_batch_shape_iou[0]))]
return processed_results
def evaluate_referring_image(self, batched_inputs, extra={}):
assert self.task_switch['spatial']
        assert len(batched_inputs) == 1, "only supports a batch size of 1"
assert self.interactive_mode == 'best'
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.size_divisibility)
img_bs = images.tensor.shape[0]
targets = targets_grounding = queries_grounding = None
features = self.backbone(images.tensor)
mask_features, transformer_encoder_features, multi_scale_features = self.sem_seg_head.pixel_decoder.forward_features(features)
if 'spatial_query' in batched_inputs[0]:
image_sizes = [x["image"].shape[-2:] for x in batched_inputs]
nm = len(batched_inputs[0]['spatial_query']['rand_shape'])
multi_scale_features = [m.repeat(nm,1,1,1) for m in multi_scale_features]
mask_features = mask_features.repeat(nm,1,1,1)
query_index = self.sem_seg_head.predictor.query_index
pos_masks = (batched_inputs[0]['spatial_query']['rand_shape'].to(self.device)).unbind(0)
pos_masks = ImageList.from_tensors(pos_masks, self.size_divisibility).tensor.unbind(0)
neg_masks = (batched_inputs[0]['spatial_query']['rand_shape'].to(self.device) & False).unbind(0)
neg_masks = ImageList.from_tensors(neg_masks, self.size_divisibility).tensor.unbind(0)
extra.update({'spatial_query_pos_mask': pos_masks, 'spatial_query_neg_mask': neg_masks})
outputs = self.sem_seg_head.predictor(multi_scale_features, mask_features, target_queries=queries_grounding, extra=extra, task='refimg')
return outputs, images.tensor.shape
def evaluate_grounding(self, batched_inputs, mode):
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.size_divisibility)
        assert len(images.tensor) == 1, "grounding evaluation only supports batch size 1 for now"
extra = {}
# mask_pred_results = []
# for idx, batch_per_image in enumerate(batched_inputs):
# grd_texts = batch_per_image['groundings']['texts']
# grd_masks = []
# for anno_text in grd_texts:
# gtext = self.sem_seg_head.predictor.lang_encoder.get_text_token_embeddings([anno_text[0]], name='grounding', token=False, norm=False)
# token_emb = gtext['token_emb']
# tokens = gtext['tokens']
# grd_emb = token_emb[0][tokens['attention_mask'].bool()[0]]
# extra['grounding_tokens'] = grd_emb[:,None]
# assert len(images.tensor) == 1, "grounding evaluation only support single batch size now"
# features = self.backbone(images.tensor)
# outputs = self.sem_seg_head(features, extra=extra, task='grounding_eval')
# pred_gmasks = outputs['pred_masks'][idx,self.num_queries:2*self.num_queries-1]
# v_emb = outputs['pred_captions'][idx,self.num_queries:2*self.num_queries-1]
# t_emb = grd_emb[-1:]
# t_emb = t_emb / (t_emb.norm(dim=-1, keepdim=True) + 1e-7)
# v_emb = v_emb / (v_emb.norm(dim=-1, keepdim=True) + 1e-7)
# temperature = self.sem_seg_head.predictor.lang_encoder.logit_scale
# out_prob = vl_similarity(v_emb, t_emb, temperature=temperature)
# matched_id = out_prob.max(0)[1]
# grd_masks += [pred_gmasks[matched_id,:,:]]
# mask_pred_results += [torch.cat(grd_masks)]
        # NOTE: the commented block above matches one phrase at a time; the block below runs multi-object inference over all phrases at once.
mask_pred_results = []
for idx, batch_per_image in enumerate(batched_inputs):
grd_texts = batch_per_image['groundings']['texts']
grd_texts = [x[0] for x in grd_texts]
gtext = self.sem_seg_head.predictor.lang_encoder.get_text_token_embeddings(grd_texts, name='grounding', token=False, norm=False)
token_emb = gtext['token_emb']
tokens = gtext['tokens']
query_emb = token_emb[tokens['attention_mask'].bool()]
non_zero_query_mask = torch.zeros(query_emb[:,None].shape[:-1], dtype=torch.bool, device=query_emb.device)
extra['grounding_tokens'] = query_emb[:,None]
extra['grounding_nonzero_mask'] = non_zero_query_mask.t()
features = self.backbone(images.tensor)
outputs = self.sem_seg_head(features, extra=extra, task='grounding_eval')
pred_gmasks = outputs['pred_gmasks'][idx]
v_emb = outputs['pred_gtexts'][idx]
t_emb = gtext['class_emb']
t_emb = t_emb / (t_emb.norm(dim=-1, keepdim=True) + 1e-7)
v_emb = v_emb / (v_emb.norm(dim=-1, keepdim=True) + 1e-7)
temperature = self.sem_seg_head.predictor.lang_encoder.logit_scale
out_prob = vl_similarity(v_emb, t_emb, temperature=temperature)
matched_id = out_prob.max(0)[1]
mask_pred_results += [pred_gmasks[matched_id,:,:]]
for i in range(len(mask_pred_results)):
# upsample masks
mask_pred_results[i] = F.interpolate(
mask_pred_results[i][None,],
size=(images.tensor.shape[-2], images.tensor.shape[-1]),
mode="bilinear",
align_corners=False,
)[0]
processed_results = []
for mask_pred_result, input_per_image, image_size in zip(
mask_pred_results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
processed_results.append({})
mask_pred_result = retry_if_cuda_oom(sem_seg_postprocess)(
mask_pred_result, image_size, height, width
)
processed_results[-1]['grounding_mask'] = mask_pred_result
# compute bbox
# bbox = BitMasks(mask_pred_result > 0).get_bounding_boxes()
# bbox = BoxMode.convert(bbox.tensor, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
# processed_results[-1]['grounding_box'] = bbox
return processed_results
def evaluate_grounding_sptial(self, batched_inputs, mode):
images = [x["image"].to(self.device) for x in batched_inputs]
images = [(x - self.pixel_mean) / self.pixel_std for x in images]
images = ImageList.from_tensors(images, self.size_divisibility)
        assert len(images.tensor) == 1, "grounding evaluation only supports batch size 1 for now"
extra = {}
dilation = 3
pos_masks = (batched_inputs[0]['spatial_query']['rand_shape'].to(self.device)).unbind(0)
pos_masks = ImageList.from_tensors(pos_masks, self.size_divisibility).tensor
pos_masks = (F.conv2d(pos_masks.float(), self.dilation_kernel, padding=dilation//2) > 0).unbind(0)
neg_masks = (batched_inputs[0]['spatial_query']['rand_shape'].to(self.device) & False).unbind(0)
neg_masks = ImageList.from_tensors(neg_masks, self.size_divisibility).tensor.unbind(0)
mask_pred_results = []
for idx, batch_per_image in enumerate(batched_inputs):
grd_texts = batch_per_image['groundings']['texts']
grd_masks = []
for idx2, anno_text in enumerate(grd_texts):
extra.update({'spatial_query_pos_mask': [pos_masks[idx2]], 'spatial_query_neg_mask': [neg_masks[idx2]]})
gtext = self.sem_seg_head.predictor.lang_encoder.get_text_token_embeddings([anno_text[0]], name='grounding', token=False, norm=False)
token_emb = gtext['token_emb']
tokens = gtext['tokens']
grd_emb = token_emb[0][tokens['attention_mask'].bool()[0]]
non_zero_query_mask = torch.zeros(grd_emb[:,None].shape[:-1], dtype=torch.bool, device=grd_emb.device)
extra['grounding_tokens'] = grd_emb[:,None]
extra['grounding_nonzero_mask'] = non_zero_query_mask.t()
                assert len(images.tensor) == 1, "grounding evaluation only supports batch size 1 for now"
features = self.backbone(images.tensor)
outputs = self.sem_seg_head(features, extra=extra, task='grounding_eval')
pred_gmasks = outputs['pred_gmasks'][idx]
v_emb = outputs['pred_gtexts'][idx]
t_emb = gtext['class_emb']
t_emb = t_emb / (t_emb.norm(dim=-1, keepdim=True) + 1e-7)
v_emb = v_emb / (v_emb.norm(dim=-1, keepdim=True) + 1e-7)
temperature = self.sem_seg_head.predictor.lang_encoder.logit_scale
out_prob = vl_similarity(v_emb, t_emb, temperature=temperature)
matched_id = out_prob.max(0)[1]
grd_masks += [pred_gmasks[matched_id,:,:]]
mask_pred_results += [torch.cat(grd_masks)]
        # NOTE: the commented block below is the all-phrases (multi-object) inference variant, kept for reference.
# mask_pred_results = []
# for idx, batch_per_image in enumerate(batched_inputs):
# grd_texts = batch_per_image['groundings']['texts']
# grd_texts = [x[0] for x in grd_texts]
# gtext = self.sem_seg_head.predictor.lang_encoder.get_text_token_embeddings(grd_texts, name='grounding', token=False, norm=False)
# token_emb = gtext['token_emb']
# tokens = gtext['tokens']
# query_emb = token_emb[tokens['attention_mask'].bool()]
# non_zero_query_mask = torch.zeros(query_emb[:,None].shape[:-1], dtype=torch.bool, device=query_emb.device)
# extra['grounding_tokens'] = query_emb[:,None]
# extra['grounding_nonzero_mask'] = non_zero_query_mask.t()
# features = self.backbone(images.tensor)
# outputs = self.sem_seg_head(features, extra=extra, task='grounding_eval')
# pred_gmasks = outputs['pred_gmasks'][idx]
# v_emb = outputs['pred_gtexts'][idx]
# t_emb = gtext['class_emb']
# t_emb = t_emb / (t_emb.norm(dim=-1, keepdim=True) + 1e-7)
# v_emb = v_emb / (v_emb.norm(dim=-1, keepdim=True) + 1e-7)
# temperature = self.sem_seg_head.predictor.lang_encoder.logit_scale
# out_prob = vl_similarity(v_emb, t_emb, temperature=temperature)
# matched_id = out_prob.max(0)[1]
# mask_pred_results += [pred_gmasks[matched_id,:,:]]
for i in range(len(mask_pred_results)):
# upsample masks
mask_pred_results[i] = F.interpolate(
mask_pred_results[i][None,],
size=(images.tensor.shape[-2], images.tensor.shape[-1]),
mode="bilinear",
align_corners=False,
)[0]
processed_results = []
for mask_pred_result, input_per_image, image_size in zip(
mask_pred_results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
processed_results.append({})
mask_pred_result = retry_if_cuda_oom(sem_seg_postprocess)(
mask_pred_result, image_size, height, width
)
processed_results[-1]['grounding_mask'] = mask_pred_result
return processed_results
def prepare_targets(self, batched_inputs, images):
h_pad, w_pad = images.tensor.shape[-2:]
new_targets = []
for idx, batch_per_image in enumerate(batched_inputs):
targets_per_image = batch_per_image['instances'].to(self.device)
# pad gt
gt_masks = targets_per_image.gt_masks.tensor
padded_masks = torch.zeros((gt_masks.shape[0], h_pad, w_pad), dtype=gt_masks.dtype, device=gt_masks.device)
padded_masks[:, : gt_masks.shape[1], : gt_masks.shape[2]] = gt_masks
gt_boxes = targets_per_image.gt_boxes.tensor
ratio = torch.tensor([w_pad,h_pad,w_pad,h_pad]).to(gt_boxes.device)[None,:]
gt_boxes = gt_boxes / ratio
xc,yc,w,h = (gt_boxes[:,0] + gt_boxes[:,2])/2, (gt_boxes[:,1] + gt_boxes[:,3])/2, gt_boxes[:,2] - gt_boxes[:,0], gt_boxes[:,3] - gt_boxes[:,1]
gt_boxes = torch.stack([xc,yc,w,h]).permute(1,0)
target_dict = {
"labels": targets_per_image.gt_classes,
"is_things": targets_per_image.is_things,
"masks": padded_masks,
"boxes": gt_boxes,
}
if self.task_switch['spatial']:
# prepare targets for spatial query
target_dict['gt_spatial_masks'] = batch_per_image['spatial_query']['gt_masks']
if self.task_switch['grounding']:
grd_masks = batch_per_image['groundings']['masks']
grd_texts = batch_per_image['groundings']['texts']
grd_hash = batch_per_image['groundings']['hash']
grd_task = batch_per_image['groundings']['mode']
if len(grd_masks) == 0:
padded_masks = None
else:
padded_masks = torch.zeros((grd_masks.shape[0], h_pad, w_pad), dtype=grd_masks.dtype, device=grd_masks.device)
padded_masks[:, : grd_masks.shape[1], : grd_masks.shape[2]] = grd_masks
gtext = self.sem_seg_head.predictor.lang_encoder.get_text_token_embeddings(grd_texts, name='grounding', token=False, norm=False)
token_emb = gtext['token_emb']
tokens = gtext['tokens']
unique_hash_id = np.unique(grd_hash, return_index=True)[1]
                selected_mask = np.zeros(len(grd_hash)).astype(bool)  # np.bool was removed in recent NumPy versions
selected_mask[unique_hash_id] = True
selected_token_emb = token_emb[selected_mask]
selected_attn_mask = tokens['attention_mask'][selected_mask]
query_emb = selected_token_emb[selected_attn_mask.bool()]
class_idx = tokens['attention_mask'].sum(dim=-1) - 1
class_idx = torch.stack((torch.arange(len(class_idx), device=class_idx.device), class_idx)).tolist()
class_emb = token_emb[class_idx]
target_dict['grounding_masks'] = padded_masks
target_dict['grounding_query_embs'] = query_emb
target_dict['grounding_class_embs'] = class_emb
target_dict['grounding_hash'] = grd_hash
target_dict['grounding_task'] = grd_task
new_targets.append(target_dict)
return new_targets
def prepare_next_spaital_mask(self, outputs, batched_inputs):
gt_masks = [batched_inputs[i]['spatial_query']['gt_masks'] for i in range(len(batched_inputs))]
if self.training:
gt_masks = ImageList.from_tensors(gt_masks, self.size_divisibility).tensor
else:
gt_masks = ImageList.from_tensors(gt_masks, self.size_divisibility).tensor.transpose(0,1)
pred_masks = (F.interpolate(outputs['prev_mask'], size=gt_masks.shape[-2:], mode='bilinear', align_corners=False).sigmoid() > 0.5)
prev_masks = torch.stack(outputs['spatial_query_pos_mask']) | torch.stack(outputs['spatial_query_neg_mask'])
fn = gt_masks & (~(gt_masks & pred_masks)) & (~prev_masks) # fn: False Negative, gt:1, pred:0, prev:0
fp = (~gt_masks & pred_masks) & (~prev_masks) # fp: False Positive, gt:0, pred:1, prev:0
# compute iou between gt and pred
iou = (gt_masks & pred_masks).sum(list(range(1,len(fn.shape)))) / ((gt_masks | pred_masks).sum(dim=list(range(1,len(fn.shape)))) + 1e-8)
fn_sum = fn.sum(dim=list(range(1,len(fn.shape))))
fp_sum = fp.sum(dim=list(range(1,len(fp.shape))))
is_postive = fn_sum > fp_sum
# is_postive = torch.ones(len(fn_sum), device=torch.cuda.current_device()).bool()
select_mask = torch.stack([fn[i] if is_postive[i] else fp[i] for i in range(len(fn))])
# conv implementation
n,_,h,w=select_mask.shape
mask_dt = (distance_transform((~F.pad(select_mask, pad=(1, 1, 1, 1), mode='constant', value=0)).float())[:,:,1:-1,1:-1]).reshape(n,-1)
max_xy_idx = torch.stack([torch.arange(n), mask_dt.max(dim=-1)[1].cpu()]).tolist()
next_mask = torch.zeros(gt_masks.shape, device=torch.cuda.current_device()).bool()
next_mask = next_mask.view(n,-1)
next_mask[max_xy_idx] = True
next_mask = next_mask.reshape((n,1,h,w)).float()
dilation = 3
next_mask = F.conv2d(next_mask, self.dilation_kernel, padding=dilation//2) > 0
        # suppress the next click once the prediction is already good enough (IoU >= 0.925)
keep = (iou < 0.925)
next_mask = next_mask & keep.view(-1,1,1,1)
pos_mask = []
neg_mask = []
for idx, ip in enumerate(is_postive):
if ip:
pos_mask += [outputs['spatial_query_pos_mask'][idx] | next_mask[idx]]
neg_mask += [outputs['spatial_query_neg_mask'][idx]]
else:
pos_mask += [outputs['spatial_query_pos_mask'][idx]]
neg_mask += [outputs['spatial_query_neg_mask'][idx] | next_mask[idx]]
if 'false_positive_mask' in outputs:
fp = outputs['false_positive_mask'] | fp
return {'spatial_query_pos_mask': pos_mask, 'spatial_query_neg_mask': neg_mask, 'false_positive_mask': fp}
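    # Descriptive sketch of prepare_next_spaital_mask above: the false-negative and
    # false-positive regions of the current prediction are compared and the larger one is
    # chosen; a distance transform picks the interior point farthest from that region's
    # boundary, the point is dilated into a small disk with `dilation_kernel`, and the disk is
    # appended to the positive clicks (missed area) or negative clicks (hallucinated area).
    # Once the IoU with the ground truth reaches 0.925 no further click is added.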
def semantic_inference(self, mask_cls, mask_pred):
mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1]
mask_pred = mask_pred.sigmoid()
semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred)
return semseg
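    # Shape sketch for semantic_inference above, assuming Q queries, C classes plus one
    # no-object logit, and an H x W mask resolution:
    #   mask_cls:  [Q, C + 1] -> softmax -> drop the trailing no-object column -> [Q, C]
    #   mask_pred: [Q, H, W]  -> sigmoid
    #   semseg = einsum("qc,qhw->chw", mask_cls, mask_pred) -> [C, H, W] soft per-class map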
def panoptic_inference(self, mask_cls, mask_pred):
scores, labels = F.softmax(mask_cls, dim=-1).max(-1)
mask_pred = mask_pred.sigmoid()
keep = labels.ne(self.sem_seg_head.num_classes) & (scores > self.object_mask_threshold)
cur_scores = scores[keep]
cur_classes = labels[keep]
cur_masks = mask_pred[keep]
cur_mask_cls = mask_cls[keep]
cur_mask_cls = cur_mask_cls[:, :-1]
cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks
h, w = cur_masks.shape[-2:]
panoptic_seg = torch.zeros((h, w), dtype=torch.int32, device=cur_masks.device)
segments_info = []
current_segment_id = 0
if cur_masks.shape[0] == 0:
# We didn't detect any mask :(
return panoptic_seg, segments_info
else:
# take argmax
cur_mask_ids = cur_prob_masks.argmax(0)
stuff_memory_list = {}
for k in range(cur_classes.shape[0]):
pred_class = cur_classes[k].item()
isthing = pred_class in self.metadata.thing_dataset_id_to_contiguous_id.values()
mask_area = (cur_mask_ids == k).sum().item()
original_area = (cur_masks[k] >= 0.5).sum().item()
mask = (cur_mask_ids == k) & (cur_masks[k] >= 0.5)
if mask_area > 0 and original_area > 0 and mask.sum().item() > 0:
if mask_area / original_area < self.overlap_threshold:
continue
# merge stuff regions
if not isthing:
if int(pred_class) in stuff_memory_list.keys():
panoptic_seg[mask] = stuff_memory_list[int(pred_class)]
continue
else:
stuff_memory_list[int(pred_class)] = current_segment_id + 1
current_segment_id += 1
panoptic_seg[mask] = current_segment_id
segments_info.append(
{
"id": current_segment_id,
"isthing": bool(isthing),
"category_id": int(pred_class),
}
)
return panoptic_seg, segments_info
def instance_inference(self, mask_cls, mask_pred, box_pred):
# mask_pred is already processed to have the same shape as original input
image_size = mask_pred.shape[-2:]
# [Q, K]
scores = F.softmax(mask_cls, dim=-1)[:, :-1]
labels = torch.arange(self.sem_seg_head.num_classes, device=self.device).unsqueeze(0).repeat(self.num_queries, 1).flatten(0, 1)
# scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.num_queries, sorted=False)
scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.test_topk_per_image, sorted=False)
labels_per_image = labels[topk_indices]
topk_indices = (topk_indices // self.sem_seg_head.num_classes)
# mask_pred = mask_pred.unsqueeze(1).repeat(1, self.sem_seg_head.num_classes, 1).flatten(0, 1)
mask_pred = mask_pred[topk_indices]
if box_pred is not None:
box_pred = box_pred[topk_indices]
# if this is panoptic segmentation, we only keep the "thing" classes
if self.panoptic_on:
keep = torch.zeros_like(scores_per_image).bool()
for i, lab in enumerate(labels_per_image):
keep[i] = lab in self.metadata.thing_dataset_id_to_contiguous_id.values()
scores_per_image = scores_per_image[keep]
labels_per_image = labels_per_image[keep]
mask_pred = mask_pred[keep]
if box_pred is not None:
box_pred = box_pred[keep]
result = Instances(image_size)
# mask (before sigmoid)
result.pred_masks = (mask_pred > 0).float()
# result.pred_boxes = Boxes(torch.zeros(mask_pred.size(0), 4))
# Uncomment the following to get boxes from masks (this is slow)
if box_pred is not None:
result.pred_boxes = BitMasks(mask_pred > 0).get_bounding_boxes()
else:
result.pred_boxes = Boxes(torch.zeros(mask_pred.size(0), 4))
# calculate average mask prob
mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * result.pred_masks.flatten(1)).sum(1) / (result.pred_masks.flatten(1).sum(1) + 1e-6)
result.scores = scores_per_image * mask_scores_per_image
result.pred_classes = labels_per_image
return result
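# --- Illustrative sketch (not part of the original model) -------------------
# A minimal, standalone illustration of the per-pixel assignment step used in
# `panoptic_inference` above: every pixel is assigned to the query whose
# score-weighted mask probability is highest. The shapes below are made up for
# the example; `torch` is already imported at the top of this module.
def _example_panoptic_argmax():
    num_queries, h, w = 5, 4, 4
    scores = torch.rand(num_queries)            # per-query confidence
    masks = torch.rand(num_queries, h, w)       # per-query mask probabilities
    prob_masks = scores.view(-1, 1, 1) * masks  # score-weighted masks, as in cur_prob_masks
    return prob_masks.argmax(0)                 # (h, w) tensor of query indices per pixel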
@register_model
def get_segmentation_model(cfg, **kwargs):
return SEEM_Model(cfg) | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/architectures/seem_model.py |
from .seem_model import *
from .build import build_model | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/architectures/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/util/misc.py
# Modified by Xueyan Zou
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
from typing import List, Optional
import torch
import torch.distributed as dist
import torchvision
from torch import Tensor
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
def to(self, device):
# type: (Device) -> NestedTensor # noqa
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
# TODO make this more general
if tensor_list[0].ndim == 3:
if torchvision._is_tracing():
# nested_tensor_from_tensor_list() does not export well to ONNX
# call _onnx_nested_tensor_from_tensor_list() instead
return _onnx_nested_tensor_from_tensor_list(tensor_list)
# TODO make it support different-sized images
max_size = _max_by_axis([list(img.shape) for img in tensor_list])
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = [len(tensor_list)] + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], : img.shape[2]] = False
elif tensor_list[0].ndim == 2:
if torchvision._is_tracing():
# nested_tensor_from_tensor_list() does not export well to ONNX
# call _onnx_nested_tensor_from_tensor_list() instead
return _onnx_nested_tensor_from_tensor_list(tensor_list)
# TODO make it support different-sized images
max_size = _max_by_axis([list(txt.shape) for txt in tensor_list])
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = [len(tensor_list)] + max_size
b, c, l = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, l), dtype=torch.bool, device=device)
for txt, pad_txt, m in zip(tensor_list, tensor, mask):
pad_txt[: txt.shape[0], : txt.shape[1]] = txt
m[: txt.shape[1]] = False
else:
raise ValueError("not supported")
return NestedTensor(tensor, mask)
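# --- Illustrative sketch (not part of the original utilities) ---------------
# A minimal example of how `nested_tensor_from_tensor_list` batches two images
# of different spatial sizes: both are zero-padded to the per-batch maximum
# shape, and the boolean mask is True exactly over the padded region.
def _example_nested_tensor():
    imgs = [torch.rand(3, 5, 7), torch.rand(3, 6, 4)]
    tensors, mask = nested_tensor_from_tensor_list(imgs).decompose()
    return tensors.shape, mask.shape  # (2, 3, 6, 7) and (2, 6, 7)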
def _collate_and_pad_divisibility(tensor_list: list, div=32):
max_size = []
for i in range(tensor_list[0].dim()):
max_size_i = torch.max(
torch.tensor([img.shape[i] for img in tensor_list]).to(torch.float32)
).to(torch.int64)
max_size.append(max_size_i)
max_size = tuple(max_size)
c,h,w = max_size
pad_h = (div - h % div) if h % div != 0 else 0
pad_w = (div - w % div) if w % div != 0 else 0
max_size = (c,h+pad_h,w+pad_w)
# work around for
# pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
# m[: img.shape[1], :img.shape[2]] = False
# which is not yet supported in onnx
padded_imgs = []
padded_masks = []
for img in tensor_list:
padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
padded_imgs.append(padded_img)
m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
padded_masks.append(padded_mask.to(torch.bool))
return padded_imgs
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
max_size = []
for i in range(tensor_list[0].dim()):
max_size_i = torch.max(
torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)
).to(torch.int64)
max_size.append(max_size_i)
max_size = tuple(max_size)
# work around for
# pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
# m[: img.shape[1], :img.shape[2]] = False
# which is not yet supported in onnx
padded_imgs = []
padded_masks = []
for img in tensor_list:
padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
padded_imgs.append(padded_img)
m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
padded_masks.append(padded_mask.to(torch.bool))
tensor = torch.stack(padded_imgs)
mask = torch.stack(padded_masks)
return NestedTensor(tensor, mask=mask)
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_iou(gt_masks, pred_masks, ignore_label=-1):
rev_ignore_mask = ~(gt_masks == ignore_label)
gt_masks = gt_masks.bool()
n,h,w = gt_masks.shape
intersection = ((gt_masks & pred_masks) & rev_ignore_mask).reshape(n,h*w).sum(dim=-1)
union = ((gt_masks | pred_masks) & rev_ignore_mask).reshape(n,h*w).sum(dim=-1)
ious = (intersection / union)
return ious | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/utils/misc.py |
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import functools
import inspect
def configurable(init_func=None, *, from_config=None):
"""
Decorate a function or a class's __init__ method so that it can be called
with a :class:`CfgNode` object using a :func:`from_config` function that translates
:class:`CfgNode` to arguments.
Examples:
::
# Usage 1: Decorator on __init__:
class A:
@configurable
def __init__(self, a, b=2, c=3):
pass
@classmethod
def from_config(cls, cfg): # 'cfg' must be the first argument
# Returns kwargs to be passed to __init__
return {"a": cfg.A, "b": cfg.B}
a1 = A(a=1, b=2) # regular construction
a2 = A(cfg) # construct with a cfg
a3 = A(cfg, b=3, c=4) # construct with extra overwrite
# Usage 2: Decorator on any function. Needs an extra from_config argument:
        @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
def a_func(a, b=2, c=3):
pass
a1 = a_func(a=1, b=2) # regular call
a2 = a_func(cfg) # call with a cfg
a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
Args:
init_func (callable): a class's ``__init__`` method in usage 1. The
class must have a ``from_config`` classmethod which takes `cfg` as
the first argument.
from_config (callable): the from_config function in usage 2. It must take `cfg`
as its first argument.
"""
if init_func is not None:
assert (
inspect.isfunction(init_func)
and from_config is None
and init_func.__name__ == "__init__"
), "Incorrect use of @configurable. Check API documentation for examples."
@functools.wraps(init_func)
def wrapped(self, *args, **kwargs):
try:
from_config_func = type(self).from_config
except AttributeError as e:
raise AttributeError(
"Class with @configurable must have a 'from_config' classmethod."
) from e
if not inspect.ismethod(from_config_func):
raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
init_func(self, **explicit_args)
else:
init_func(self, *args, **kwargs)
return wrapped
else:
if from_config is None:
return configurable # @configurable() is made equivalent to @configurable
assert inspect.isfunction(
from_config
), "from_config argument of configurable must be a function!"
def wrapper(orig_func):
@functools.wraps(orig_func)
def wrapped(*args, **kwargs):
if _called_with_cfg(*args, **kwargs):
explicit_args = _get_args_from_config(from_config, *args, **kwargs)
return orig_func(**explicit_args)
else:
return orig_func(*args, **kwargs)
wrapped.from_config = from_config
return wrapped
return wrapper
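# --- Illustrative sketch (not part of the original module) ------------------
# A hypothetical class showing how @configurable dispatches: passing a plain
# dict as the first argument routes construction through `from_config`, while
# explicit keyword arguments construct the object directly. The class name and
# config keys below are made up purely for illustration.
class _ExampleHead:
    @configurable
    def __init__(self, num_classes, hidden_dim=256):
        self.num_classes = num_classes
        self.hidden_dim = hidden_dim
    @classmethod
    def from_config(cls, cfg):  # 'cfg' must be the first argument
        return {"num_classes": cfg["NUM_CLASSES"], "hidden_dim": cfg["HIDDEN_DIM"]}
# _ExampleHead({"NUM_CLASSES": 80, "HIDDEN_DIM": 512})  # dict -> from_config path
# _ExampleHead(num_classes=80)                          # regular construction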
def _called_with_cfg(*args, **kwargs):
"""
Returns:
bool: whether the arguments contain CfgNode and should be considered
forwarded to from_config.
"""
from omegaconf import DictConfig
if len(args) and isinstance(args[0], (dict)):
return True
if isinstance(kwargs.pop("cfg", None), (dict)):
return True
# `from_config`'s first argument is forced to be "cfg".
# So the above check covers all cases.
return False
def _get_args_from_config(from_config_func, *args, **kwargs):
"""
Use `from_config` to obtain explicit arguments.
Returns:
dict: arguments to be used for cls.__init__
"""
signature = inspect.signature(from_config_func)
if list(signature.parameters.keys())[0] != "cfg":
if inspect.isfunction(from_config_func):
name = from_config_func.__name__
else:
name = f"{from_config_func.__self__}.from_config"
raise TypeError(f"{name} must take 'cfg' as the first argument!")
support_var_arg = any(
param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
for param in signature.parameters.values()
)
if support_var_arg: # forward all arguments to from_config, if from_config accepts them
ret = from_config_func(*args, **kwargs)
else:
# forward supported arguments to from_config
supported_arg_names = set(signature.parameters.keys())
extra_kwargs = {}
for name in list(kwargs.keys()):
if name not in supported_arg_names:
extra_kwargs[name] = kwargs.pop(name)
ret = from_config_func(*args, **kwargs)
# forward the other arguments to __init__
ret.update(extra_kwargs)
return ret | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/utils/config.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_xyxy_to_cxcywh(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2,
(x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
def box_xywh_to_xyxy(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [x0, y0, (x0 + x1), (y0 + y1)]
return torch.stack(b, dim=-1)
# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
wh = (rb - lt).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
iou = inter / union
return iou, union
def generalized_box_iou(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/
The boxes should be in [x0, y0, x1, y1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
    # degenerate boxes give inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
return iou - (area - union) / area
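# --- Illustrative sketch (not part of the original module) ------------------
# A tiny numerical example of `generalized_box_iou`: for two disjoint boxes the
# IoU is 0 but the GIoU is negative (here -1/3), penalising the empty space in
# the smallest enclosing box.
def _example_generalized_box_iou():
    boxes1 = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
    boxes2 = torch.tensor([[2.0, 0.0, 3.0, 1.0]])
    return generalized_box_iou(boxes1, boxes2)  # tensor([[-0.3333]])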
def masks_to_boxes(masks):
"""Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
Returns a [N, 4] tensors, with the boxes in xyxy format
"""
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device)
h, w = masks.shape[-2:]
y = torch.arange(0, h, dtype=torch.float)
x = torch.arange(0, w, dtype=torch.float)
y, x = torch.meshgrid(y, x)
x_mask = (masks * x.unsqueeze(0))
x_max = x_mask.flatten(1).max(-1)[0]
x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
y_mask = (masks * y.unsqueeze(0))
y_max = y_mask.flatten(1).max(-1)[0]
y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
return torch.stack([x_min, y_min, x_max, y_max], 1) | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/utils/box_ops.py |
from .config import *
from .misc import *
from .box_ops import *
from .it_contrastive import * | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/utils/__init__.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
def is_dist_initialized():
return torch.distributed.is_initialized()
def get_world_size():
if is_dist_initialized():
return torch.distributed.get_world_size()
return 1
def all_gather_grad(x):
if get_world_size() > 1:
all_x = [torch.zeros_like(x) for _ in range(get_world_size())]
torch.distributed.all_gather(all_x, x)
all_x[torch.distributed.get_rank()] = x
x = torch.cat(all_x, dim=0)
return x
@torch.no_grad()
def all_gather_nograd(tensor):
# from albef
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
if get_world_size() > 1:
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
tensor = torch.cat(tensors_gather, dim=0)
return tensor
def image_text_contrastive_loss(image_feat, text_feat, temperature, image_id=None, text_id=None):
# add the following 4 lines
image_feat = all_gather_grad(image_feat)
text_feat = all_gather_grad(text_feat)
logits = torch.matmul(image_feat, text_feat.t())
logits /= temperature
if image_id is None and text_id is None:
gt = torch.arange(logits.shape[0], device=logits.device)
loss1 = F.cross_entropy(logits, gt)
loss2 = F.cross_entropy(logits.t(), gt)
else:
image_id = all_gather_grad(image_id)
text_id = all_gather_grad(text_id)
gt_image = image_id.reshape((-1, 1)) == image_id.reshape((1, -1))
gt_text = text_id.reshape((-1, 1)) == text_id.reshape((1, -1))
gt = torch.logical_or(gt_image, gt_text)
loss1 = -torch.sum(gt * F.log_softmax(logits, dim=1)) / gt.sum()
loss2 = -torch.sum(gt.t() * F.log_softmax(logits.t(), dim=1)) / gt.sum()
return (loss1 + loss2) / 2 * get_world_size() # scale it up by the number of GPUs
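# --- Illustrative sketch (not part of the original module) ------------------
# A minimal single-process call of `image_text_contrastive_loss` on matched
# image/text pairs; without torch.distributed initialised the all_gather calls
# are no-ops and the loss is a symmetric cross-entropy over the in-batch
# similarity matrix.
def _example_contrastive_loss():
    image_feat = F.normalize(torch.randn(4, 32), dim=-1)
    text_feat = F.normalize(torch.randn(4, 32), dim=-1)
    return image_text_contrastive_loss(image_feat, text_feat, temperature=0.07)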
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/utils/it_contrastive.py |
from .registry import model_entrypoints
from .registry import is_model
def build_language_encoder(config, **kwargs):
model_name = config['MODEL']['TEXT']['ARCH']
if not is_model(model_name):
        raise ValueError(f'Unknown model: {model_name}')
return model_entrypoints(model_name)(config, **kwargs) | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/language/build.py |
import random
import nltk
nltk.data.path.append('/mnt/data/nltk_data')
import numpy as np
from utils.constants import IMAGENET_DEFAULT_TEMPLATES
def get_tag(tokenized, tags):
if not isinstance(tags, (list, tuple)):
tags = [tags]
ret = []
for (word, pos) in nltk.pos_tag(tokenized):
for tag in tags:
if pos == tag:
ret.append(word)
return ret
def get_noun_phrase(tokenized):
# Taken from Su Nam Kim Paper...
grammar = r"""
NBAR:
{<NN.*|JJ>*<NN.*>} # Nouns and Adjectives, terminated with Nouns
NP:
{<NBAR>}
{<NBAR><IN><NBAR>} # Above, connected with in/of/etc...
"""
chunker = nltk.RegexpParser(grammar)
chunked = chunker.parse(nltk.pos_tag(tokenized))
continuous_chunk = []
current_chunk = []
for subtree in chunked:
if isinstance(subtree, nltk.Tree):
current_chunk.append(' '.join([token for token, pos in subtree.leaves()]))
elif current_chunk:
named_entity = ' '.join(current_chunk)
if named_entity not in continuous_chunk:
continuous_chunk.append(named_entity)
current_chunk = []
else:
continue
return continuous_chunk
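# --- Illustrative sketch (not part of the original module) ------------------
# Example use of the chunker above. It assumes the NLTK 'punkt' and
# 'averaged_perceptron_tagger' resources are available locally, so it is kept
# inside a function rather than executed at import time.
def _example_get_noun_phrase():
    tokenized = nltk.word_tokenize("a black dog chased a red ball in the park.")
    return get_noun_phrase(tokenized)  # noun phrases such as 'black dog' and 'red ball'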
def text_noun_with_prompt_all(text, phrase_prob=0.0, append_text=True):
tokenized = nltk.word_tokenize(text)
if random.random() >= phrase_prob:
nouns = get_tag(tokenized, ['NN', 'NNS', 'NNP'])
else:
nouns = get_noun_phrase(tokenized)
prompt_texts = [np.random.choice(IMAGENET_DEFAULT_TEMPLATES).format(noun) for noun in nouns]
if append_text:
prompt_texts += [text]
nouns += [text]
return prompt_texts, nouns | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/language/misc.py |
_model_entrypoints = {}
def register_model(fn):
module_name_split = fn.__module__.split('.')
model_name = module_name_split[-1]
_model_entrypoints[model_name] = fn
return fn
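# --- Illustrative note (not part of the original module) --------------------
# `register_model` keys the registry by the *module name* of the decorated
# factory, not by the function name: the factory in vlpencoder.py is stored
# under 'vlpencoder', which is what MODEL.TEXT.ARCH resolves against in
# build.py. Registration happens as a side effect of importing the module.
def _example_registered_models():
    return sorted(_model_entrypoints.keys())  # e.g. ['fixvlpencoder', 'vlpencoder'] once imported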
def model_entrypoints(model_name):
return _model_entrypoints[model_name]
def is_model(model_name):
return model_name in _model_entrypoints | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/language/registry.py |
from .fixvlpencoder import *
from .vlpencoder import *
from .build import build_language_encoder | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/language/__init__.py |
import pickle
from distutils import log
import torch
import torch.nn.functional as F
import torch.distributed as dist
from einops import rearrange, repeat
from timm.loss import SoftTargetCrossEntropy
soft_cross_entropy = SoftTargetCrossEntropy()
def is_dist_initialized():
return torch.distributed.is_initialized()
def get_world_size():
if is_dist_initialized():
return torch.distributed.get_world_size()
return 1
def get_rank():
if is_dist_initialized():
return dist.get_rank()
return 0
def all_gather_grad(x):
if get_world_size() > 1:
all_x = [torch.zeros_like(x) for _ in range(get_world_size())]
torch.distributed.all_gather(all_x, x)
all_x[torch.distributed.get_rank()] = x
x = torch.cat(all_x, dim=0)
return x
def vl_multilabel_contrastive_loss(image_feat, text_feat, temperature=1):
"""
Args:
image_feat (torch.Tensor): shape [B, L1, C] # B: batch_size, L1: 1, C: 256
text_feat (torch.Tensor): shape [B, L2, C] # B:batch_size, L2: number of selected nouns, C: 256
Returns:
"""
# [B, L1, C], L1 = 1
# image_feat = F.normalize(image_feat, dim=-1)
# [B, L2, C]
# text_feat = F.normalize(text_feat, dim=-1)
# HACK: normalize outside
# [B, L1, L2]
dist_per_img = image_feat @ rearrange(text_feat, 'b l c -> b c l')
# [B, L2, L1]
dist_per_text = text_feat @ rearrange(image_feat, 'b l c -> b c l')
batch = image_feat.shape[0]
img_len = image_feat.shape[1]
text_len = text_feat.shape[1]
# [B, L1, L2]
pos_labels_batch_img = rearrange(torch.ones_like(dist_per_text) / dist_per_text.size(1), 'b l2 l1 -> b l1 l2')
# [B, L2, L1]
pos_labels_batch_text = rearrange(torch.ones_like(dist_per_img) / dist_per_img.size(1), 'b l1 l2 -> b l2 l1')
image_x = rearrange(image_feat, 'b l c -> (b l) c')
text_x = rearrange(text_feat, 'b l c -> (b l) c')
logits_per_img = image_x @ all_gather_grad(text_x).t()
logits_per_text = text_x @ all_gather_grad(image_x).t()
# get label globally
# [B, L1, B, L2, W]
labels_per_img = F.one_hot(
torch.ones(batch, img_len, batch, text_len, dtype=torch.long, device=image_x.device) * get_rank(),
num_classes=get_world_size()).to(image_x.dtype)
labels_per_img *= rearrange(pos_labels_batch_img, 'b l1 l2 -> b l1 1 l2 1') * repeat(
torch.eye(batch, dtype=image_x.dtype, device=image_x.device), 'b1 b2 -> b1 1 b2 1 1')
# [BxL1, WxBxL2]
labels_per_img = rearrange(labels_per_img, 'b1 l1 b2 l2 w -> (b1 l1) (w b2 l2)')
# [B, L2, B, L1, W]
labels_per_text = F.one_hot(
torch.ones(batch, text_len, batch, img_len, dtype=torch.long, device=text_x.device) * get_rank(),
num_classes=get_world_size()).to(text_x.dtype)
labels_per_text *= rearrange(pos_labels_batch_text, 'b l2 l1 -> b l2 1 l1 1') * repeat(
torch.eye(batch, dtype=text_x.dtype, device=image_x.device), 'b2 b1 -> b2 1 b1 1 1')
# [BxL2, WxBxL1]
labels_per_text = rearrange(labels_per_text, 'b2 l2 b1 l1 w -> (b2 l2) (w b1 l1)')
logit_scale = temperature.exp().clamp(max=100)
loss_img = soft_cross_entropy(logit_scale * logits_per_img, labels_per_img)
loss_text = soft_cross_entropy(logit_scale * logits_per_text, labels_per_text)
loss = 0.5 * (loss_img + loss_text)
return loss
def vl_contrastive_loss(image_feat, text_feat, temperature=1):
# if image_id or text_id is None, it should be None across all GPUs
# image_feat = F.normalize(image_feat, dim=1)
# text_feat = F.normalize(text_feat, dim=1)
# handle normalization outside
# add the following 4 lines
image_feat = all_gather_grad(image_feat)
text_feat = all_gather_grad(text_feat)
logits = torch.matmul(image_feat, text_feat.t())
logit_scale = temperature.exp().clamp(max=100)
gt = torch.arange(logits.shape[0], device=logits.device)
loss1 = F.cross_entropy(logit_scale * logits, gt)
loss2 = F.cross_entropy(logit_scale * logits.t(), gt)
return (loss1 + loss2) / 2 # scale it up by the number of GPUs
def all_gather_pickle(data, device):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device)
# obtain Tensor size of each rank
local_size = torch.LongTensor([tensor.numel()]).cuda()
size_list = [torch.LongTensor([0]).cuda() for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).cuda())
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).cuda()
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def all_gather_arbitary_tensor(tensor):
if get_world_size() > 1:
device = tensor.device
tensor_batch = all_gather_pickle(tensor.cpu(), device)
tensor_batch = [x.to(device) for x in tensor_batch]
tensor_batch[torch.distributed.get_rank()] = tensor
tensor_batch = torch.cat(tensor_batch, dim=0)
else:
tensor_batch = tensor
return tensor_batch
def ql_contrastive_loss(image_feat, text_feat, temperature=1):
# add the following 4 lines
image_feat = all_gather_arbitary_tensor(image_feat)
text_feat = all_gather_arbitary_tensor(text_feat)
logits = torch.matmul(image_feat, text_feat.t())
logit_scale = temperature.exp().clamp(max=100)
gt = torch.arange(logits.shape[0], device=logits.device)
loss1 = F.cross_entropy(logit_scale * logits, gt)
loss2 = F.cross_entropy(logit_scale * logits.t(), gt)
return (loss1 + loss2) / 2 # scale it up by the number of GPUs
def vl_similarity(image_feat, text_feat, temperature=1):
# Only support single GPU for now.
logits = torch.matmul(image_feat, text_feat.t())
logits = temperature.exp().clamp(max=100) * logits
return logits
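# --- Illustrative sketch (not part of the original module) ------------------
# `vl_similarity` expects `temperature` to be the learned logit-scale tensor
# (see LanguageEncoder.logit_scale), not a plain float, since `.exp()` is
# called on it. A minimal single-GPU example:
def _example_vl_similarity():
    image_feat = F.normalize(torch.randn(3, 512), dim=-1)
    text_feat = F.normalize(torch.randn(5, 512), dim=-1)
    logit_scale = torch.tensor(2.0)  # stands in for the learned parameter
    return vl_similarity(image_feat, text_feat, temperature=logit_scale)  # (3, 5) logits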
def ql_multi_contrastive_loss(image_feat, text_feat, text_hash, temperature=1):
# add the following 4 lines
image_feat = all_gather_arbitary_tensor(image_feat)
text_feat = all_gather_arbitary_tensor(text_feat)
text_hash_batch = all_gather_pickle(text_hash, text_feat.device)
text_hash_all = torch.cat(text_hash_batch)
text_hash_all_unique = torch.unique(text_hash_all).tolist()
gt = torch.zeros((image_feat.shape[0], len(text_hash_all_unique)), device=text_feat.device)
text_hash_all = text_hash_all.tolist()
text_feat_unique = torch.stack([text_feat[text_hash_all.index(txt)] for txt in text_hash_all_unique])
for idx, txt in enumerate(text_hash_all):
gt[idx][text_hash_all_unique.index(txt)] = 1
logits = torch.matmul(image_feat, text_feat_unique.t())
logits = logits*temperature.exp().clamp(max=100)
loss_img = soft_cross_entropy(logits, gt)
loss_text = soft_cross_entropy(logits.t(), gt.t() / gt.t().sum(-1, keepdim=True))
loss = 0.7 * loss_img + 0.3 * loss_text
return loss
def image_text_contrastive_loss_queue(image_feat_inp, text_feat_inp, lang_enc, training):
# add the following 4 lines
image_feat = all_gather_grad(image_feat_inp.contiguous())
text_feat = all_gather_grad(text_feat_inp.contiguous())
image_feat = image_feat / (image_feat.norm(dim=-1, keepdim=True) + 1e-7)
text_feat = text_feat / (text_feat.norm(dim=-1, keepdim=True) + 1e-7)
temperature = lang_enc.logit_scale
logits = torch.matmul(image_feat, text_feat.t())
logit_scale = temperature.exp().clamp(max=100)
gt = torch.arange(logits.shape[0], device=logits.device)
loss1 = F.cross_entropy(logit_scale * logits, gt)
loss2 = F.cross_entropy(logit_scale * logits.t(), gt)
return (loss1 + loss2) / 2 # scale it up by the number of GPUs | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/language/loss.py |
# --------------------------------------------------------
# X-Decoder -- Generalized Decoding for Pixel, Image, and Language
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Xueyan Zou ([email protected]), Jianwei Yang ([email protected])
# --------------------------------------------------------
import torch
from torch import nn
from torch.nn import functional as F
from timm.models.layers import trunc_normal_
from .registry import register_model
from ..utils import configurable
from .LangEncoder import build_tokenizer, build_lang_encoder
from utils.misc import prompt_engineering, get_prompt_templates
class LanguageEncoder(nn.Module):
@configurable
def __init__(
self,
tokenizer,
tokenizer_type,
lang_encoder,
lang_projection,
max_token_num,
):
super().__init__()
self.tokenizer = tokenizer
self.tokenizer_type = tokenizer_type
self.lang_encoder = lang_encoder
self.lang_proj = lang_projection
self.max_token_num = max_token_num
self.logit_scale = nn.Parameter(torch.ones([]))
@classmethod
def from_config(cls, cfg):
tokenizer = build_tokenizer(cfg['MODEL']['TEXT'])
tokenizer_type = cfg['MODEL']['TEXT']['TOKENIZER']
lang_encoder = build_lang_encoder(cfg['MODEL']['TEXT'], tokenizer, cfg['VERBOSE'])
max_token_num = cfg['MODEL']['TEXT']['CONTEXT_LENGTH']
dim_lang = cfg['MODEL']['TEXT']['WIDTH']
dim_projection = cfg['MODEL']['DIM_PROJ']
lang_projection = nn.Parameter(torch.empty(dim_lang, dim_projection))
trunc_normal_(lang_projection, std=.02)
return {
"tokenizer": tokenizer,
"tokenizer_type": tokenizer_type,
"lang_encoder": lang_encoder,
"lang_projection": lang_projection,
"max_token_num": max_token_num,
}
def get_text_embeddings(self, class_names, name='default', is_eval=False, add_bgd=False, prompt=True, norm=True):
if not is_eval:
if prompt:
# randomly sample one template
arbitary_concepts = [
prompt_engineering(class_names[label].replace('-other','').replace('-merged','').replace('-stuff',''), topk=10000, suffix='.') \
for label in range(len(class_names))
]
if add_bgd:
arbitary_concepts.append("A background in coco.")
else:
arbitary_concepts = class_names
input_ids = []
attention_masks = []
for txt in arbitary_concepts:
tokens = self.tokenizer(
txt, padding='max_length', truncation=True, max_length=self.max_token_num, return_tensors='pt'
)
tokens['input_ids'].squeeze_()
tokens['attention_mask'].squeeze_()
input_ids.append(tokens['input_ids'])
attention_masks.append(tokens['attention_mask'])
arbitary_tokens = torch.stack(input_ids)
arbitary_attention_masks = torch.stack(attention_masks)
text_emb = self.forward_language((arbitary_tokens.cuda(), arbitary_attention_masks.cuda()), norm=norm)
setattr(self, '{}_text_embeddings'.format(name), text_emb)
else:
with torch.no_grad():
def extract_mean_emb(txts):
tokens = self.tokenizer(
txts, padding='max_length', truncation=True, max_length=self.max_token_num, return_tensors='pt'
)
clss_embedding = self.forward_language((tokens['input_ids'].cuda(), tokens['attention_mask'].cuda()), norm=norm)
clss_embedding = clss_embedding.mean(dim=0)
clss_embedding /= clss_embedding.norm()
return clss_embedding
templates = get_prompt_templates()
clss_embeddings = []
if prompt:
for clss in class_names:
txts = [template.format(clss.replace('-other','').replace('-merged','').replace('-stuff','')) for template in templates]
clss_embeddings.append(extract_mean_emb(txts))
else:
clss_embeddings.append(extract_mean_emb(class_names))
if add_bgd:
txts = ["A background in coco."]
clss_embeddings.append(extract_mean_emb(txts))
text_emb = torch.stack(clss_embeddings, dim=0)
setattr(self, '{}_text_embeddings'.format(name), text_emb)
def get_text_token_embeddings(self, txts, name='default', token=False, norm=False):
if not token:
tokens = self.tokenizer(
txts, padding='max_length', truncation=True, max_length=self.max_token_num, return_tensors='pt'
)
tokens = {key: value.cuda() for key, value in tokens.items()}
else:
tokens = txts
token_emb, class_emb = self.forward_language_token((tokens['input_ids'], tokens['attention_mask']), norm=norm)
ret = {"tokens": tokens,
"token_emb": token_emb,
"class_emb": class_emb,}
setattr(self, '{}_token_embeddings'.format(name), ret)
return ret
def forward_language(self, texts, norm=True):
x = self.lang_encoder(*texts)
x = x['last_hidden_state']
if self.tokenizer_type == 'clip':
x = x[torch.arange(x.size(0)), texts[0].argmax(dim=-1)]
else:
x = x[:, 0]
x = x @ self.lang_proj
if norm:
x = x / (x.norm(dim=-1, keepdim=True) + 1e-7)
return x
def forward_language_token(self, texts, norm=False):
x = self.lang_encoder(*texts)
token_x = x['last_hidden_state']
if self.tokenizer_type == 'clip':
class_x = token_x[torch.arange(token_x.size(0)), texts[0].argmax(dim=-1)]
else:
class_x = token_x[:, 0]
class_x = class_x @ self.lang_proj
token_x = token_x @ self.lang_proj
if norm:
class_x = class_x / (class_x.norm(dim=-1, keepdim=True) + 1e-7)
token_x = token_x / (token_x.norm(dim=-1, keepdim=True) + 1e-7)
return token_x, class_x
def compute_similarity(self, v_emb, name='default', fake=False):
if fake:
return None
v_emb = v_emb / (v_emb.norm(dim=-1, keepdim=True) + 1e-7)
t_emb = getattr(self, '{}_text_embeddings'.format(name))
output = self.logit_scale.exp() * v_emb @ t_emb.unsqueeze(0).transpose(1, 2)
return output
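# --- Illustrative sketch (not part of the original module) ------------------
# The math behind `compute_similarity` above, on dummy tensors: visual query
# embeddings are L2-normalised and matched against cached per-class text
# embeddings with a learned logit scale. All shapes are made up for the example.
def _example_class_similarity():
    logit_scale = torch.ones([])
    v_emb = torch.randn(2, 100, 512)                   # (batch, num_queries, dim)
    t_emb = F.normalize(torch.randn(80, 512), dim=-1)  # (num_classes, dim)
    v_emb = v_emb / (v_emb.norm(dim=-1, keepdim=True) + 1e-7)
    return logit_scale.exp() * v_emb @ t_emb.unsqueeze(0).transpose(1, 2)  # (2, 100, 80)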
@register_model
def get_language_model(cfg, **kwargs):
return LanguageEncoder(cfg) | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/language/vlpencoder.py |
from importlib.metadata import requires
import torch
import torch.nn as nn
from .registry import register_model
from .vlpencoder import LanguageEncoder
class FixLanguageEncoder(LanguageEncoder):
def __init__(
self,
*args, **kwargs):
super(FixLanguageEncoder, self).__init__(*args, **kwargs)
self.logit_scale = nn.Parameter(torch.ones([]), requires_grad=False)
@torch.no_grad()
def get_text_embeddings(self, *args, **kwargs):
return super().get_text_embeddings(*args, **kwargs)
@torch.no_grad()
def get_text_token_embeddings(self, *args, **kwargs):
return super().get_text_token_embeddings(*args, **kwargs)
@torch.no_grad()
def forward_language(self, *args, **kwargs):
return super().forward_language(*args, **kwargs)
@torch.no_grad()
def forward_language_token(self, *args, **kwargs):
return super().forward_language_token(*args, **kwargs)
@register_model
def get_language_model(cfg, **kwargs):
return FixLanguageEncoder(cfg) | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/language/fixvlpencoder.py |
import os
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers import AutoTokenizer
from .registry import lang_encoders
from .registry import is_lang_encoder
def build_lang_encoder(config_encoder, tokenizer, verbose, **kwargs):
model_name = config_encoder['NAME']
if not is_lang_encoder(model_name):
        raise ValueError(f'Unknown model: {model_name}')
return lang_encoders(model_name)(config_encoder, tokenizer, verbose, **kwargs)
def build_tokenizer(config_encoder):
tokenizer = None
os.environ['TOKENIZERS_PARALLELISM'] = 'true'
if config_encoder['TOKENIZER'] == 'clip':
pretrained_tokenizer = config_encoder.get(
'PRETRAINED_TOKENIZER', 'openai/clip-vit-base-patch32'
)
tokenizer = CLIPTokenizer.from_pretrained(pretrained_tokenizer)
tokenizer.add_special_tokens({'cls_token': tokenizer.eos_token})
elif config_encoder['TOKENIZER'] == 'clip-fast':
pretrained_tokenizer = config_encoder.get(
'PRETRAINED_TOKENIZER', 'openai/clip-vit-base-patch32'
)
tokenizer = CLIPTokenizerFast.from_pretrained(pretrained_tokenizer, from_slow=True)
else:
tokenizer = AutoTokenizer.from_pretrained(config_encoder['TOKENIZER'])
return tokenizer
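# --- Illustrative sketch (not part of the original module) ------------------
# A minimal config for `build_tokenizer`, using the keys read above. The 'clip'
# branch loads the openai/clip-vit-base-patch32 tokenizer from the Hugging Face
# hub (or a local cache), so this is kept inside a function.
def _example_build_tokenizer():
    config_encoder = {
        'TOKENIZER': 'clip',
        'PRETRAINED_TOKENIZER': 'openai/clip-vit-base-patch32',
    }
    return build_tokenizer(config_encoder)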
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/language/LangEncoder/build.py |
_lang_encoders = {}
def register_lang_encoder(fn):
module_name_split = fn.__module__.split('.')
model_name = module_name_split[-1]
_lang_encoders[model_name] = fn
return fn
def lang_encoders(model_name):
return _lang_encoders[model_name]
def is_lang_encoder(model_name):
return model_name in _lang_encoders
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/language/LangEncoder/registry.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .build import build_lang_encoder
from .build import build_tokenizer
from .transformer import * | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/language/LangEncoder/__init__.py |
from collections import OrderedDict
from typing import Tuple, Union
import logging
import os
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from timm.models.layers import DropPath, trunc_normal_
from .registry import register_lang_encoder
from utils.distributed import is_main_process
from utils.model import register_norm_module
logger = logging.getLogger(__name__)
@register_norm_module
class LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-12):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
pdtype = x.dtype
x = x.float()
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x.to(pdtype) + self.bias
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self,
d_model: int,
n_head: int,
attn_mask: torch.Tensor = None,
drop_path: float = 0.0):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def attention(self, x: torch.Tensor, key_padding_mask: torch.Tensor = None):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) \
if self.attn_mask is not None else None
return self.attn(
x, x, x,
key_padding_mask=key_padding_mask,
need_weights=False,
attn_mask=self.attn_mask
)[0]
def forward(self, x: torch.Tensor, key_padding_mask: torch.Tensor = None):
x = x + self.drop_path(self.attention(self.ln_1(x), key_padding_mask=key_padding_mask))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(self,
context_length: int,
vocab_size: int,
width: int,
layers: int,
heads: int,
drop_path: float = 0.0,
autogressive: bool =True):
super().__init__()
self.token_embedding = nn.Embedding(vocab_size, width)
self.context_length = context_length
self.positional_embedding = nn.Parameter(
torch.empty(self.context_length, width)
)
self.width = width
self.layers = layers
self.autogressive = autogressive
attn_mask = self.build_attention_mask() if autogressive else None
dpr = [x.item() for x in torch.linspace(0, drop_path, layers)] # stochastic depth decay rule
self.resblocks = nn.ModuleList(
[
ResidualAttentionBlock(width, heads, attn_mask, dpr[i])
for i in range(layers)
]
)
self.ln_final = LayerNorm(width)
trunc_normal_(self.positional_embedding, std=.02)
# nn.init.normal_(self.token_embedding, std=.02)
trunc_normal_(self.token_embedding.weight, std=.02)
self.apply(self._init_weights)
@property
def dim_out(self):
return self.width
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
def _init_weights(self, m):
if isinstance(m, (nn.Linear, nn.Conv2d)):
if is_main_process():
logger.info('=> init weight of Linear/Conv2d from trunc norm')
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
if is_main_process():
logger.info('=> init bias of Linear/Conv2d to zeros')
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
nn.init.constant_(m.bias, 0)
def load_pretrained(self, pretrained='', pretrained_layers=[], verbose=True):
if os.path.isfile(pretrained):
pretrained_dict = torch.load(pretrained, map_location='cpu')
logging.info(f'=> loading pretrained model {pretrained}')
model_dict = self.state_dict()
stripped_key = lambda x: x[13:] if x.startswith('lang_encoder.') else x
pretrained_dict = {
stripped_key(k): v for k, v in pretrained_dict.items()
if stripped_key(k) in model_dict.keys()
}
need_init_state_dict = {}
for k, v in pretrained_dict.items():
need_init = (
k.split('.')[0] in pretrained_layers
or pretrained_layers[0] == '*'
)
if need_init:
if verbose:
logger.info(f'=> init {k} from {pretrained}')
if 'positional_embedding' in k and v.size() != model_dict[k].size():
positional_embedding_pretrained = v
positional_embedding_current = model_dict[k]
L1, nH1 = positional_embedding_pretrained.size()
L2, nH2 = positional_embedding_current.size()
if nH1 != nH2:
logger.info(f"Error in loading {k}, passing")
else:
if L1 != L2:
logger.info(
'=> load_pretrained: resized variant: {} to {}'
.format((L1, nH1), (L2, nH2))
)
posemb = positional_embedding_pretrained.float()
posemb_grid = posemb.unsqueeze(dim=0).permute(0, 2, 1)
posemb_grid = torch.nn.functional.interpolate(posemb_grid, size=L2, mode='linear')
posemb_grid = posemb_grid.permute(0, 2, 1).squeeze(dim=0)
v = posemb_grid
need_init_state_dict[k] = v
self.load_state_dict(need_init_state_dict, strict=False)
@torch.jit.ignore
def no_weight_decay(self):
return {
'positional_embedding',
'token_embedding',
}
def forward(self, input_ids, attention_mask=None):
key_padding_mask = (attention_mask == 0) if (not self.autogressive and attention_mask is not None) else None
# key_padding_mask = (input_ids == 0) if not self.autogressive else None
x = self.token_embedding(input_ids) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding
x = x.permute(1, 0, 2) # NLD -> LND
for block in self.resblocks:
x = block(x, key_padding_mask)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x)
return {'last_hidden_state': x}
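# --- Illustrative sketch (not part of the original module) ------------------
# A tiny instantiation of the text Transformer above, purely to show the
# expected shapes: token ids of length `context_length` go in, contextualised
# token features come back under 'last_hidden_state'. Sizes are made up.
def _example_tiny_transformer():
    model = Transformer(context_length=8, vocab_size=100, width=32, layers=2, heads=4)
    input_ids = torch.randint(0, 100, (2, 8))  # (batch, context_length)
    out = model(input_ids)
    return out['last_hidden_state'].shape      # torch.Size([2, 8, 32])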
@register_lang_encoder
def lang_encoder(config_encoder, tokenizer, verbose, **kwargs):
transformer = Transformer(
context_length=config_encoder['CONTEXT_LENGTH'],
vocab_size=tokenizer.vocab_size,
width=config_encoder['WIDTH'],
layers=config_encoder['LAYERS'],
heads=config_encoder['HEADS'],
autogressive=config_encoder.get('AUTOGRESSIVE', True)
)
if config_encoder.get('LOAD_PRETRAINED', False):
transformer.load_pretrained(config_encoder['PRETRAINED'], config_encoder.get('PRETRAINED_LAYERS', ['*']))
return transformer
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/language/LangEncoder/transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch.nn import functional as F
from detectron2.layers import cat, shapes_to_tensor
from detectron2.structures import BitMasks, Boxes
# from ..layers import cat, shapes_to_tensor
# from ..structures import BitMasks, Boxes
"""
Shape shorthand in this module:
    N: minibatch dimension size, i.e. the number of RoIs for instance segmentation or the
        number of images for semantic segmentation.
R: number of ROIs, combined over all images, in the minibatch
P: number of points
"""
def point_sample(input, point_coords, **kwargs):
"""
A wrapper around :function:`torch.nn.functional.grid_sample` to support 3D point_coords tensors.
Unlike :function:`torch.nn.functional.grid_sample` it assumes `point_coords` to lie inside
[0, 1] x [0, 1] square.
Args:
input (Tensor): A tensor of shape (N, C, H, W) that contains features map on a H x W grid.
point_coords (Tensor): A tensor of shape (N, P, 2) or (N, Hgrid, Wgrid, 2) that contains
[0, 1] x [0, 1] normalized point coordinates.
Returns:
output (Tensor): A tensor of shape (N, C, P) or (N, C, Hgrid, Wgrid) that contains
features for points in `point_coords`. The features are obtained via bilinear
            interpolation from `input` the same way as :function:`torch.nn.functional.grid_sample`.
"""
add_dim = False
if point_coords.dim() == 3:
add_dim = True
point_coords = point_coords.unsqueeze(2)
output = F.grid_sample(input, 2.0 * point_coords - 1.0, **kwargs)
if add_dim:
output = output.squeeze(3)
return output
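# --- Illustrative sketch (not part of the original module) ------------------
# `point_sample` takes [0, 1] x [0, 1] normalised (x, y) coordinates; sampling
# the four cell centres of a 2 x 2 feature map reads back the original values.
def _example_point_sample():
    feat = torch.arange(4, dtype=torch.float32).view(1, 1, 2, 2)   # (N, C, H, W)
    coords = torch.tensor([[[0.25, 0.25], [0.75, 0.25], [0.25, 0.75], [0.75, 0.75]]])
    return point_sample(feat, coords, align_corners=False)         # tensor([[[0., 1., 2., 3.]]])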
def generate_regular_grid_point_coords(R, side_size, device):
"""
Generate regular square grid of points in [0, 1] x [0, 1] coordinate space.
Args:
R (int): The number of grids to sample, one for each region.
side_size (int): The side size of the regular grid.
device (torch.device): Desired device of returned tensor.
Returns:
(Tensor): A tensor of shape (R, side_size^2, 2) that contains coordinates
for the regular grids.
"""
aff = torch.tensor([[[0.5, 0, 0.5], [0, 0.5, 0.5]]], device=device)
r = F.affine_grid(aff, torch.Size((1, 1, side_size, side_size)), align_corners=False)
return r.view(1, -1, 2).expand(R, -1, -1)
def get_uncertain_point_coords_with_randomness(
coarse_logits, uncertainty_func, num_points, oversample_ratio, importance_sample_ratio
):
"""
    Sample points in [0, 1] x [0, 1] coordinate space based on their uncertainty. The uncertainties
are calculated for each point using 'uncertainty_func' function that takes point's logit
prediction as input.
See PointRend paper for details.
Args:
coarse_logits (Tensor): A tensor of shape (N, C, Hmask, Wmask) or (N, 1, Hmask, Wmask) for
class-specific or class-agnostic prediction.
uncertainty_func: A function that takes a Tensor of shape (N, C, P) or (N, 1, P) that
contains logit predictions for P points and returns their uncertainties as a Tensor of
shape (N, 1, P).
num_points (int): The number of points P to sample.
oversample_ratio (int): Oversampling parameter.
        importance_sample_ratio (float): Ratio of points that are sampled via importance sampling.
Returns:
point_coords (Tensor): A tensor of shape (N, P, 2) that contains the coordinates of P
sampled points.
"""
assert oversample_ratio >= 1
assert importance_sample_ratio <= 1 and importance_sample_ratio >= 0
num_boxes = coarse_logits.shape[0]
num_sampled = int(num_points * oversample_ratio)
point_coords = torch.rand(num_boxes, num_sampled, 2, device=coarse_logits.device, dtype=coarse_logits.dtype)
point_logits = point_sample(coarse_logits, point_coords, align_corners=False)
# It is crucial to calculate uncertainty based on the sampled prediction value for the points.
# Calculating uncertainties of the coarse predictions first and sampling them for points leads
# to incorrect results.
# To illustrate this: assume uncertainty_func(logits)=-abs(logits), a sampled point between
# two coarse predictions with -1 and 1 logits has 0 logits, and therefore 0 uncertainty value.
# However, if we calculate uncertainties for the coarse predictions first,
# both will have -1 uncertainty, and the sampled point will get -1 uncertainty.
point_uncertainties = uncertainty_func(point_logits)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_sampled * torch.arange(num_boxes, dtype=torch.long, device=coarse_logits.device)
idx += shift[:, None]
point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
num_boxes, num_uncertain_points, 2
)
if num_random_points > 0:
point_coords = cat(
[
point_coords,
torch.rand(num_boxes, num_random_points, 2, device=coarse_logits.device),
],
dim=1,
)
return point_coords
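# --- Illustrative sketch (not part of the original module) ------------------
# Example call of the importance-sampling routine above. The uncertainty
# function here (negative absolute logit, i.e. points near the decision
# boundary count as most uncertain) is a common choice for mask heads.
def _example_uncertain_point_sampling():
    coarse_logits = torch.randn(2, 1, 16, 16)        # (N, 1, Hmask, Wmask)
    uncertainty = lambda logits: -torch.abs(logits)  # (N, 1, P) -> (N, 1, P)
    point_coords = get_uncertain_point_coords_with_randomness(
        coarse_logits, uncertainty, num_points=32, oversample_ratio=3, importance_sample_ratio=0.75
    )
    return point_coords.shape                        # torch.Size([2, 32, 2])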
def get_uncertain_point_coords_on_grid(uncertainty_map, num_points):
"""
Find `num_points` most uncertain points from `uncertainty_map` grid.
Args:
uncertainty_map (Tensor): A tensor of shape (N, 1, H, W) that contains uncertainty
values for a set of points on a regular H x W grid.
num_points (int): The number of points P to select.
Returns:
point_indices (Tensor): A tensor of shape (N, P) that contains indices from
[0, H x W) of the most uncertain points.
point_coords (Tensor): A tensor of shape (N, P, 2) that contains [0, 1] x [0, 1] normalized
coordinates of the most uncertain points from the H x W grid.
"""
R, _, H, W = uncertainty_map.shape
h_step = 1.0 / float(H)
w_step = 1.0 / float(W)
num_points = min(H * W, num_points)
point_indices = torch.topk(uncertainty_map.view(R, H * W), k=num_points, dim=1)[1]
point_coords = torch.zeros(R, num_points, 2, dtype=torch.float, device=uncertainty_map.device)
point_coords[:, :, 0] = w_step / 2.0 + (point_indices % W).to(torch.float) * w_step
point_coords[:, :, 1] = h_step / 2.0 + (point_indices // W).to(torch.float) * h_step
return point_indices, point_coords
def point_sample_fine_grained_features(features_list, feature_scales, boxes, point_coords):
"""
Get features from feature maps in `features_list` that correspond to specific point coordinates
inside each bounding box from `boxes`.
Args:
features_list (list[Tensor]): A list of feature map tensors to get features from.
feature_scales (list[float]): A list of scales for tensors in `features_list`.
boxes (list[Boxes]): A list of I Boxes objects that contain R_1 + ... + R_I = R boxes all
together.
point_coords (Tensor): A tensor of shape (R, P, 2) that contains
[0, 1] x [0, 1] box-normalized coordinates of the P sampled points.
Returns:
point_features (Tensor): A tensor of shape (R, C, P) that contains features sampled
from all features maps in feature_list for P sampled points for all R boxes in `boxes`.
point_coords_wrt_image (Tensor): A tensor of shape (R, P, 2) that contains image-level
coordinates of P points.
"""
cat_boxes = Boxes.cat(boxes)
num_boxes = [b.tensor.size(0) for b in boxes]
point_coords_wrt_image = get_point_coords_wrt_image(cat_boxes.tensor, point_coords)
split_point_coords_wrt_image = torch.split(point_coords_wrt_image, num_boxes)
point_features = []
for idx_img, point_coords_wrt_image_per_image in enumerate(split_point_coords_wrt_image):
point_features_per_image = []
for idx_feature, feature_map in enumerate(features_list):
h, w = feature_map.shape[-2:]
scale = shapes_to_tensor([w, h]) / feature_scales[idx_feature]
point_coords_scaled = point_coords_wrt_image_per_image / scale.to(feature_map.device)
point_features_per_image.append(
point_sample(
feature_map[idx_img].unsqueeze(0),
point_coords_scaled.unsqueeze(0),
align_corners=False,
)
.squeeze(0)
.transpose(1, 0)
)
point_features.append(cat(point_features_per_image, dim=1))
return cat(point_features, dim=0), point_coords_wrt_image
def get_point_coords_wrt_image(boxes_coords, point_coords):
"""
    Convert box-normalized [0, 1] x [0, 1] point coordinates to image-level coordinates.
Args:
boxes_coords (Tensor): A tensor of shape (R, 4) that contains bounding boxes.
coordinates.
point_coords (Tensor): A tensor of shape (R, P, 2) that contains
[0, 1] x [0, 1] box-normalized coordinates of the P sampled points.
Returns:
point_coords_wrt_image (Tensor): A tensor of shape (R, P, 2) that contains
image-normalized coordinates of P sampled points.
"""
with torch.no_grad():
point_coords_wrt_image = point_coords.clone()
point_coords_wrt_image[:, :, 0] = point_coords_wrt_image[:, :, 0] * (
boxes_coords[:, None, 2] - boxes_coords[:, None, 0]
)
point_coords_wrt_image[:, :, 1] = point_coords_wrt_image[:, :, 1] * (
boxes_coords[:, None, 3] - boxes_coords[:, None, 1]
)
point_coords_wrt_image[:, :, 0] += boxes_coords[:, None, 0]
point_coords_wrt_image[:, :, 1] += boxes_coords[:, None, 1]
return point_coords_wrt_image
def sample_point_labels(instances, point_coords):
"""
Sample point labels from ground truth mask given point_coords.
Args:
instances (list[Instances]): A list of N Instances, where N is the number of images
            in the batch. So, the i-th element of the list contains R_i objects and R_1 + ... + R_N is
equal to R. The ground-truth gt_masks in each instance will be used to compute labels.
        point_coords (Tensor): A tensor of shape (R, P, 2), where R is the total number of
instances and P is the number of points for each instance. The coordinates are in
the absolute image pixel coordinate space, i.e. [0, H] x [0, W].
Returns:
Tensor: A tensor of shape (R, P) that contains the labels of P sampled points.
"""
with torch.no_grad():
gt_mask_logits = []
point_coords_splits = torch.split(
point_coords, [len(instances_per_image) for instances_per_image in instances]
)
for i, instances_per_image in enumerate(instances):
if len(instances_per_image) == 0:
continue
assert isinstance(
instances_per_image.gt_masks, BitMasks
), "Point head works with GT in 'bitmask' format. Set INPUT.MASK_FORMAT to 'bitmask'."
gt_bit_masks = instances_per_image.gt_masks.tensor
h, w = instances_per_image.gt_masks.image_size
scale = torch.tensor([w, h], dtype=torch.float, device=gt_bit_masks.device)
points_coord_grid_sample_format = point_coords_splits[i] / scale
gt_mask_logits.append(
point_sample(
gt_bit_masks.to(torch.float32).unsqueeze(1),
points_coord_grid_sample_format,
align_corners=False,
).squeeze(1)
)
point_labels = cat(gt_mask_logits)
return point_labels
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/modules/point_features.py |
# Code copied from PyTorch, modified by Xueyan Zou
import warnings
from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from torch.nn.parameter import Parameter
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import pad, linear, softmax, dropout
def multi_head_attention_forward(
query: Tensor,
key: Tensor,
value: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
bias_k: Optional[Tensor],
bias_v: Optional[Tensor],
add_zero_attn: bool,
dropout_p: float,
out_proj_weight: Tensor,
out_proj_bias: Tensor,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
use_separate_proj_weight: bool = False,
q_proj_weight: Optional[Tensor] = None,
k_proj_weight: Optional[Tensor] = None,
v_proj_weight: Optional[Tensor] = None,
static_k: Optional[Tensor] = None,
static_v: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
use_separate_proj_weight: the function accept the proj. weights for query, key,
and value in different forms. If false, in_proj_weight will be used, which is
a combination of q_proj_weight, k_proj_weight, v_proj_weight.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
if has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_forward,
tens_ops,
query,
key,
value,
embed_dim_to_check,
num_heads,
in_proj_weight,
in_proj_bias,
bias_k,
bias_v,
add_zero_attn,
dropout_p,
out_proj_weight,
out_proj_bias,
training=training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
use_separate_proj_weight=use_separate_proj_weight,
q_proj_weight=q_proj_weight,
k_proj_weight=k_proj_weight,
v_proj_weight=v_proj_weight,
static_k=static_k,
static_v=static_v,
)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if not use_separate_proj_weight:
if (query is key or torch.equal(query, key)) and (key is value or torch.equal(key, value)):
# self-attention
q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
elif key is value or torch.equal(key, value):
# encoder-decoder attention
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
if key is None:
assert value is None
k = None
v = None
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
k, v = linear(key, _w, _b).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim * 2
_end = None
_w = in_proj_weight[_start:, :]
if _b is not None:
_b = _b[_start:]
v = linear(value, _w, _b)
else:
q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
len1, len2 = q_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == query.size(-1)
k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
len1, len2 = k_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == key.size(-1)
v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
len1, len2 = v_proj_weight_non_opt.size()
assert len1 == embed_dim and len2 == value.size(-1)
if in_proj_bias is not None:
q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim : (embed_dim * 2)])
v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2) :])
else:
q = linear(query, q_proj_weight_non_opt, in_proj_bias)
k = linear(key, k_proj_weight_non_opt, in_proj_bias)
v = linear(value, v_proj_weight_non_opt, in_proj_bias)
q = q * scaling
if attn_mask is not None:
assert (
attn_mask.dtype == torch.float32
or attn_mask.dtype == torch.float64
or attn_mask.dtype == torch.float16
or attn_mask.dtype == torch.uint8
or attn_mask.dtype == torch.bool
), "Only float, byte, and bool types are supported for attn_mask, not {}".format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError("The size of the 2D attn_mask is not correct.")
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError("The size of the 3D attn_mask is not correct.")
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
)
key_padding_mask = key_padding_mask.to(torch.bool)
if bias_k is not None and bias_v is not None:
if static_k is None and static_v is None:
k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
else:
assert static_k is None, "bias cannot be added to static key."
assert static_v is None, "bias cannot be added to static value."
else:
assert bias_k is None
assert bias_v is None
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
if static_k is not None:
assert static_k.size(0) == bsz * num_heads
assert static_k.size(2) == head_dim
k = static_k
if static_v is not None:
assert static_v.size(0) == bsz * num_heads
assert static_v.size(2) == head_dim
v = static_v
src_len = k.size(1)
if key_padding_mask is not None:
# assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if add_zero_attn:
src_len += 1
k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
if attn_mask is not None:
attn_mask = pad(attn_mask, (0, 1))
if key_padding_mask is not None:
key_padding_mask = pad(key_padding_mask, (0, 1))
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float("-inf"))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1),
float("-inf"),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
attn_output_weights = softmax(attn_output_weights, dim=-1)
attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
attn_output = torch.bmm(attn_output_weights, v)
assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
if need_weights:
# average attention weights over heads
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
return attn_output, attn_output_weights.sum(dim=1) / num_heads
else:
return attn_output, None
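# Hedged usage sketch (not part of the original file): drive the functional attention
# above with randomly initialized projection weights. All shapes and parameter values
# below are illustrative assumptions, not a configuration used elsewhere in this repo.
def _example_multi_head_attention_forward():
    tgt_len, src_len, bsz, embed_dim, heads = 4, 6, 2, 8, 2
    query = torch.randn(tgt_len, bsz, embed_dim)
    key = torch.randn(src_len, bsz, embed_dim)
    value = torch.randn(src_len, bsz, embed_dim)
    in_proj_weight = torch.randn(3 * embed_dim, embed_dim) * embed_dim ** -0.5
    in_proj_bias = torch.zeros(3 * embed_dim)
    out_proj_weight = torch.randn(embed_dim, embed_dim) * embed_dim ** -0.5
    out_proj_bias = torch.zeros(embed_dim)
    attn_output, attn_weights = multi_head_attention_forward(
        query, key, value, embed_dim, heads,
        in_proj_weight, in_proj_bias,
        None, None, False, 0.0,
        out_proj_weight, out_proj_bias,
        training=False, need_weights=True)
    # attn_output: (tgt_len, bsz, embed_dim); attn_weights: (bsz, tgt_len, src_len)
    return attn_output.shape, attn_weights.shape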
# This class exists solely for Transformer; it has an annotation stating
# that bias is never None, which appeases TorchScript
class _LinearWithBias(nn.Linear):
bias: Tensor # type: ignore
def __init__(self, in_features: int, out_features: int) -> None:
super().__init__(in_features, out_features, bias=True) # type: ignore
class MultiheadAttention(nn.Module):
r"""Allows the model to jointly attend to information
from different representation subspaces.
See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_
.. math::
\text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O
where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.
Args:
embed_dim: total dimension of the model.
num_heads: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.0.
bias: add bias as module parameter. Default: True.
add_bias_kv: add bias to the key and value sequences at dim=0.
add_zero_attn: add a new batch of zeros to the key and
value sequences at dim=1.
kdim: total number of features in key. Default: None.
vdim: total number of features in value. Default: None.
Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
to :attr:`embed_dim` such that query, key, and value have the same
number of features.
Examples::
>>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
>>> attn_output, attn_output_weights = multihead_attn(query, key, value)
"""
bias_k: Optional[torch.Tensor]
bias_v: Optional[torch.Tensor]
def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.register_parameter('in_proj_weight', None)
else:
self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
self.register_parameter('q_proj_weight', None)
self.register_parameter('k_proj_weight', None)
self.register_parameter('v_proj_weight', None)
if bias:
self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = _LinearWithBias(embed_dim, embed_dim)
if add_bias_kv:
self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.)
constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(MultiheadAttention, self).__setstate__(state)
def forward(self, query: Tensor, key: Tensor, value: Tensor, key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True, attn_mask: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. When given a binary mask and a value is True,
the corresponding value on the attention layer will be ignored. When given
a byte mask and a value is non-zero, the corresponding value on the attention
layer will be ignored.
need_weights: output attn_output_weights.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
Shapes for inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero
positions will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
- attn_mask: if a 2D mask: :math:`(L, S)` where L is the target sequence length, S is the
source sequence length.
If a 3D mask: :math:`(N\cdot\text{num\_heads}, L, S)` where N is the batch size, L is the target sequence
length, S is the source sequence length. ``attn_mask`` ensures that position i is allowed to attend
the unmasked positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
Shapes for outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not self._qkv_same_embed_dim:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
else:
return multi_head_attention_forward(
query, key, value, self.embed_dim, self.num_heads,
self.in_proj_weight, self.in_proj_bias,
self.bias_k, self.bias_v, self.add_zero_attn,
self.dropout, self.out_proj.weight, self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask, need_weights=need_weights,
attn_mask=attn_mask) | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/modules/attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
## Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
class PositionEmbeddingSine(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, x, mask=None):
if mask is None:
mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=x.dtype)
x_embed = not_mask.cumsum(2, dtype=x.dtype)
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=x.dtype, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
def __repr__(self, _repr_indent=4):
head = "Positional encoding " + self.__class__.__name__
body = [
"num_pos_feats: {}".format(self.num_pos_feats),
"temperature: {}".format(self.temperature),
"normalize: {}".format(self.normalize),
"scale: {}".format(self.scale),
]
# _repr_indent = 4
lines = [head] + [" " * _repr_indent + line for line in body]
return "\n".join(lines)
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/modules/position_encoding.py |
from .position_encoding import *
from .attention import *
from .postprocessing import *
from .point_features import * | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/modules/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch.nn import functional as F
from detectron2.structures import Instances, ROIMasks
# perhaps should rename to "resize_instance"
def detector_postprocess(
results: Instances, output_height: int, output_width: int, mask_threshold: float = 0.5
):
"""
Resize the output instances.
The input images are often resized when entering an object detector.
As a result, we often need the outputs of the detector in a different
resolution from its inputs.
This function will resize the raw outputs of an R-CNN detector
to produce outputs according to the desired output resolution.
Args:
results (Instances): the raw outputs from the detector.
`results.image_size` contains the input image resolution the detector sees.
This object might be modified in-place.
output_height, output_width: the desired output resolution.
Returns:
Instances: the resized output from the model, based on the output resolution
"""
if isinstance(output_width, torch.Tensor):
# This shape might (but not necessarily) be tensors during tracing.
# Converts integer tensors to float temporaries to ensure true
# division is performed when computing scale_x and scale_y.
output_width_tmp = output_width.float()
output_height_tmp = output_height.float()
new_size = torch.stack([output_height, output_width])
else:
new_size = (output_height, output_width)
output_width_tmp = output_width
output_height_tmp = output_height
scale_x, scale_y = (
output_width_tmp / results.image_size[1],
output_height_tmp / results.image_size[0],
)
results = Instances(new_size, **results.get_fields())
if results.has("pred_boxes"):
output_boxes = results.pred_boxes
elif results.has("proposal_boxes"):
output_boxes = results.proposal_boxes
else:
output_boxes = None
assert output_boxes is not None, "Predictions must contain boxes!"
output_boxes.scale(scale_x, scale_y)
output_boxes.clip(results.image_size)
results = results[output_boxes.nonempty()]
if results.has("pred_masks"):
if isinstance(results.pred_masks, ROIMasks):
roi_masks = results.pred_masks
else:
# pred_masks is a tensor of shape (N, 1, M, M)
roi_masks = ROIMasks(results.pred_masks[:, 0, :, :])
results.pred_masks = roi_masks.to_bitmasks(
results.pred_boxes, output_height, output_width, mask_threshold
).tensor # TODO return ROIMasks/BitMask object in the future
if results.has("pred_keypoints"):
results.pred_keypoints[:, :, 0] *= scale_x
results.pred_keypoints[:, :, 1] *= scale_y
return results
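# Hedged usage sketch (not part of the original file): rescale detections produced on a
# 100x150 resized input back to a 200x300 original image. Boxes is imported locally
# here only for the demo; the function itself does not need it.
def _example_detector_postprocess():
    from detectron2.structures import Boxes
    results = Instances((100, 150))
    results.pred_boxes = Boxes(torch.tensor([[10.0, 10.0, 50.0, 40.0]]))
    resized = detector_postprocess(results, output_height=200, output_width=300)
    return resized.pred_boxes.tensor      # the box coordinates scaled by 2x in x and y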
def bbox_postprocess(result, input_size, img_size, output_height, output_width):
"""
result: [xc,yc,w,h] range [0,1] to [x1,y1,x2,y2] range [0,w], [0,h]
"""
if result is None:
return None
scale = torch.tensor([input_size[1], input_size[0], input_size[1], input_size[0]])[None,:].to(result.device)
result = result.sigmoid() * scale
x1,y1,x2,y2 = result[:,0] - result[:,2]/2, result[:,1] - result[:,3]/2, result[:,0] + result[:,2]/2, result[:,1] + result[:,3]/2
h,w = img_size
x1 = x1.clamp(min=0, max=w)
y1 = y1.clamp(min=0, max=h)
x2 = x2.clamp(min=0, max=w)
y2 = y2.clamp(min=0, max=h)
box = torch.stack([x1,y1,x2,y2]).permute(1,0)
scale = torch.tensor([output_width/w, output_height/h, output_width/w, output_height/h])[None,:].to(result.device)
box = box*scale
return box
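# Hedged usage sketch (not part of the original file): decode two raw (xc, yc, w, h)
# box logits predicted on a padded 128x128 input whose valid region is 100x120 into
# absolute (x1, y1, x2, y2) coordinates at a 480x400 output resolution.
def _example_bbox_postprocess():
    raw = torch.zeros(2, 4)               # sigmoid(0) = 0.5, i.e. boxes centred in the input
    boxes = bbox_postprocess(raw, input_size=(128, 128), img_size=(100, 120),
                             output_height=400, output_width=480)
    return boxes                          # shape (2, 4); here [[128., 128., 384., 384.], ...]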
def sem_seg_postprocess(result, img_size, output_height, output_width):
"""
Return semantic segmentation predictions in the original resolution.
The input images are often resized when entering the semantic segmentor. Moreover, in some
cases, they are also padded inside the segmentor to be divisible by the maximum network stride.
As a result, we often need the predictions of the segmentor in a different
resolution from its inputs.
Args:
result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
where C is the number of classes, and H, W are the height and width of the prediction.
img_size (tuple): image size that segmentor is taking as input.
output_height, output_width: the desired output resolution.
Returns:
semantic segmentation prediction (Tensor): A tensor of the shape
(C, output_height, output_width) that contains per-pixel soft predictions.
"""
result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
result = F.interpolate(
result, size=(output_height, output_width), mode="bilinear", align_corners=False
)[0]
return result
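# Hedged usage sketch (not part of the original file): crop the padding off a 21-class
# logit map computed on a padded 128x128 input (valid region 100x120) and upsample it
# to the original 400x480 image.
def _example_sem_seg_postprocess():
    logits = torch.randn(21, 128, 128)
    full_res = sem_seg_postprocess(logits, img_size=(100, 120),
                                   output_height=400, output_width=480)
    return full_res.shape                 # torch.Size([21, 400, 480])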
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/modules/postprocessing.py |
from .registry import model_entrypoints
from .registry import is_model
from .backbone import *
def build_backbone(config, **kwargs):
model_name = config['MODEL']['BACKBONE']['NAME']
if not is_model(model_name):
raise ValueError(f'Unkown model: {model_name}')
return model_entrypoints(model_name)(config, **kwargs) | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/backbone/build.py |
import os
import itertools
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from collections import OrderedDict
from einops import rearrange
from timm.models.layers import DropPath, trunc_normal_
from detectron2.utils.file_io import PathManager
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec
from .registry import register_backbone
logger = logging.getLogger(__name__)
class MySequential(nn.Sequential):
def forward(self, *inputs):
for module in self._modules.values():
if type(inputs) == tuple:
inputs = module(*inputs)
else:
inputs = module(inputs)
return inputs
class PreNorm(nn.Module):
def __init__(self, norm, fn, drop_path=None):
super().__init__()
self.norm = norm
self.fn = fn
self.drop_path = drop_path
def forward(self, x, *args, **kwargs):
shortcut = x
if self.norm != None:
x, size = self.fn(self.norm(x), *args, **kwargs)
else:
x, size = self.fn(x, *args, **kwargs)
if self.drop_path:
x = self.drop_path(x)
x = shortcut + x
return x, size
class Mlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.net = nn.Sequential(OrderedDict([
("fc1", nn.Linear(in_features, hidden_features)),
("act", act_layer()),
("fc2", nn.Linear(hidden_features, out_features))
]))
def forward(self, x, size):
return self.net(x), size
class DepthWiseConv2d(nn.Module):
def __init__(
self,
dim_in,
kernel_size,
padding,
stride,
bias=True,
):
super().__init__()
self.dw = nn.Conv2d(
dim_in, dim_in,
kernel_size=kernel_size,
padding=padding,
groups=dim_in,
stride=stride,
bias=bias
)
def forward(self, x, size):
B, N, C = x.shape
H, W = size
assert N == H * W
x = self.dw(x.transpose(1, 2).view(B, C, H, W))
size = (x.size(-2), x.size(-1))
x = x.flatten(2).transpose(1, 2)
return x, size
class ConvEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(
self,
patch_size=7,
in_chans=3,
embed_dim=64,
stride=4,
padding=2,
norm_layer=None,
pre_norm=True
):
super().__init__()
self.patch_size = patch_size
self.proj = nn.Conv2d(
in_chans, embed_dim,
kernel_size=patch_size,
stride=stride,
padding=padding
)
dim_norm = in_chans if pre_norm else embed_dim
self.norm = norm_layer(dim_norm) if norm_layer else None
self.pre_norm = pre_norm
def forward(self, x, size):
H, W = size
if len(x.size()) == 3:
if self.norm and self.pre_norm:
x = self.norm(x)
x = rearrange(
x, 'b (h w) c -> b c h w',
h=H, w=W
)
x = self.proj(x)
_, _, H, W = x.shape
x = rearrange(x, 'b c h w -> b (h w) c')
if self.norm and not self.pre_norm:
x = self.norm(x)
return x, (H, W)
class ChannelAttention(nn.Module):
def __init__(self, dim, groups=8, qkv_bias=True):
super().__init__()
self.groups = groups
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
def forward(self, x, size):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.groups, C // self.groups).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
q = q * (N ** -0.5)
attention = q.transpose(-1, -2) @ k
attention = attention.softmax(dim=-1)
x = (attention @ v.transpose(-1, -2)).transpose(-1, -2)
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
return x, size
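# Hedged usage sketch (not part of the original file): channel-group attention over the
# tokens of a 7x7 feature map. Unlike spatial attention, the attention matrix here is
# head_dim x head_dim, i.e. it mixes channels rather than tokens.
def _example_channel_attention():
    attn = ChannelAttention(dim=32, groups=8)
    tokens = torch.randn(2, 49, 32)       # (B, N, C) with N = 7 * 7
    out, size = attn(tokens, (7, 7))
    return out.shape, size                # (torch.Size([2, 49, 32]), (7, 7))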
class ChannelBlock(nn.Module):
def __init__(self, dim, groups, mlp_ratio=4., qkv_bias=True,
drop_path_rate=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm,
conv_at_attn=True, conv_at_ffn=True):
super().__init__()
drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.conv1 = PreNorm(None, DepthWiseConv2d(dim, 3, 1, 1)) if conv_at_attn else None
self.channel_attn = PreNorm(
norm_layer(dim),
ChannelAttention(dim, groups=groups, qkv_bias=qkv_bias),
drop_path
)
self.conv2 = PreNorm(None, DepthWiseConv2d(dim, 3, 1, 1)) if conv_at_ffn else None
self.ffn = PreNorm(
norm_layer(dim),
Mlp(in_features=dim, hidden_features=int(dim*mlp_ratio), act_layer=act_layer),
drop_path
)
def forward(self, x, size):
if self.conv1:
x, size = self.conv1(x, size)
x, size = self.channel_attn(x, size)
if self.conv2:
x, size = self.conv2(x, size)
x, size = self.ffn(x, size)
return x, size
def window_partition(x, window_size: int):
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size: int, H: int, W: int):
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
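# Hedged round-trip sketch (not part of the original file): partition a channel-last
# feature map into non-overlapping 7x7 windows and reassemble it. H and W are assumed
# to be divisible by the window size here; WindowAttention below pads when they are not.
def _example_window_partition_roundtrip():
    x = torch.randn(1, 14, 14, 8)                 # (B, H, W, C)
    windows = window_partition(x, 7)              # (num_windows * B, 7, 7, C) == (4, 7, 7, 8)
    restored = window_reverse(windows, 7, 14, 14)
    return torch.equal(restored, x)               # True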
class WindowAttention(nn.Module):
def __init__(self, dim, num_heads, window_size, qkv_bias=True):
super().__init__()
self.dim = dim
self.window_size = window_size
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, size):
H, W = size
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
x = x.view(B, H, W, C)
pad_l = pad_t = 0
pad_r = (self.window_size - W % self.window_size) % self.window_size
pad_b = (self.window_size - H % self.window_size) % self.window_size
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, Hp, Wp, _ = x.shape
x = window_partition(x, self.window_size)
x = x.view(-1, self.window_size * self.window_size, C)
# W-MSA/SW-MSA
# attn_windows = self.attn(x_windows)
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
attn = self.softmax(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
# merge windows
x = x.view(
-1, self.window_size, self.window_size, C
)
x = window_reverse(x, self.window_size, Hp, Wp)
if pad_r > 0 or pad_b > 0:
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
return x, size
class SpatialBlock(nn.Module):
def __init__(self, dim, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, drop_path_rate=0., act_layer=nn.GELU,
norm_layer=nn.LayerNorm, conv_at_attn=True, conv_at_ffn=True):
super().__init__()
drop_path = DropPath(drop_path_rate) if drop_path_rate > 0. else nn.Identity()
self.conv1 = PreNorm(None, DepthWiseConv2d(dim, 3, 1, 1)) if conv_at_attn else None
self.window_attn = PreNorm(
norm_layer(dim),
WindowAttention(dim, num_heads, window_size, qkv_bias=qkv_bias),
drop_path
)
self.conv2 = PreNorm(None, DepthWiseConv2d(dim, 3, 1, 1)) if conv_at_ffn else None
self.ffn = PreNorm(
norm_layer(dim),
Mlp(in_features=dim, hidden_features=int(dim*mlp_ratio), act_layer=act_layer),
drop_path
)
def forward(self, x, size):
if self.conv1:
x, size = self.conv1(x, size)
x, size = self.window_attn(x, size)
if self.conv2:
x, size = self.conv2(x, size)
x, size = self.ffn(x, size)
return x, size
class DaViT(nn.Module):
""" DaViT: Dual-Attention Transformer
Args:
img_size (int): Image size, Default: 224.
in_chans (int): Number of input image channels. Default: 3.
num_classes (int): Number of classes for classification head. Default: 1000.
patch_size (tuple(int)): Patch size of convolution in different stages. Default: (7, 2, 2, 2).
patch_stride (tuple(int)): Patch stride of convolution in different stages. Default: (4, 2, 2, 2).
patch_padding (tuple(int)): Patch padding of convolution in different stages. Default: (3, 0, 0, 0).
patch_prenorm (tuple(bool)): If True, perform norm before the convolution layer. Default: (False, False, False, False).
embed_dims (tuple(int)): Patch embedding dimension in different stages. Default: (64, 128, 192, 256).
num_heads (tuple(int)): Number of spatial attention heads in different stages. Default: (3, 6, 12, 24).
num_groups (tuple(int)): Number of channel groups in different stages. Default: (3, 6, 12, 24).
window_size (int): Window size. Default: 7.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True.
drop_path_rate (float): Stochastic depth rate. Default: 0.1.
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
enable_checkpoint (bool): If True, enable checkpointing. Default: False.
conv_at_attn (bool): If True, perform depthwise convolution before the attention layer. Default: True.
conv_at_ffn (bool): If True, perform depthwise convolution before the FFN layer. Default: True.
"""
def __init__(
self,
img_size=224,
in_chans=3,
num_classes=1000,
depths=(1, 1, 3, 1),
patch_size=(7, 2, 2, 2),
patch_stride=(4, 2, 2, 2),
patch_padding=(3, 0, 0, 0),
patch_prenorm=(False, False, False, False),
embed_dims=(64, 128, 192, 256),
num_heads=(3, 6, 12, 24),
num_groups=(3, 6, 12, 24),
window_size=7,
mlp_ratio=4.,
qkv_bias=True,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
enable_checkpoint=False,
conv_at_attn=True,
conv_at_ffn=True,
out_indices=[],
):
super().__init__()
self.num_classes = num_classes
self.embed_dims = embed_dims
self.num_heads = num_heads
self.num_groups = num_groups
self.num_stages = len(self.embed_dims)
self.enable_checkpoint = enable_checkpoint
assert self.num_stages == len(self.num_heads) == len(self.num_groups)
num_stages = len(embed_dims)
self.img_size = img_size
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)*2)]
depth_offset = 0
convs = []
blocks = []
for i in range(num_stages):
conv_embed = ConvEmbed(
patch_size=patch_size[i],
stride=patch_stride[i],
padding=patch_padding[i],
in_chans=in_chans if i == 0 else self.embed_dims[i - 1],
embed_dim=self.embed_dims[i],
norm_layer=norm_layer,
pre_norm=patch_prenorm[i]
)
convs.append(conv_embed)
print(f'=> Depth offset in stage {i}: {depth_offset}')
block = MySequential(
*[
MySequential(OrderedDict([
(
'spatial_block', SpatialBlock(
embed_dims[i],
num_heads[i],
window_size,
drop_path_rate=dpr[depth_offset+j*2],
qkv_bias=qkv_bias,
mlp_ratio=mlp_ratio,
conv_at_attn=conv_at_attn,
conv_at_ffn=conv_at_ffn,
)
),
(
'channel_block', ChannelBlock(
embed_dims[i],
num_groups[i],
drop_path_rate=dpr[depth_offset+j*2+1],
qkv_bias=qkv_bias,
mlp_ratio=mlp_ratio,
conv_at_attn=conv_at_attn,
conv_at_ffn=conv_at_ffn,
)
)
])) for j in range(depths[i])
]
)
blocks.append(block)
depth_offset += depths[i]*2
self.convs = nn.ModuleList(convs)
self.blocks = nn.ModuleList(blocks)
self.out_indices = out_indices
# self.norms = norm_layer(self.embed_dims[-1])
# self.avgpool = nn.AdaptiveAvgPool1d(1)
# self.head = nn.Linear(self.embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
@property
def dim_out(self):
return self.embed_dims[-1]
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.02)
for name, _ in m.named_parameters():
if name in ['bias']:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0)
def _try_remap_keys(self, pretrained_dict):
remap_keys = {
"conv_embeds": "convs",
"main_blocks": "blocks",
"0.cpe.0.proj": "spatial_block.conv1.fn.dw",
"0.attn": "spatial_block.window_attn.fn",
"0.cpe.1.proj": "spatial_block.conv2.fn.dw",
"0.mlp": "spatial_block.ffn.fn.net",
"1.cpe.0.proj": "channel_block.conv1.fn.dw",
"1.attn": "channel_block.channel_attn.fn",
"1.cpe.1.proj": "channel_block.conv2.fn.dw",
"1.mlp": "channel_block.ffn.fn.net",
"0.norm1": "spatial_block.window_attn.norm",
"0.norm2": "spatial_block.ffn.norm",
"1.norm1": "channel_block.channel_attn.norm",
"1.norm2": "channel_block.ffn.norm"
}
full_key_mappings = {}
for k in pretrained_dict.keys():
old_k = k
for remap_key in remap_keys.keys():
if remap_key in k:
print(f'=> Replace {remap_key} with {remap_keys[remap_key]}')
k = k.replace(remap_key, remap_keys[remap_key])
full_key_mappings[old_k] = k
return full_key_mappings
def from_state_dict(self, pretrained_dict, pretrained_layers=[], verbose=True):
model_dict = self.state_dict()
stripped_key = lambda x: x[14:] if x.startswith('image_encoder.') else x
full_key_mappings = self._try_remap_keys(pretrained_dict)
pretrained_dict = {
stripped_key(full_key_mappings[k]): v for k, v in pretrained_dict.items()
if stripped_key(full_key_mappings[k]) in model_dict.keys()
}
need_init_state_dict = {}
for k, v in pretrained_dict.items():
need_init = (
k.split('.')[0] in pretrained_layers
or pretrained_layers[0] == '*'
)
if need_init:
if verbose:
print(f'=> init {k} from pretrained state dict')
need_init_state_dict[k] = v
self.load_state_dict(need_init_state_dict, strict=False)
def from_pretrained(self, pretrained='', pretrained_layers=[], verbose=True):
if os.path.isfile(pretrained):
print(f'=> loading pretrained model {pretrained}')
pretrained_dict = torch.load(pretrained, map_location='cpu')
self.from_state_dict(pretrained_dict, pretrained_layers, verbose)
def forward_features(self, x):
input_size = (x.size(2), x.size(3))
outs = {}
for i, (conv, block) in enumerate(zip(self.convs, self.blocks)):
x, input_size = conv(x, input_size)
if self.enable_checkpoint:
x, input_size = checkpoint.checkpoint(block, x, input_size)
else:
x, input_size = block(x, input_size)
if i in self.out_indices:
out = x.view(-1, *input_size, self.embed_dims[i]).permute(0, 3, 1, 2).contiguous()
outs["res{}".format(i + 2)] = out
if len(self.out_indices) == 0:
outs["res5"] = x.view(-1, *input_size, self.embed_dims[-1]).permute(0, 3, 1, 2).contiguous()
return outs
def forward(self, x):
x = self.forward_features(x)
# x = self.head(x)
return x
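# Hedged usage sketch (not part of the original file): build a small DaViT variant and
# inspect the multi-scale feature maps it returns. The hyper-parameters below are
# illustrative assumptions, not the configuration used elsewhere in this repository.
def _example_tiny_davit():
    model = DaViT(depths=(1, 1, 1, 1), embed_dims=(32, 64, 128, 256),
                  num_heads=(1, 2, 4, 8), num_groups=(1, 2, 4, 8),
                  out_indices=[0, 1, 2, 3])
    feats = model(torch.randn(1, 3, 224, 224))
    return {k: v.shape for k, v in feats.items()} # res2..res5 at strides 4/8/16/32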
class D2DaViT(DaViT, Backbone):
def __init__(self, cfg, input_shape):
spec = cfg['BACKBONE']['DAVIT']
super().__init__(
num_classes=0,
depths=spec['DEPTHS'],
embed_dims=spec['DIM_EMBED'],
num_heads=spec['NUM_HEADS'],
num_groups=spec['NUM_GROUPS'],
patch_size=spec['PATCH_SIZE'],
patch_stride=spec['PATCH_STRIDE'],
patch_padding=spec['PATCH_PADDING'],
patch_prenorm=spec['PATCH_PRENORM'],
drop_path_rate=spec['DROP_PATH_RATE'],
img_size=input_shape,
window_size=spec.get('WINDOW_SIZE', 7),
enable_checkpoint=spec.get('ENABLE_CHECKPOINT', False),
conv_at_attn=spec.get('CONV_AT_ATTN', True),
conv_at_ffn=spec.get('CONV_AT_FFN', True),
out_indices=spec.get('OUT_INDICES', []),
)
self._out_features = cfg['BACKBONE']['DAVIT']['OUT_FEATURES']
self._out_feature_strides = {
"res2": 4,
"res3": 8,
"res4": 16,
"res5": 32,
}
self._out_feature_channels = {
"res2": self.embed_dims[0],
"res3": self.embed_dims[1],
"res4": self.embed_dims[2],
"res5": self.embed_dims[3],
}
def forward(self, x):
"""
Args:
x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
Returns:
dict[str->Tensor]: names and the corresponding features
"""
assert (
x.dim() == 4
), f"SwinTransformer takes an input of shape (N, C, H, W). Got {x.shape} instead!"
outputs = {}
y = super().forward(x)
for k in y.keys():
if k in self._out_features:
outputs[k] = y[k]
return outputs
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
@property
def size_divisibility(self):
return 32
@register_backbone
def get_davit_backbone(cfg):
davit = D2DaViT(cfg['MODEL'], 224)
if cfg['MODEL']['BACKBONE']['LOAD_PRETRAINED'] is True:
filename = cfg['MODEL']['BACKBONE']['PRETRAINED']
logger.info(f'=> init from {filename}')
davit.from_pretrained(
filename,
cfg['MODEL']['BACKBONE']['DAVIT'].get('PRETRAINED_LAYERS', ['*']),
cfg['VERBOSE'])
return davit | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/backbone/davit.py |
# --------------------------------------------------------
# FocalNet for Semantic Segmentation
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Jianwei Yang
# --------------------------------------------------------
import math
import time
import numpy as np
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from detectron2.utils.file_io import PathManager
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec
from .registry import register_backbone
logger = logging.getLogger(__name__)
class Mlp(nn.Module):
""" Multilayer perceptron."""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class FocalModulation(nn.Module):
""" Focal Modulation
Args:
dim (int): Number of input channels.
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
focal_level (int): Number of focal levels
focal_window (int): Focal window size at focal level 1
focal_factor (int, default=2): Step to increase the focal window
use_postln (bool, default=False): Whether use post-modulation layernorm
"""
def __init__(self, dim, proj_drop=0., focal_level=2, focal_window=7, focal_factor=2, use_postln=False, use_postln_in_modulation=False, scaling_modulator=False):
super().__init__()
self.dim = dim
# specific args for focalv3
self.focal_level = focal_level
self.focal_window = focal_window
self.focal_factor = focal_factor
self.use_postln_in_modulation = use_postln_in_modulation
self.scaling_modulator = scaling_modulator
self.f = nn.Linear(dim, 2*dim+(self.focal_level+1), bias=True)
self.h = nn.Conv2d(dim, dim, kernel_size=1, stride=1, padding=0, groups=1, bias=True)
self.act = nn.GELU()
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.focal_layers = nn.ModuleList()
if self.use_postln_in_modulation:
self.ln = nn.LayerNorm(dim)
for k in range(self.focal_level):
kernel_size = self.focal_factor*k + self.focal_window
self.focal_layers.append(
nn.Sequential(
nn.Conv2d(dim, dim, kernel_size=kernel_size, stride=1, groups=dim,
padding=kernel_size//2, bias=False),
nn.GELU(),
)
)
def forward(self, x):
""" Forward function.
Args:
x: input features with shape of (B, H, W, C)
"""
B, nH, nW, C = x.shape
x = self.f(x)
x = x.permute(0, 3, 1, 2).contiguous()
q, ctx, gates = torch.split(x, (C, C, self.focal_level+1), 1)
ctx_all = 0
for l in range(self.focal_level):
ctx = self.focal_layers[l](ctx)
ctx_all = ctx_all + ctx*gates[:, l:l+1]
ctx_global = self.act(ctx.mean(2, keepdim=True).mean(3, keepdim=True))
ctx_all = ctx_all + ctx_global*gates[:,self.focal_level:]
if self.scaling_modulator:
ctx_all = ctx_all / (self.focal_level + 1)
x_out = q * self.h(ctx_all)
x_out = x_out.permute(0, 2, 3, 1).contiguous()
if self.use_postln_in_modulation:
x_out = self.ln(x_out)
x_out = self.proj(x_out)
x_out = self.proj_drop(x_out)
return x_out
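# Hedged usage sketch (not part of the original file): run focal modulation over a dummy
# channel-last feature map; shapes follow the docstring above.
def _example_focal_modulation():
    mod = FocalModulation(dim=32, focal_level=2, focal_window=7)
    x = torch.randn(2, 14, 14, 32)        # (B, H, W, C)
    return mod(x).shape                   # torch.Size([2, 14, 14, 32])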
class FocalModulationBlock(nn.Module):
""" Focal Modulation Block.
Args:
dim (int): Number of input channels.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
drop (float, optional): Dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
focal_level (int): number of focal levels
focal_window (int): focal kernel size at level 1
"""
def __init__(self, dim, mlp_ratio=4., drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm,
focal_level=2, focal_window=9,
use_postln=False, use_postln_in_modulation=False,
scaling_modulator=False,
use_layerscale=False,
layerscale_value=1e-4):
super().__init__()
self.dim = dim
self.mlp_ratio = mlp_ratio
self.focal_window = focal_window
self.focal_level = focal_level
self.use_postln = use_postln
self.use_layerscale = use_layerscale
self.norm1 = norm_layer(dim)
self.modulation = FocalModulation(
dim, focal_window=self.focal_window, focal_level=self.focal_level, proj_drop=drop, use_postln_in_modulation=use_postln_in_modulation, scaling_modulator=scaling_modulator
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.H = None
self.W = None
self.gamma_1 = 1.0
self.gamma_2 = 1.0
if self.use_layerscale:
self.gamma_1 = nn.Parameter(layerscale_value * torch.ones((dim)), requires_grad=True)
self.gamma_2 = nn.Parameter(layerscale_value * torch.ones((dim)), requires_grad=True)
def forward(self, x):
""" Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
"""
B, L, C = x.shape
H, W = self.H, self.W
assert L == H * W, "input feature has wrong size"
shortcut = x
if not self.use_postln:
x = self.norm1(x)
x = x.view(B, H, W, C)
# FM
x = self.modulation(x).view(B, H * W, C)
if self.use_postln:
x = self.norm1(x)
# FFN
x = shortcut + self.drop_path(self.gamma_1 * x)
if self.use_postln:
x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x)))
else:
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class BasicLayer(nn.Module):
""" A basic focal modulation layer for one stage.
Args:
dim (int): Number of feature channels
depth (int): Depths of this stage.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
drop (float, optional): Dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
focal_level (int): Number of focal levels
focal_window (int): Focal window size at focal level 1
use_conv_embed (bool): Whether to use overlapped convolution for patch embedding. Default: False
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self,
dim,
depth,
mlp_ratio=4.,
drop=0.,
drop_path=0.,
norm_layer=nn.LayerNorm,
downsample=None,
focal_window=9,
focal_level=2,
use_conv_embed=False,
use_postln=False,
use_postln_in_modulation=False,
scaling_modulator=False,
use_layerscale=False,
use_checkpoint=False
):
super().__init__()
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
FocalModulationBlock(
dim=dim,
mlp_ratio=mlp_ratio,
drop=drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
focal_window=focal_window,
focal_level=focal_level,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
scaling_modulator=scaling_modulator,
use_layerscale=use_layerscale,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(
patch_size=2,
in_chans=dim, embed_dim=2*dim,
use_conv_embed=use_conv_embed,
norm_layer=norm_layer,
is_stem=False
)
else:
self.downsample = None
def forward(self, x, H, W):
""" Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
"""
for blk in self.blocks:
blk.H, blk.W = H, W
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x_reshaped = x.transpose(1, 2).view(x.shape[0], x.shape[-1], H, W)
x_down = self.downsample(x_reshaped)
x_down = x_down.flatten(2).transpose(1, 2)
Wh, Ww = (H + 1) // 2, (W + 1) // 2
return x, H, W, x_down, Wh, Ww
else:
return x, H, W, x, H, W
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
Args:
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
use_conv_embed (bool): Whether to use overlapped convolution for patch embedding. Default: False
is_stem (bool): Is the stem block or not.
"""
def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None, use_conv_embed=False, is_stem=False):
super().__init__()
patch_size = to_2tuple(patch_size)
self.patch_size = patch_size
self.in_chans = in_chans
self.embed_dim = embed_dim
if use_conv_embed:
# if we choose to use conv embedding, then we treat the stem and non-stem differently
if is_stem:
kernel_size = 7; padding = 2; stride = 4
else:
kernel_size = 3; padding = 1; stride = 2
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)
else:
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
"""Forward function."""
_, _, H, W = x.size()
if W % self.patch_size[1] != 0:
x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
if H % self.patch_size[0] != 0:
x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
x = self.proj(x) # B C Wh Ww
if self.norm is not None:
Wh, Ww = x.size(2), x.size(3)
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
return x
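# Hedged usage sketch (not part of the original file): patch-embed an input whose side
# length is not divisible by the patch size; forward() pads it up to 32x32 before the
# strided convolution.
def _example_patch_embed():
    embed = PatchEmbed(patch_size=4, in_chans=3, embed_dim=96, norm_layer=nn.LayerNorm)
    x = torch.randn(1, 3, 30, 30)
    return embed(x).shape                 # torch.Size([1, 96, 8, 8])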
class FocalNet(nn.Module):
""" FocalNet backbone.
Args:
pretrain_img_size (int): Input image size for training the pretrained model,
used in absolute position embedding. Default: 1600.
patch_size (int | tuple(int)): Patch size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
depths (tuple[int]): Depths of each FocalNet stage.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
drop_rate (float): Dropout rate.
drop_path_rate (float): Stochastic depth rate. Default: 0.2.
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: True.
out_indices (Sequence[int]): Output from which stages.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
focal_levels (Sequence[int]): Number of focal levels at four stages
focal_windows (Sequence[int]): Focal window sizes at first focal level at four stages
use_conv_embed (bool): Whether to use overlapped convolution for patch embedding
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self,
pretrain_img_size=1600,
patch_size=4,
in_chans=3,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.2,
norm_layer=nn.LayerNorm,
patch_norm=True,
out_indices=[0, 1, 2, 3],
frozen_stages=-1,
focal_levels=[2,2,2,2],
focal_windows=[9,9,9,9],
use_conv_embed=False,
use_postln=False,
use_postln_in_modulation=False,
scaling_modulator=False,
use_layerscale=False,
use_checkpoint=False,
):
super().__init__()
self.pretrain_img_size = pretrain_img_size
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.out_indices = out_indices
self.frozen_stages = frozen_stages
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None,
use_conv_embed=use_conv_embed, is_stem=True)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(
dim=int(embed_dim * 2 ** i_layer),
depth=depths[i_layer],
mlp_ratio=mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_window=focal_windows[i_layer],
focal_level=focal_levels[i_layer],
use_conv_embed=use_conv_embed,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
scaling_modulator=scaling_modulator,
use_layerscale=use_layerscale,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
self.num_features = num_features
# add a norm layer for each output
for i_layer in out_indices:
layer = norm_layer(num_features[i_layer])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self._freeze_stages()
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
if self.frozen_stages >= 2:
self.pos_drop.eval()
for i in range(0, self.frozen_stages - 1):
m = self.layers[i]
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
if isinstance(pretrained, str):
self.apply(_init_weights)
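# NOTE: get_root_logger and load_checkpoint are not imported in this file; they are
# assumed to come from the mmseg/mmcv ecosystem (e.g. mmseg.utils.get_root_logger and
# mmcv.runner.load_checkpoint) and must be available for the string-pretrained path.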
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
self.apply(_init_weights)
else:
raise TypeError('pretrained must be a str or None')
def load_weights(self, pretrained_dict=None, pretrained_layers=[], verbose=True):
model_dict = self.state_dict()
missed_dict = [k for k in model_dict.keys() if k not in pretrained_dict]
logger.info(f'=> Missed keys {missed_dict}')
unexpected_dict = [k for k in pretrained_dict.keys() if k not in model_dict]
logger.info(f'=> Unexpected keys {unexpected_dict}')
pretrained_dict = {
k: v for k, v in pretrained_dict.items()
if k in model_dict.keys()
}
need_init_state_dict = {}
for k, v in pretrained_dict.items():
need_init = (
(
k.split('.')[0] in pretrained_layers
or pretrained_layers[0] == '*'
)
and 'relative_position_index' not in k
and 'attn_mask' not in k
)
if need_init:
# if verbose:
# logger.info(f'=> init {k} from {pretrained}')
if (('pool_layers' in k) or ('focal_layers' in k)) and v.size() != model_dict[k].size():
table_pretrained = v
table_current = model_dict[k]
fsize1 = table_pretrained.shape[2]
fsize2 = table_current.shape[2]
# NOTE: different from interpolation used in self-attention, we use padding or clipping for focal conv
if fsize1 < fsize2:
table_pretrained_resized = torch.zeros(table_current.shape)
table_pretrained_resized[:, :, (fsize2-fsize1)//2:-(fsize2-fsize1)//2, (fsize2-fsize1)//2:-(fsize2-fsize1)//2] = table_pretrained
v = table_pretrained_resized
elif fsize1 > fsize2:
table_pretrained_resized = table_pretrained[:, :, (fsize1-fsize2)//2:-(fsize1-fsize2)//2, (fsize1-fsize2)//2:-(fsize1-fsize2)//2]
v = table_pretrained_resized
if ("modulation.f" in k or "pre_conv" in k):
table_pretrained = v
table_current = model_dict[k]
if table_pretrained.shape != table_current.shape:
if len(table_pretrained.shape) == 2:
dim = table_pretrained.shape[1]
assert table_current.shape[1] == dim
L1 = table_pretrained.shape[0]
L2 = table_current.shape[0]
if L1 < L2:
table_pretrained_resized = torch.zeros(table_current.shape)
# copy for linear project
table_pretrained_resized[:2*dim] = table_pretrained[:2*dim]
# copy for global token gating
table_pretrained_resized[-1] = table_pretrained[-1]
# copy for first multiple focal levels
table_pretrained_resized[2*dim:2*dim+(L1-2*dim-1)] = table_pretrained[2*dim:-1]
# reassign pretrained weights
v = table_pretrained_resized
elif L1 > L2:
raise NotImplementedError
elif len(table_pretrained.shape) == 1:
dim = table_pretrained.shape[0]
L1 = table_pretrained.shape[0]
L2 = table_current.shape[0]
if L1 < L2:
table_pretrained_resized = torch.zeros(table_current.shape)
# copy for linear project
table_pretrained_resized[:dim] = table_pretrained[:dim]
# copy for global token gating
table_pretrained_resized[-1] = table_pretrained[-1]
# copy for first multiple focal levels
# table_pretrained_resized[dim:2*dim+(L1-2*dim-1)] = table_pretrained[2*dim:-1]
# reassign pretrained weights
v = table_pretrained_resized
elif L1 > L2:
raise NotImplementedError
need_init_state_dict[k] = v
self.load_state_dict(need_init_state_dict, strict=False)
def forward(self, x):
"""Forward function."""
tic = time.time()
x = self.patch_embed(x)
Wh, Ww = x.size(2), x.size(3)
x = x.flatten(2).transpose(1, 2)
x = self.pos_drop(x)
outs = {}
for i in range(self.num_layers):
layer = self.layers[i]
x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
if i in self.out_indices:
norm_layer = getattr(self, f'norm{i}')
x_out = norm_layer(x_out)
out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
outs["res{}".format(i + 2)] = out
if len(self.out_indices) == 0:
outs["res5"] = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
toc = time.time()
return outs
def train(self, mode=True):
"""Convert the model into training mode while keep layers freezed."""
super(FocalNet, self).train(mode)
self._freeze_stages()
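# Hedged usage sketch (not part of the original file): build a small FocalNet and inspect
# its multi-scale outputs. The hyper-parameters below are illustrative assumptions, not
# the configuration used elsewhere in this repository.
def _example_tiny_focalnet():
    model = FocalNet(embed_dim=32, depths=[1, 1, 1, 1], out_indices=[0, 1, 2, 3])
    model.init_weights()                  # pretrained=None branch: random init only
    feats = model(torch.randn(1, 3, 224, 224))
    return {k: v.shape for k, v in feats.items()} # res2..res5 at strides 4/8/16/32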
class D2FocalNet(FocalNet, Backbone):
def __init__(self, cfg, input_shape):
pretrain_img_size = cfg['BACKBONE']['FOCAL']['PRETRAIN_IMG_SIZE']
patch_size = cfg['BACKBONE']['FOCAL']['PATCH_SIZE']
in_chans = 3
embed_dim = cfg['BACKBONE']['FOCAL']['EMBED_DIM']
depths = cfg['BACKBONE']['FOCAL']['DEPTHS']
mlp_ratio = cfg['BACKBONE']['FOCAL']['MLP_RATIO']
drop_rate = cfg['BACKBONE']['FOCAL']['DROP_RATE']
drop_path_rate = cfg['BACKBONE']['FOCAL']['DROP_PATH_RATE']
norm_layer = nn.LayerNorm
patch_norm = cfg['BACKBONE']['FOCAL']['PATCH_NORM']
use_checkpoint = cfg['BACKBONE']['FOCAL']['USE_CHECKPOINT']
out_indices = cfg['BACKBONE']['FOCAL']['OUT_INDICES']
scaling_modulator = cfg['BACKBONE']['FOCAL'].get('SCALING_MODULATOR', False)
super().__init__(
pretrain_img_size,
patch_size,
in_chans,
embed_dim,
depths,
mlp_ratio,
drop_rate,
drop_path_rate,
norm_layer,
patch_norm,
out_indices,
focal_levels=cfg['BACKBONE']['FOCAL']['FOCAL_LEVELS'],
focal_windows=cfg['BACKBONE']['FOCAL']['FOCAL_WINDOWS'],
use_conv_embed=cfg['BACKBONE']['FOCAL']['USE_CONV_EMBED'],
use_postln=cfg['BACKBONE']['FOCAL']['USE_POSTLN'],
use_postln_in_modulation=cfg['BACKBONE']['FOCAL']['USE_POSTLN_IN_MODULATION'],
scaling_modulator=scaling_modulator,
use_layerscale=cfg['BACKBONE']['FOCAL']['USE_LAYERSCALE'],
use_checkpoint=use_checkpoint,
)
self._out_features = cfg['BACKBONE']['FOCAL']['OUT_FEATURES']
self._out_feature_strides = {
"res2": 4,
"res3": 8,
"res4": 16,
"res5": 32,
}
self._out_feature_channels = {
"res2": self.num_features[0],
"res3": self.num_features[1],
"res4": self.num_features[2],
"res5": self.num_features[3],
}
def forward(self, x):
"""
Args:
x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
Returns:
dict[str->Tensor]: names and the corresponding features
"""
assert (
x.dim() == 4
), f"SwinTransformer takes an input of shape (N, C, H, W). Got {x.shape} instead!"
outputs = {}
y = super().forward(x)
for k in y.keys():
if k in self._out_features:
outputs[k] = y[k]
return outputs
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
@property
def size_divisibility(self):
return 32
@register_backbone
def get_focal_backbone(cfg):
focal = D2FocalNet(cfg['MODEL'], 224)
if cfg['MODEL']['BACKBONE']['LOAD_PRETRAINED'] is True:
filename = cfg['MODEL']['BACKBONE']['PRETRAINED']
logger.info(f'=> init from {filename}')
with PathManager.open(filename, "rb") as f:
ckpt = torch.load(f)['model']
focal.load_weights(ckpt, cfg['MODEL']['BACKBONE']['FOCAL'].get('PRETRAINED_LAYERS', ['*']), cfg['VERBOSE'])
return focal | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/backbone/focal.py |
_model_entrypoints = {}
def register_backbone(fn):
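    # Register the backbone builder under the name of the module that defines it
    # (e.g. "swin", "focal"); builders are later looked up via model_entrypoints().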
module_name_split = fn.__module__.split('.')
model_name = module_name_split[-1]
_model_entrypoints[model_name] = fn
return fn
def model_entrypoints(model_name):
return _model_entrypoints[model_name]
def is_model(model_name):
return model_name in _model_entrypoints
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/backbone/registry.py |
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu, Yutong Lin, Yixuan Wei
# --------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/SwinTransformer/Swin-Transformer-Semantic-Segmentation/blob/main/mmseg/models/backbones/swin_transformer.py
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from detectron2.modeling import Backbone, ShapeSpec
from detectron2.utils.file_io import PathManager
from .registry import register_backbone
logger = logging.getLogger(__name__)
class Mlp(nn.Module):
"""Multilayer perceptron."""
def __init__(
self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
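    # windows.shape[0] equals B * num_windows, so dividing by the number of
    # windows per image (H*W / window_size^2) recovers the batch size B.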
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
class WindowAttention(nn.Module):
"""Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(
self,
dim,
window_size,
num_heads,
qkv_bias=True,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=0.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""Forward function.
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B_, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = q @ k.transpose(-2, -1)
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)
].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(
2, 0, 1
).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SwinTransformerBlock(nn.Module):
"""Swin Transformer Block.
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(
self,
dim,
num_heads,
window_size=7,
shift_size=0,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim,
window_size=to_2tuple(self.window_size),
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop
)
self.H = None
self.W = None
def forward(self, x, mask_matrix):
"""Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
mask_matrix: Attention mask for cyclic shift.
"""
B, L, C = x.shape
H, W = self.H, self.W
assert L == H * W, "input feature has wrong size"
        # HACK: the model does not upsample, so the window-size adjustment below for small inputs is left disabled
# if min([H, W]) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
# self.shift_size = 0
# self.window_size = min([H,W])
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# pad feature maps to multiples of window size
pad_l = pad_t = 0
pad_r = (self.window_size - W % self.window_size) % self.window_size
pad_b = (self.window_size - H % self.window_size) % self.window_size
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, Hp, Wp, _ = x.shape
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
attn_mask = mask_matrix
else:
shifted_x = x
attn_mask = None
# partition windows
x_windows = window_partition(
shifted_x, self.window_size
) # nW*B, window_size, window_size, C
x_windows = x_windows.view(
-1, self.window_size * self.window_size, C
) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
if pad_r > 0 or pad_b > 0:
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchMerging(nn.Module):
"""Patch Merging Layer
Args:
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x, H, W):
"""Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
"""
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
x = x.view(B, H, W, C)
# padding
pad_input = (H % 2 == 1) or (W % 2 == 1)
if pad_input:
x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
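        # Gather the four pixels of every 2x2 neighborhood and stack them along the
        # channel dimension: spatial resolution halves, channels go from C to 4C
        # before the linear reduction to 2C.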
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
class BasicLayer(nn.Module):
"""A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of feature channels
depth (int): Depths of this stage.
num_heads (int): Number of attention head.
window_size (int): Local window size. Default: 7.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(
self,
dim,
depth,
num_heads,
window_size=7,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
norm_layer=nn.LayerNorm,
downsample=None,
use_checkpoint=False,
):
super().__init__()
self.window_size = window_size
self.shift_size = window_size // 2
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList(
[
SwinTransformerBlock(
dim=dim,
num_heads=num_heads,
window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
)
for i in range(depth)
]
)
# patch merging layer
if downsample is not None:
self.downsample = downsample(dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x, H, W):
"""Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
"""
# calculate attention mask for SW-MSA
Hp = int(np.ceil(H / self.window_size)) * self.window_size
Wp = int(np.ceil(W / self.window_size)) * self.window_size
img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1
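        # Assign a distinct region id to each of the 3x3 slices of the padded map;
        # after the cyclic shift, tokens with different ids inside the same window
        # must not attend to each other, which the additive -100 mask below enforces.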
h_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
w_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(
img_mask, self.window_size
) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(
attn_mask == 0, float(0.0)
).type(x.dtype)
for blk in self.blocks:
blk.H, blk.W = H, W
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x, attn_mask)
else:
x = blk(x, attn_mask)
if self.downsample is not None:
x_down = self.downsample(x, H, W)
Wh, Ww = (H + 1) // 2, (W + 1) // 2
return x, H, W, x_down, Wh, Ww
else:
return x, H, W, x, H, W
class PatchEmbed(nn.Module):
"""Image to Patch Embedding
Args:
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
patch_size = to_2tuple(patch_size)
self.patch_size = patch_size
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
"""Forward function."""
# padding
_, _, H, W = x.size()
if W % self.patch_size[1] != 0:
x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
if H % self.patch_size[0] != 0:
x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
x = self.proj(x) # B C Wh Ww
if self.norm is not None:
Wh, Ww = x.size(2), x.size(3)
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
return x
class SwinTransformer(nn.Module):
"""Swin Transformer backbone.
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
pretrain_img_size (int): Input image size for training the pretrained model,
            used in absolute position embedding. Default 224.
patch_size (int | tuple(int)): Patch size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
depths (tuple[int]): Depths of each Swin Transformer stage.
num_heads (tuple[int]): Number of attention head of each stage.
window_size (int): Window size. Default: 7.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
drop_rate (float): Dropout rate.
attn_drop_rate (float): Attention dropout rate. Default: 0.
drop_path_rate (float): Stochastic depth rate. Default: 0.2.
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
patch_norm (bool): If True, add normalization after patch embedding. Default: True.
out_indices (Sequence[int]): Output from which stages.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(
self,
pretrain_img_size=224,
patch_size=4,
in_chans=3,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.2,
norm_layer=nn.LayerNorm,
ape=False,
patch_norm=True,
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
use_checkpoint=False,
):
super().__init__()
self.pretrain_img_size = pretrain_img_size
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.out_indices = out_indices
self.frozen_stages = frozen_stages
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None,
)
# absolute position embedding
if self.ape:
pretrain_img_size = to_2tuple(pretrain_img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [
pretrain_img_size[0] // patch_size[0],
pretrain_img_size[1] // patch_size[1],
]
self.absolute_pos_embed = nn.Parameter(
torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1])
)
trunc_normal_(self.absolute_pos_embed, std=0.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(
dim=int(embed_dim * 2 ** i_layer),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint,
)
self.layers.append(layer)
num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
self.num_features = num_features
# add a norm layer for each output
for i_layer in out_indices:
layer = norm_layer(num_features[i_layer])
layer_name = f"norm{i_layer}"
self.add_module(layer_name, layer)
self._freeze_stages()
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
if self.frozen_stages >= 1 and self.ape:
self.absolute_pos_embed.requires_grad = False
if self.frozen_stages >= 2:
self.pos_drop.eval()
for i in range(0, self.frozen_stages - 1):
m = self.layers[i]
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def load_weights(self, pretrained_dict=None, pretrained_layers=[], verbose=True):
model_dict = self.state_dict()
pretrained_dict = {
k: v for k, v in pretrained_dict.items()
if k in model_dict.keys()
}
need_init_state_dict = {}
for k, v in pretrained_dict.items():
need_init = (
(
k.split('.')[0] in pretrained_layers
or pretrained_layers[0] == '*'
)
and 'relative_position_index' not in k
and 'attn_mask' not in k
)
if need_init:
# if verbose:
# logger.info(f'=> init {k} from {pretrained}')
if 'relative_position_bias_table' in k and v.size() != model_dict[k].size():
relative_position_bias_table_pretrained = v
relative_position_bias_table_current = model_dict[k]
L1, nH1 = relative_position_bias_table_pretrained.size()
L2, nH2 = relative_position_bias_table_current.size()
if nH1 != nH2:
logger.info(f"Error in loading {k}, passing")
else:
if L1 != L2:
logger.info(
'=> load_pretrained: resized variant: {} to {}'
.format((L1, nH1), (L2, nH2))
)
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
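                            # The bias table is a flattened (2*Wh-1) x (2*Ww-1) grid per head;
                            # reshape it to 2-D, bicubically resize to the new window size,
                            # then flatten it back.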
relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(
relative_position_bias_table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
size=(S2, S2),
mode='bicubic')
v = relative_position_bias_table_pretrained_resized.view(nH2, L2).permute(1, 0)
if 'absolute_pos_embed' in k and v.size() != model_dict[k].size():
absolute_pos_embed_pretrained = v
absolute_pos_embed_current = model_dict[k]
_, L1, C1 = absolute_pos_embed_pretrained.size()
_, L2, C2 = absolute_pos_embed_current.size()
                    if C1 != C2:
logger.info(f"Error in loading {k}, passing")
else:
if L1 != L2:
logger.info(
'=> load_pretrained: resized variant: {} to {}'
.format((1, L1, C1), (1, L2, C2))
)
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.reshape(-1, S1, S1, C1)
absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.permute(0, 3, 1, 2)
absolute_pos_embed_pretrained_resized = torch.nn.functional.interpolate(
absolute_pos_embed_pretrained, size=(S2, S2), mode='bicubic')
v = absolute_pos_embed_pretrained_resized.permute(0, 2, 3, 1).flatten(1, 2)
need_init_state_dict[k] = v
self.load_state_dict(need_init_state_dict, strict=False)
def forward(self, x):
"""Forward function."""
x = self.patch_embed(x)
Wh, Ww = x.size(2), x.size(3)
if self.ape:
# interpolate the position embedding to the corresponding size
absolute_pos_embed = F.interpolate(
self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic"
)
x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C
else:
x = x.flatten(2).transpose(1, 2)
x = self.pos_drop(x)
outs = {}
for i in range(self.num_layers):
layer = self.layers[i]
x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
if i in self.out_indices:
norm_layer = getattr(self, f"norm{i}")
x_out = norm_layer(x_out)
out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
outs["res{}".format(i + 2)] = out
if len(self.out_indices) == 0:
outs["res5"] = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
return outs
def train(self, mode=True):
"""Convert the model into training mode while keep layers freezed."""
super(SwinTransformer, self).train(mode)
self._freeze_stages()
class D2SwinTransformer(SwinTransformer, Backbone):
def __init__(self, cfg, pretrain_img_size, patch_size, in_chans, embed_dim,
depths, num_heads, window_size, mlp_ratio, qkv_bias, qk_scale,
drop_rate, attn_drop_rate, drop_path_rate, norm_layer, ape,
patch_norm, out_indices, use_checkpoint):
super().__init__(
pretrain_img_size,
patch_size,
in_chans,
embed_dim,
depths,
num_heads,
window_size,
mlp_ratio,
qkv_bias,
qk_scale,
drop_rate,
attn_drop_rate,
drop_path_rate,
norm_layer,
ape,
patch_norm,
out_indices,
use_checkpoint=use_checkpoint,
)
self._out_features = cfg['OUT_FEATURES']
self._out_feature_strides = {
"res2": 4,
"res3": 8,
"res4": 16,
"res5": 32,
}
self._out_feature_channels = {
"res2": self.num_features[0],
"res3": self.num_features[1],
"res4": self.num_features[2],
"res5": self.num_features[3],
}
def forward(self, x):
"""
Args:
x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
Returns:
dict[str->Tensor]: names and the corresponding features
"""
assert (
x.dim() == 4
), f"SwinTransformer takes an input of shape (N, C, H, W). Got {x.shape} instead!"
outputs = {}
y = super().forward(x)
for k in y.keys():
if k in self._out_features:
outputs[k] = y[k]
return outputs
def output_shape(self):
feature_names = list(set(self._out_feature_strides.keys()) & set(self._out_features))
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in feature_names
}
@property
def size_divisibility(self):
return 32
@register_backbone
def get_swin_backbone(cfg):
swin_cfg = cfg['MODEL']['BACKBONE']['SWIN']
pretrain_img_size = swin_cfg['PRETRAIN_IMG_SIZE']
patch_size = swin_cfg['PATCH_SIZE']
in_chans = 3
embed_dim = swin_cfg['EMBED_DIM']
depths = swin_cfg['DEPTHS']
num_heads = swin_cfg['NUM_HEADS']
window_size = swin_cfg['WINDOW_SIZE']
mlp_ratio = swin_cfg['MLP_RATIO']
qkv_bias = swin_cfg['QKV_BIAS']
qk_scale = swin_cfg['QK_SCALE']
drop_rate = swin_cfg['DROP_RATE']
attn_drop_rate = swin_cfg['ATTN_DROP_RATE']
drop_path_rate = swin_cfg['DROP_PATH_RATE']
norm_layer = nn.LayerNorm
ape = swin_cfg['APE']
patch_norm = swin_cfg['PATCH_NORM']
use_checkpoint = swin_cfg['USE_CHECKPOINT']
out_indices = swin_cfg.get('OUT_INDICES', [0,1,2,3])
swin = D2SwinTransformer(
swin_cfg,
pretrain_img_size,
patch_size,
in_chans,
embed_dim,
depths,
num_heads,
window_size,
mlp_ratio,
qkv_bias,
qk_scale,
drop_rate,
attn_drop_rate,
drop_path_rate,
norm_layer,
ape,
patch_norm,
out_indices,
use_checkpoint=use_checkpoint,
)
if cfg['MODEL']['BACKBONE']['LOAD_PRETRAINED'] is True:
filename = cfg['MODEL']['BACKBONE']['PRETRAINED']
with PathManager.open(filename, "rb") as f:
ckpt = torch.load(f, map_location=cfg['device'])['model']
swin.load_weights(ckpt, swin_cfg.get('PRETRAINED_LAYERS', ['*']), cfg['VERBOSE'])
return swin | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/backbone/swin.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import torch.nn as nn
from detectron2.modeling import ShapeSpec
__all__ = ["Backbone"]
class Backbone(nn.Module):
"""
Abstract base class for network backbones.
"""
def __init__(self):
"""
The `__init__` method of any subclass can specify its own set of arguments.
"""
super().__init__()
def forward(self):
"""
Subclasses must override this method, but adhere to the same return type.
Returns:
dict[str->Tensor]: mapping from feature name (e.g., "res2") to tensor
"""
pass
@property
def size_divisibility(self) -> int:
"""
Some backbones require the input height and width to be divisible by a
specific integer. This is typically true for encoder / decoder type networks
with lateral connection (e.g., FPN) for which feature maps need to match
dimension in the "bottom up" and "top down" paths. Set to 0 if no specific
input size divisibility is required.
"""
return 0
def output_shape(self):
"""
Returns:
dict[str->ShapeSpec]
"""
# this is a backward-compatible default
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/backbone/backbone.py |
# --------------------------------------------------------
# FocalNet for Semantic Segmentation
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Jianwei Yang
# --------------------------------------------------------
import math
import time
import numpy as np
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from detectron2.utils.file_io import PathManager
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec
from .registry import register_backbone
logger = logging.getLogger(__name__)
class Mlp(nn.Module):
""" Multilayer perceptron."""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class FocalModulation(nn.Module):
""" Focal Modulation
Args:
dim (int): Number of input channels.
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
focal_level (int): Number of focal levels
focal_window (int): Focal window size at focal level 1
focal_factor (int, default=2): Step to increase the focal window
        use_postln (bool, default=False): Whether to use post-modulation layernorm
"""
def __init__(self, dim, proj_drop=0., focal_level=2, focal_window=7, focal_factor=2, use_postln=False, use_postln_in_modulation=False, scaling_modulator=False):
super().__init__()
self.dim = dim
# specific args for focalv3
self.focal_level = focal_level
self.focal_window = focal_window
self.focal_factor = focal_factor
self.use_postln_in_modulation = use_postln_in_modulation
self.scaling_modulator = scaling_modulator
self.f = nn.Linear(dim, 2*dim+(self.focal_level+1), bias=True)
self.h = nn.Conv2d(dim, dim, kernel_size=1, stride=1, padding=0, groups=1, bias=True)
self.act = nn.GELU()
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.focal_layers = nn.ModuleList()
if self.use_postln_in_modulation:
self.ln = nn.LayerNorm(dim)
for k in range(self.focal_level):
kernel_size = self.focal_factor*k + self.focal_window
self.focal_layers.append(
nn.Sequential(
nn.Conv2d(dim, dim, kernel_size=kernel_size, stride=1, groups=dim,
padding=kernel_size//2, bias=False),
nn.GELU(),
)
)
def forward(self, x):
""" Forward function.
Args:
x: input features with shape of (B, H, W, C)
"""
B, nH, nW, C = x.shape
x = self.f(x)
x = x.permute(0, 3, 1, 2).contiguous()
q, ctx, gates = torch.split(x, (C, C, self.focal_level+1), 1)
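        # q: query features, ctx: context to be aggregated, gates: per-focal-level
        # gating maps plus one extra channel for the globally pooled context.
        # Each focal level below refines ctx with a progressively larger depth-wise
        # kernel and mixes it in through its gate.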
ctx_all = 0
for l in range(self.focal_level):
ctx = self.focal_layers[l](ctx)
ctx_all = ctx_all + ctx*gates[:, l:l+1]
ctx_global = self.act(ctx.mean(2, keepdim=True).mean(3, keepdim=True))
ctx_all = ctx_all + ctx_global*gates[:,self.focal_level:]
if self.scaling_modulator:
ctx_all = ctx_all / (self.focal_level + 1)
x_out = q * self.h(ctx_all)
x_out = x_out.permute(0, 2, 3, 1).contiguous()
if self.use_postln_in_modulation:
x_out = self.ln(x_out)
x_out = self.proj(x_out)
x_out = self.proj_drop(x_out)
return x_out
class FocalModulationBlock(nn.Module):
""" Focal Modulation Block.
Args:
dim (int): Number of input channels.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
drop (float, optional): Dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
focal_level (int): number of focal levels
focal_window (int): focal kernel size at level 1
"""
def __init__(self, dim, mlp_ratio=4., drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm,
focal_level=2, focal_window=9,
use_postln=False, use_postln_in_modulation=False,
scaling_modulator=False,
use_layerscale=False,
layerscale_value=1e-4):
super().__init__()
self.dim = dim
self.mlp_ratio = mlp_ratio
self.focal_window = focal_window
self.focal_level = focal_level
self.use_postln = use_postln
self.use_layerscale = use_layerscale
self.dw1 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=1, groups=dim)
self.norm1 = norm_layer(dim)
self.modulation = FocalModulation(
dim, focal_window=self.focal_window, focal_level=self.focal_level, proj_drop=drop, use_postln_in_modulation=use_postln_in_modulation, scaling_modulator=scaling_modulator
)
self.dw2 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=1, groups=dim)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
self.H = None
self.W = None
self.gamma_1 = 1.0
self.gamma_2 = 1.0
if self.use_layerscale:
self.gamma_1 = nn.Parameter(layerscale_value * torch.ones((dim)), requires_grad=True)
self.gamma_2 = nn.Parameter(layerscale_value * torch.ones((dim)), requires_grad=True)
def forward(self, x):
""" Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
"""
B, L, C = x.shape
H, W = self.H, self.W
assert L == H * W, "input feature has wrong size"
x = x.view(B, H, W, C).permute(0, 3, 1, 2).contiguous()
x = x + self.dw1(x)
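        # Residual depth-wise 3x3 convolution on the (B, C, H, W) layout before the modulation branch.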
x = x.permute(0, 2, 3, 1).contiguous().view(B, L, C)
shortcut = x
if not self.use_postln:
x = self.norm1(x)
x = x.view(B, H, W, C)
# FM
x = self.modulation(x).view(B, H * W, C)
x = shortcut + self.drop_path(self.gamma_1 * x)
if self.use_postln:
x = self.norm1(x)
x = x.view(B, H, W, C).permute(0, 3, 1, 2).contiguous()
x = x + self.dw2(x)
x = x.permute(0, 2, 3, 1).contiguous().view(B, L, C)
if not self.use_postln:
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.gamma_2 * self.mlp(x))
x = self.norm2(x)
return x
class BasicLayer(nn.Module):
""" A basic focal modulation layer for one stage.
Args:
dim (int): Number of feature channels
depth (int): Depths of this stage.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
drop (float, optional): Dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
focal_level (int): Number of focal levels
focal_window (int): Focal window size at focal level 1
        use_conv_embed (bool): Use overlapped convolution for patch embedding or not. Default: False
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self,
dim,
depth,
mlp_ratio=4.,
drop=0.,
drop_path=0.,
norm_layer=nn.LayerNorm,
downsample=None,
focal_window=9,
focal_level=2,
use_conv_embed=False,
use_postln=False,
use_postln_in_modulation=False,
scaling_modulator=False,
use_layerscale=False,
use_checkpoint=False,
use_pre_norm=False,
):
super().__init__()
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
FocalModulationBlock(
dim=dim,
mlp_ratio=mlp_ratio,
drop=drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
focal_window=focal_window,
focal_level=focal_level,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
scaling_modulator=scaling_modulator,
use_layerscale=use_layerscale,
norm_layer=norm_layer)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(
patch_size=2,
in_chans=dim, embed_dim=2*dim,
use_conv_embed=use_conv_embed,
norm_layer=norm_layer,
is_stem=False,
use_pre_norm=use_pre_norm
)
else:
self.downsample = None
def forward(self, x, H, W):
""" Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
"""
for blk in self.blocks:
blk.H, blk.W = H, W
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
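            # The PatchEmbed-style downsampler expects a (B, C, H, W) feature map,
            # so reshape the (B, H*W, C) token sequence back before downsampling.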
x_reshaped = x.transpose(1, 2).view(x.shape[0], x.shape[-1], H, W)
x_down = self.downsample(x_reshaped)
x_down = x_down.flatten(2).transpose(1, 2)
Wh, Ww = (H + 1) // 2, (W + 1) // 2
return x, H, W, x_down, Wh, Ww
else:
return x, H, W, x, H, W
# class PatchEmbed(nn.Module):
# r""" Image to Patch Embedding
# Args:
# img_size (int): Image size. Default: 224.
# patch_size (int): Patch token size. Default: 4.
# in_chans (int): Number of input image channels. Default: 3.
# embed_dim (int): Number of linear projection output channels. Default: 96.
# norm_layer (nn.Module, optional): Normalization layer. Default: None
# """
# def __init__(self, img_size=(224, 224), patch_size=4, in_chans=3, embed_dim=96,
# use_conv_embed=False, norm_layer=None, is_stem=False, use_pre_norm=False):
# super().__init__()
# patch_size = to_2tuple(patch_size)
# patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
# self.img_size = img_size
# self.patch_size = patch_size
# self.patches_resolution = patches_resolution
# self.num_patches = patches_resolution[0] * patches_resolution[1]
# self.in_chans = in_chans
# self.embed_dim = embed_dim
# self.use_pre_norm = use_pre_norm
# if use_conv_embed:
# # if we choose to use conv embedding, then we treat the stem and non-stem differently
# if is_stem:
# kernel_size = 7; padding = 3; stride = 4
# else:
# kernel_size = 3; padding = 1; stride = 2
# self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)
# else:
# self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
# if self.use_pre_norm:
# if norm_layer is not None:
# self.norm = norm_layer(in_chans)
# else:
# self.norm = None
# else:
# if norm_layer is not None:
# self.norm = norm_layer(embed_dim)
# else:
# self.norm = None
# def forward(self, x):
# B, C, H, W = x.shape
# # FIXME look at relaxing size constraints
# assert H == self.img_size[0] and W == self.img_size[1], \
# f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
# if self.use_pre_norm:
# if self.norm is not None:
# x = x.flatten(2).transpose(1, 2) # B Ph*Pw C
# x = self.norm(x).transpose(1, 2).view(B, C, H, W)
# x = self.proj(x).flatten(2).transpose(1, 2)
# else:
# x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
# if self.norm is not None:
# x = self.norm(x)
# return x
# def flops(self):
# Ho, Wo = self.patches_resolution
# flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
# if self.norm is not None:
# flops += Ho * Wo * self.embed_dim
# return flops
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
Args:
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
        use_conv_embed (bool): Whether to use overlapped convolution for patch embedding. Default: False
is_stem (bool): Is the stem block or not.
"""
def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None, use_conv_embed=False, is_stem=False, use_pre_norm=False):
super().__init__()
patch_size = to_2tuple(patch_size)
self.patch_size = patch_size
self.in_chans = in_chans
self.embed_dim = embed_dim
self.use_pre_norm = use_pre_norm
if use_conv_embed:
# if we choose to use conv embedding, then we treat the stem and non-stem differently
if is_stem:
kernel_size = 7; padding = 3; stride = 4
else:
kernel_size = 3; padding = 1; stride = 2
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)
else:
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if self.use_pre_norm:
if norm_layer is not None:
self.norm = norm_layer(in_chans)
else:
self.norm = None
else:
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
"""Forward function."""
B, C, H, W = x.size()
if W % self.patch_size[1] != 0:
x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
if H % self.patch_size[0] != 0:
x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
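        # With use_pre_norm, LayerNorm is applied to the raw input channels before the
        # projection; otherwise the (optional) norm is applied to the patch embeddings.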
if self.use_pre_norm:
if self.norm is not None:
x = x.flatten(2).transpose(1, 2) # B Ph*Pw C
x = self.norm(x).transpose(1, 2).view(B, C, H, W)
x = self.proj(x)
else:
x = self.proj(x) # B C Wh Ww
if self.norm is not None:
Wh, Ww = x.size(2), x.size(3)
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
return x
class FocalNet(nn.Module):
""" FocalNet backbone.
Args:
pretrain_img_size (int): Input image size for training the pretrained model,
            used in absolute position embedding. Default 1600.
patch_size (int | tuple(int)): Patch size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
        depths (tuple[int]): Depths of each FocalNet stage.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
drop_rate (float): Dropout rate.
drop_path_rate (float): Stochastic depth rate. Default: 0.2.
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
patch_norm (bool): If True, add normalization after patch embedding. Default: True.
out_indices (Sequence[int]): Output from which stages.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
focal_levels (Sequence[int]): Number of focal levels at four stages
focal_windows (Sequence[int]): Focal window sizes at first focal level at four stages
        use_conv_embed (bool): Whether to use overlapped convolution for patch embedding
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self,
pretrain_img_size=1600,
patch_size=4,
in_chans=3,
embed_dim=96,
depths=[2, 2, 6, 2],
mlp_ratio=4.,
drop_rate=0.,
drop_path_rate=0.2,
norm_layer=nn.LayerNorm,
patch_norm=True,
out_indices=[0, 1, 2, 3],
frozen_stages=-1,
focal_levels=[2,2,2,2],
focal_windows=[9,9,9,9],
use_pre_norms=[False, False, False, False],
use_conv_embed=False,
use_postln=False,
use_postln_in_modulation=False,
scaling_modulator=False,
use_layerscale=False,
use_checkpoint=False,
):
super().__init__()
self.pretrain_img_size = pretrain_img_size
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.patch_norm = patch_norm
self.out_indices = out_indices
self.frozen_stages = frozen_stages
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None,
use_conv_embed=use_conv_embed, is_stem=True, use_pre_norm=False)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(
dim=int(embed_dim * 2 ** i_layer),
depth=depths[i_layer],
mlp_ratio=mlp_ratio,
drop=drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
focal_window=focal_windows[i_layer],
focal_level=focal_levels[i_layer],
use_pre_norm=use_pre_norms[i_layer],
use_conv_embed=use_conv_embed,
use_postln=use_postln,
use_postln_in_modulation=use_postln_in_modulation,
scaling_modulator=scaling_modulator,
use_layerscale=use_layerscale,
use_checkpoint=use_checkpoint)
self.layers.append(layer)
num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
self.num_features = num_features
# self.norm = norm_layer(num_features[-1])
# add a norm layer for each output
for i_layer in self.out_indices:
layer = norm_layer(num_features[i_layer])
layer_name = f'norm{i_layer}'
self.add_module(layer_name, layer)
self._freeze_stages()
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
if self.frozen_stages >= 2:
self.pos_drop.eval()
for i in range(0, self.frozen_stages - 1):
m = self.layers[i]
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
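        # NOTE: get_root_logger and load_checkpoint below are mmcv/mmseg-style helpers
        # that are not imported in this file; this branch only runs when a checkpoint
        # path string is passed to init_weights.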
if isinstance(pretrained, str):
self.apply(_init_weights)
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
self.apply(_init_weights)
else:
raise TypeError('pretrained must be a str or None')
def load_weights(self, pretrained_dict=None, pretrained_layers=[], verbose=True):
model_dict = self.state_dict()
missed_dict = [k for k in model_dict.keys() if k not in pretrained_dict]
logger.info(f'=> Missed keys {missed_dict}')
unexpected_dict = [k for k in pretrained_dict.keys() if k not in model_dict]
logger.info(f'=> Unexpected keys {unexpected_dict}')
pretrained_dict = {
k: v for k, v in pretrained_dict.items()
if k in model_dict.keys()
}
need_init_state_dict = {}
for k, v in pretrained_dict.items():
need_init = (
(
k.split('.')[0] in pretrained_layers
or pretrained_layers[0] == '*'
)
and 'relative_position_index' not in k
and 'attn_mask' not in k
)
if need_init:
# if verbose:
# logger.info(f'=> init {k} from {pretrained}')
                if (('pool_layers' in k) or ('focal_layers' in k)) and v.size() != model_dict[k].size():
table_pretrained = v
table_current = model_dict[k]
fsize1 = table_pretrained.shape[2]
fsize2 = table_current.shape[2]
# NOTE: different from interpolation used in self-attention, we use padding or clipping for focal conv
if fsize1 < fsize2:
table_pretrained_resized = torch.zeros(table_current.shape)
table_pretrained_resized[:, :, (fsize2-fsize1)//2:-(fsize2-fsize1)//2, (fsize2-fsize1)//2:-(fsize2-fsize1)//2] = table_pretrained
v = table_pretrained_resized
elif fsize1 > fsize2:
table_pretrained_resized = table_pretrained[:, :, (fsize1-fsize2)//2:-(fsize1-fsize2)//2, (fsize1-fsize2)//2:-(fsize1-fsize2)//2]
v = table_pretrained_resized
if ("modulation.f" in k or "pre_conv" in k):
table_pretrained = v
table_current = model_dict[k]
if table_pretrained.shape != table_current.shape:
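                        # modulation.f maps dim -> 2*dim + (focal_level + 1), so its weight/bias length
                        # changes with the number of focal levels. Copy the matching leading rows, the
                        # last (global-gate) entry, and, for the weight, the available level-gate rows;
                        # anything left over stays zero-initialized.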
if len(table_pretrained.shape) == 2:
dim = table_pretrained.shape[1]
assert table_current.shape[1] == dim
L1 = table_pretrained.shape[0]
L2 = table_current.shape[0]
if L1 < L2:
table_pretrained_resized = torch.zeros(table_current.shape)
# copy for linear project
table_pretrained_resized[:2*dim] = table_pretrained[:2*dim]
# copy for global token gating
table_pretrained_resized[-1] = table_pretrained[-1]
# copy for first multiple focal levels
table_pretrained_resized[2*dim:2*dim+(L1-2*dim-1)] = table_pretrained[2*dim:-1]
# reassign pretrained weights
v = table_pretrained_resized
elif L1 > L2:
raise NotImplementedError
elif len(table_pretrained.shape) == 1:
dim = table_pretrained.shape[0]
L1 = table_pretrained.shape[0]
L2 = table_current.shape[0]
if L1 < L2:
table_pretrained_resized = torch.zeros(table_current.shape)
# copy for linear project
table_pretrained_resized[:dim] = table_pretrained[:dim]
# copy for global token gating
table_pretrained_resized[-1] = table_pretrained[-1]
# copy for first multiple focal levels
# table_pretrained_resized[dim:2*dim+(L1-2*dim-1)] = table_pretrained[2*dim:-1]
# reassign pretrained weights
v = table_pretrained_resized
elif L1 > L2:
raise NotImplementedError
need_init_state_dict[k] = v
self.load_state_dict(need_init_state_dict, strict=False)
def forward(self, x):
"""Forward function."""
tic = time.time()
x = self.patch_embed(x)
Wh, Ww = x.size(2), x.size(3)
x = x.flatten(2).transpose(1, 2)
x = self.pos_drop(x)
outs = {}
for i in range(self.num_layers):
layer = self.layers[i]
x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
if i in self.out_indices:
norm_layer = getattr(self, f'norm{i}')
x_out = norm_layer(x_out)
out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
outs["res{}".format(i + 2)] = out
if len(self.out_indices) == 0:
outs["res5"] = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
toc = time.time()
return outs
def train(self, mode=True):
"""Convert the model into training mode while keep layers freezed."""
super(FocalNet, self).train(mode)
self._freeze_stages()
class D2FocalNet(FocalNet, Backbone):
def __init__(self, cfg, input_shape):
pretrain_img_size = cfg['BACKBONE']['FOCAL']['PRETRAIN_IMG_SIZE']
patch_size = cfg['BACKBONE']['FOCAL']['PATCH_SIZE']
in_chans = 3
embed_dim = cfg['BACKBONE']['FOCAL']['EMBED_DIM']
depths = cfg['BACKBONE']['FOCAL']['DEPTHS']
mlp_ratio = cfg['BACKBONE']['FOCAL']['MLP_RATIO']
drop_rate = cfg['BACKBONE']['FOCAL']['DROP_RATE']
drop_path_rate = cfg['BACKBONE']['FOCAL']['DROP_PATH_RATE']
norm_layer = nn.LayerNorm
patch_norm = cfg['BACKBONE']['FOCAL']['PATCH_NORM']
use_checkpoint = cfg['BACKBONE']['FOCAL']['USE_CHECKPOINT']
out_indices = cfg['BACKBONE']['FOCAL']['OUT_INDICES']
scaling_modulator = cfg['BACKBONE']['FOCAL'].get('SCALING_MODULATOR', False)
super().__init__(
pretrain_img_size,
patch_size,
in_chans,
embed_dim,
depths,
mlp_ratio,
drop_rate,
drop_path_rate,
norm_layer,
patch_norm,
out_indices,
focal_levels=cfg['BACKBONE']['FOCAL']['FOCAL_LEVELS'],
focal_windows=cfg['BACKBONE']['FOCAL']['FOCAL_WINDOWS'],
use_conv_embed=cfg['BACKBONE']['FOCAL']['USE_CONV_EMBED'],
use_postln=cfg['BACKBONE']['FOCAL']['USE_POSTLN'],
use_postln_in_modulation=cfg['BACKBONE']['FOCAL']['USE_POSTLN_IN_MODULATION'],
scaling_modulator=scaling_modulator,
use_layerscale=cfg['BACKBONE']['FOCAL']['USE_LAYERSCALE'],
use_checkpoint=use_checkpoint,
)
self._out_features = cfg['BACKBONE']['FOCAL']['OUT_FEATURES']
self._out_feature_strides = {
"res2": 4,
"res3": 8,
"res4": 16,
"res5": 32,
}
self._out_feature_channels = {
"res2": self.num_features[0],
"res3": self.num_features[1],
"res4": self.num_features[2],
"res5": self.num_features[3],
}
def forward(self, x):
"""
Args:
x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
Returns:
dict[str->Tensor]: names and the corresponding features
"""
assert (
x.dim() == 4
), f"SwinTransformer takes an input of shape (N, C, H, W). Got {x.shape} instead!"
outputs = {}
y = super().forward(x)
for k in y.keys():
if k in self._out_features:
outputs[k] = y[k]
return outputs
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
@property
def size_divisibility(self):
return 32
@register_backbone
def get_focal_backbone(cfg):
focal = D2FocalNet(cfg['MODEL'], 224)
if cfg['MODEL']['BACKBONE']['LOAD_PRETRAINED'] is True:
filename = cfg['MODEL']['BACKBONE']['PRETRAINED']
logger.info(f'=> init from {filename}')
with PathManager.open(filename, "rb") as f:
ckpt = torch.load(f)['model']
focal.load_weights(ckpt, cfg['MODEL']['BACKBONE']['FOCAL'].get('PRETRAINED_LAYERS', ['*']), cfg['VERBOSE'])
return focal | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/backbone/focal_dw.py |
from .build import build_backbone
from .resnet import *
from .swin import *
from .focal import *
from .focal_dw import *
from .backbone import *
from .davit import * | Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/backbone/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import pickle
import numpy as np
from typing import Any, Dict
import fvcore.nn.weight_init as weight_init
import torch
import torch.nn.functional as F
from torch import nn
from .backbone import Backbone
from .registry import register_backbone
from detectron2.layers import (
CNNBlockBase,
Conv2d,
DeformConv,
ModulatedDeformConv,
ShapeSpec,
get_norm,
)
from detectron2.utils.file_io import PathManager
__all__ = [
"ResNetBlockBase",
"BasicBlock",
"BottleneckBlock",
"DeformBottleneckBlock",
"BasicStem",
"ResNet",
"make_stage",
"get_resnet_backbone",
]
class BasicBlock(CNNBlockBase):
"""
The basic residual block for ResNet-18 and ResNet-34 defined in :paper:`ResNet`,
with two 3x3 conv layers and a projection shortcut if needed.
"""
def __init__(self, in_channels, out_channels, *, stride=1, norm="BN"):
"""
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
stride (int): Stride for the first conv.
norm (str or callable): normalization for all conv layers.
See :func:`layers.get_norm` for supported format.
"""
super().__init__(in_channels, out_channels, stride)
if in_channels != out_channels:
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
self.conv1 = Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=False,
norm=get_norm(norm, out_channels),
)
self.conv2 = Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False,
norm=get_norm(norm, out_channels),
)
for layer in [self.conv1, self.conv2, self.shortcut]:
if layer is not None: # shortcut can be None
weight_init.c2_msra_fill(layer)
def forward(self, x):
out = self.conv1(x)
out = F.relu_(out)
out = self.conv2(out)
if self.shortcut is not None:
shortcut = self.shortcut(x)
else:
shortcut = x
out += shortcut
out = F.relu_(out)
return out
class BottleneckBlock(CNNBlockBase):
"""
The standard bottleneck residual block used by ResNet-50, 101 and 152
defined in :paper:`ResNet`. It contains 3 conv layers with kernels
1x1, 3x3, 1x1, and a projection shortcut if needed.
"""
def __init__(
self,
in_channels,
out_channels,
*,
bottleneck_channels,
stride=1,
num_groups=1,
norm="BN",
stride_in_1x1=False,
dilation=1,
):
"""
Args:
bottleneck_channels (int): number of output channels for the 3x3
"bottleneck" conv layers.
num_groups (int): number of groups for the 3x3 conv layer.
norm (str or callable): normalization for all conv layers.
See :func:`layers.get_norm` for supported format.
stride_in_1x1 (bool): when stride>1, whether to put stride in the
first 1x1 convolution or the bottleneck 3x3 convolution.
dilation (int): the dilation rate of the 3x3 conv layer.
"""
super().__init__(in_channels, out_channels, stride)
if in_channels != out_channels:
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
# The original MSRA ResNet models have stride in the first 1x1 conv
# The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
# stride in the 3x3 conv
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
self.conv1 = Conv2d(
in_channels,
bottleneck_channels,
kernel_size=1,
stride=stride_1x1,
bias=False,
norm=get_norm(norm, bottleneck_channels),
)
self.conv2 = Conv2d(
bottleneck_channels,
bottleneck_channels,
kernel_size=3,
stride=stride_3x3,
padding=1 * dilation,
bias=False,
groups=num_groups,
dilation=dilation,
norm=get_norm(norm, bottleneck_channels),
)
self.conv3 = Conv2d(
bottleneck_channels,
out_channels,
kernel_size=1,
bias=False,
norm=get_norm(norm, out_channels),
)
for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
if layer is not None: # shortcut can be None
weight_init.c2_msra_fill(layer)
# Zero-initialize the last normalization in each residual branch,
# so that at the beginning, the residual branch starts with zeros,
# and each residual block behaves like an identity.
# See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
# "For BN layers, the learnable scaling coefficient γ is initialized
# to be 1, except for each residual block's last BN
# where γ is initialized to be 0."
# nn.init.constant_(self.conv3.norm.weight, 0)
# TODO this somehow hurts performance when training GN models from scratch.
# Add it as an option when we need to use this code to train a backbone.
def forward(self, x):
out = self.conv1(x)
out = F.relu_(out)
out = self.conv2(out)
out = F.relu_(out)
out = self.conv3(out)
if self.shortcut is not None:
shortcut = self.shortcut(x)
else:
shortcut = x
out += shortcut
out = F.relu_(out)
return out
class DeformBottleneckBlock(CNNBlockBase):
"""
Similar to :class:`BottleneckBlock`, but with :paper:`deformable conv <deformconv>`
in the 3x3 convolution.
"""
def __init__(
self,
in_channels,
out_channels,
*,
bottleneck_channels,
stride=1,
num_groups=1,
norm="BN",
stride_in_1x1=False,
dilation=1,
deform_modulated=False,
deform_num_groups=1,
):
super().__init__(in_channels, out_channels, stride)
self.deform_modulated = deform_modulated
if in_channels != out_channels:
self.shortcut = Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=stride,
bias=False,
norm=get_norm(norm, out_channels),
)
else:
self.shortcut = None
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
self.conv1 = Conv2d(
in_channels,
bottleneck_channels,
kernel_size=1,
stride=stride_1x1,
bias=False,
norm=get_norm(norm, bottleneck_channels),
)
if deform_modulated:
deform_conv_op = ModulatedDeformConv
            # Modulated deformable conv predicts (x, y) offsets plus a mask:
            # 3 * kernel_size * kernel_size = 3 * 3 * 3 = 27 channels.
            offset_channels = 27
        else:
            deform_conv_op = DeformConv
            # Plain deformable conv predicts only (x, y) offsets:
            # 2 * kernel_size * kernel_size = 2 * 3 * 3 = 18 channels.
            offset_channels = 18
self.conv2_offset = Conv2d(
bottleneck_channels,
offset_channels * deform_num_groups,
kernel_size=3,
stride=stride_3x3,
padding=1 * dilation,
dilation=dilation,
)
self.conv2 = deform_conv_op(
bottleneck_channels,
bottleneck_channels,
kernel_size=3,
stride=stride_3x3,
padding=1 * dilation,
bias=False,
groups=num_groups,
dilation=dilation,
deformable_groups=deform_num_groups,
norm=get_norm(norm, bottleneck_channels),
)
self.conv3 = Conv2d(
bottleneck_channels,
out_channels,
kernel_size=1,
bias=False,
norm=get_norm(norm, out_channels),
)
for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
if layer is not None: # shortcut can be None
weight_init.c2_msra_fill(layer)
nn.init.constant_(self.conv2_offset.weight, 0)
nn.init.constant_(self.conv2_offset.bias, 0)
def forward(self, x):
out = self.conv1(x)
out = F.relu_(out)
if self.deform_modulated:
offset_mask = self.conv2_offset(out)
offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1)
offset = torch.cat((offset_x, offset_y), dim=1)
mask = mask.sigmoid()
out = self.conv2(out, offset, mask)
else:
offset = self.conv2_offset(out)
out = self.conv2(out, offset)
out = F.relu_(out)
out = self.conv3(out)
if self.shortcut is not None:
shortcut = self.shortcut(x)
else:
shortcut = x
out += shortcut
out = F.relu_(out)
return out
class BasicStem(CNNBlockBase):
"""
The standard ResNet stem (layers before the first residual block),
with a conv, relu and max_pool.
"""
def __init__(self, in_channels=3, out_channels=64, norm="BN"):
"""
Args:
norm (str or callable): norm after the first conv layer.
See :func:`layers.get_norm` for supported format.
"""
super().__init__(in_channels, out_channels, 4)
self.in_channels = in_channels
self.conv1 = Conv2d(
in_channels,
out_channels,
kernel_size=7,
stride=2,
padding=3,
bias=False,
norm=get_norm(norm, out_channels),
)
weight_init.c2_msra_fill(self.conv1)
def forward(self, x):
x = self.conv1(x)
x = F.relu_(x)
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
return x
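# --- Hedged usage sketch (not part of the original file) -------------------
# The stem's 7x7 stride-2 conv followed by the 3x3 stride-2 max pool gives an
# overall stride of 4 (hence the 4 passed to CNNBlockBase above): 224 -> 56.
def _example_basic_stem():
    import torch
    stem = BasicStem(in_channels=3, out_channels=64, norm="BN")
    stem.eval()
    with torch.no_grad():
        y = stem(torch.randn(1, 3, 224, 224))
    assert y.shape == (1, 64, 56, 56)
    return y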
class ResNet(Backbone):
"""
Implement :paper:`ResNet`.
"""
def __init__(self, stem, stages, num_classes=None, out_features=None, freeze_at=0):
"""
Args:
stem (nn.Module): a stem module
stages (list[list[CNNBlockBase]]): several (typically 4) stages,
each contains multiple :class:`CNNBlockBase`.
num_classes (None or int): if None, will not perform classification.
Otherwise, will create a linear layer.
out_features (list[str]): name of the layers whose outputs should
be returned in forward. Can be anything in "stem", "linear", or "res2" ...
If None, will return the output of the last layer.
freeze_at (int): The number of stages at the beginning to freeze.
see :meth:`freeze` for detailed explanation.
"""
super().__init__()
self.stem = stem
self.num_classes = num_classes
current_stride = self.stem.stride
self._out_feature_strides = {"stem": current_stride}
self._out_feature_channels = {"stem": self.stem.out_channels}
self.stage_names, self.stages = [], []
if out_features is not None:
# Avoid keeping unused layers in this module. They consume extra memory
# and may cause allreduce to fail
num_stages = max(
[{"res2": 1, "res3": 2, "res4": 3, "res5": 4}.get(f, 0) for f in out_features]
)
stages = stages[:num_stages]
for i, blocks in enumerate(stages):
assert len(blocks) > 0, len(blocks)
for block in blocks:
assert isinstance(block, CNNBlockBase), block
name = "res" + str(i + 2)
stage = nn.Sequential(*blocks)
self.add_module(name, stage)
self.stage_names.append(name)
self.stages.append(stage)
self._out_feature_strides[name] = current_stride = int(
current_stride * np.prod([k.stride for k in blocks])
)
self._out_feature_channels[name] = curr_channels = blocks[-1].out_channels
self.stage_names = tuple(self.stage_names) # Make it static for scripting
if num_classes is not None:
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.linear = nn.Linear(curr_channels, num_classes)
# Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
# "The 1000-way fully-connected layer is initialized by
# drawing weights from a zero-mean Gaussian with standard deviation of 0.01."
nn.init.normal_(self.linear.weight, std=0.01)
name = "linear"
if out_features is None:
out_features = [name]
self._out_features = out_features
assert len(self._out_features)
children = [x[0] for x in self.named_children()]
for out_feature in self._out_features:
assert out_feature in children, "Available children: {}".format(", ".join(children))
self.freeze(freeze_at)
def forward(self, x):
"""
Args:
x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
Returns:
dict[str->Tensor]: names and the corresponding features
"""
assert x.dim() == 4, f"ResNet takes an input of shape (N, C, H, W). Got {x.shape} instead!"
outputs = {}
x = self.stem(x)
if "stem" in self._out_features:
outputs["stem"] = x
for name, stage in zip(self.stage_names, self.stages):
x = stage(x)
if name in self._out_features:
outputs[name] = x
if self.num_classes is not None:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.linear(x)
if "linear" in self._out_features:
outputs["linear"] = x
return outputs
def output_shape(self):
return {
name: ShapeSpec(
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
)
for name in self._out_features
}
def freeze(self, freeze_at=0):
"""
Freeze the first several stages of the ResNet. Commonly used in
fine-tuning.
Layers that produce the same feature map spatial size are defined as one
"stage" by :paper:`FPN`.
Args:
freeze_at (int): number of stages to freeze.
`1` means freezing the stem. `2` means freezing the stem and
one residual stage, etc.
Returns:
nn.Module: this ResNet itself
"""
if freeze_at >= 1:
self.stem.freeze()
for idx, stage in enumerate(self.stages, start=2):
if freeze_at >= idx:
for block in stage.children():
block.freeze()
return self
@staticmethod
def make_stage(block_class, num_blocks, *, in_channels, out_channels, **kwargs):
"""
Create a list of blocks of the same type that forms one ResNet stage.
Args:
block_class (type): a subclass of CNNBlockBase that's used to create all blocks in this
stage. A module of this type must not change spatial resolution of inputs unless its
stride != 1.
num_blocks (int): number of blocks in this stage
in_channels (int): input channels of the entire stage.
out_channels (int): output channels of **every block** in the stage.
kwargs: other arguments passed to the constructor of
`block_class`. If the argument name is "xx_per_block", the
argument is a list of values to be passed to each block in the
stage. Otherwise, the same argument is passed to every block
in the stage.
Returns:
list[CNNBlockBase]: a list of block module.
Examples:
::
stage = ResNet.make_stage(
BottleneckBlock, 3, in_channels=16, out_channels=64,
bottleneck_channels=16, num_groups=1,
stride_per_block=[2, 1, 1],
dilations_per_block=[1, 1, 2]
)
Usually, layers that produce the same feature map spatial size are defined as one
"stage" (in :paper:`FPN`). Under such definition, ``stride_per_block[1:]`` should
all be 1.
"""
blocks = []
for i in range(num_blocks):
curr_kwargs = {}
for k, v in kwargs.items():
if k.endswith("_per_block"):
assert len(v) == num_blocks, (
f"Argument '{k}' of make_stage should have the "
f"same length as num_blocks={num_blocks}."
)
newk = k[: -len("_per_block")]
assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!"
curr_kwargs[newk] = v[i]
else:
curr_kwargs[k] = v
blocks.append(
block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs)
)
in_channels = out_channels
return blocks
@staticmethod
def make_default_stages(depth, block_class=None, **kwargs):
"""
        Create a list of ResNet stages from a pre-defined depth (one of 18, 34, 50, 101, 152).
If it doesn't create the ResNet variant you need, please use :meth:`make_stage`
instead for fine-grained customization.
Args:
depth (int): depth of ResNet
block_class (type): the CNN block class. Has to accept
`bottleneck_channels` argument for depth > 50.
By default it is BasicBlock or BottleneckBlock, based on the
depth.
kwargs:
other arguments to pass to `make_stage`. Should not contain
stride and channels, as they are predefined for each depth.
Returns:
list[list[CNNBlockBase]]: modules in all stages; see arguments of
:class:`ResNet.__init__`.
"""
num_blocks_per_stage = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
}[depth]
if block_class is None:
block_class = BasicBlock if depth < 50 else BottleneckBlock
if depth < 50:
in_channels = [64, 64, 128, 256]
out_channels = [64, 128, 256, 512]
else:
in_channels = [64, 256, 512, 1024]
out_channels = [256, 512, 1024, 2048]
ret = []
for (n, s, i, o) in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels):
if depth >= 50:
kwargs["bottleneck_channels"] = o // 4
ret.append(
ResNet.make_stage(
block_class=block_class,
num_blocks=n,
stride_per_block=[s] + [1] * (n - 1),
in_channels=i,
out_channels=o,
**kwargs,
)
)
return ret
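# --- Hedged usage sketch (not part of the original file) -------------------
# Assembling a plain ResNet-50 backbone from the pieces above. The feature
# names ("res2" ... "res5") and strides (4/8/16/32) follow the code in this
# module; norm="BN" is an assumption for the sketch.
def _example_resnet50_backbone():
    import torch
    stem = BasicStem(in_channels=3, out_channels=64, norm="BN")
    stages = ResNet.make_default_stages(depth=50, norm="BN")
    model = ResNet(stem, stages, out_features=["res2", "res3", "res4", "res5"])
    model.eval()
    with torch.no_grad():
        feats = model(torch.randn(1, 3, 224, 224))
    assert feats["res5"].shape == (1, 2048, 7, 7)  # stride 32, 2048 channels
    return feats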
ResNetBlockBase = CNNBlockBase
"""
Alias for backward compatibility.
"""
def make_stage(*args, **kwargs):
"""
    Deprecated alias for backward compatibility.
"""
return ResNet.make_stage(*args, **kwargs)
def _convert_ndarray_to_tensor(state_dict: Dict[str, Any]) -> None:
"""
In-place convert all numpy arrays in the state_dict to torch tensor.
Args:
state_dict (dict): a state-dict to be loaded to the model.
Will be modified.
"""
# model could be an OrderedDict with _metadata attribute
# (as returned by Pytorch's state_dict()). We should preserve these
# properties.
for k in list(state_dict.keys()):
v = state_dict[k]
if not isinstance(v, np.ndarray) and not isinstance(v, torch.Tensor):
raise ValueError(
"Unsupported type found in checkpoint! {}: {}".format(k, type(v))
)
if not isinstance(v, torch.Tensor):
state_dict[k] = torch.from_numpy(v)
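# --- Hedged usage sketch (not part of the original file) -------------------
# _convert_ndarray_to_tensor mutates the checkpoint dict in place; it is used
# by get_resnet_backbone below when loading a pickled Caffe2-style checkpoint.
def _example_convert_state_dict():
    sd = {"conv1.weight": np.zeros((64, 3, 7, 7), dtype=np.float32)}
    _convert_ndarray_to_tensor(sd)
    assert isinstance(sd["conv1.weight"], torch.Tensor)
    return sd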
@register_backbone
def get_resnet_backbone(cfg):
"""
Create a ResNet instance from config.
Returns:
ResNet: a :class:`ResNet` instance.
"""
res_cfg = cfg['MODEL']['BACKBONE']['RESNETS']
# need registration of new blocks/stems?
norm = res_cfg['NORM']
stem = BasicStem(
in_channels=res_cfg['STEM_IN_CHANNELS'],
out_channels=res_cfg['STEM_OUT_CHANNELS'],
norm=norm,
)
# fmt: off
freeze_at = res_cfg['FREEZE_AT']
out_features = res_cfg['OUT_FEATURES']
depth = res_cfg['DEPTH']
num_groups = res_cfg['NUM_GROUPS']
width_per_group = res_cfg['WIDTH_PER_GROUP']
bottleneck_channels = num_groups * width_per_group
in_channels = res_cfg['STEM_OUT_CHANNELS']
out_channels = res_cfg['RES2_OUT_CHANNELS']
stride_in_1x1 = res_cfg['STRIDE_IN_1X1']
res5_dilation = res_cfg['RES5_DILATION']
deform_on_per_stage = res_cfg['DEFORM_ON_PER_STAGE']
deform_modulated = res_cfg['DEFORM_MODULATED']
deform_num_groups = res_cfg['DEFORM_NUM_GROUPS']
# fmt: on
assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)
num_blocks_per_stage = {
18: [2, 2, 2, 2],
34: [3, 4, 6, 3],
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
152: [3, 8, 36, 3],
}[depth]
if depth in [18, 34]:
assert out_channels == 64, "Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34"
assert not any(
deform_on_per_stage
), "MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34"
assert res5_dilation == 1, "Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34"
assert num_groups == 1, "Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34"
stages = []
for idx, stage_idx in enumerate(range(2, 6)):
# res5_dilation is used this way as a convention in R-FCN & Deformable Conv paper
dilation = res5_dilation if stage_idx == 5 else 1
first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
stage_kargs = {
"num_blocks": num_blocks_per_stage[idx],
"stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1),
"in_channels": in_channels,
"out_channels": out_channels,
"norm": norm,
}
# Use BasicBlock for R18 and R34.
if depth in [18, 34]:
stage_kargs["block_class"] = BasicBlock
else:
stage_kargs["bottleneck_channels"] = bottleneck_channels
stage_kargs["stride_in_1x1"] = stride_in_1x1
stage_kargs["dilation"] = dilation
stage_kargs["num_groups"] = num_groups
if deform_on_per_stage[idx]:
stage_kargs["block_class"] = DeformBottleneckBlock
stage_kargs["deform_modulated"] = deform_modulated
stage_kargs["deform_num_groups"] = deform_num_groups
else:
stage_kargs["block_class"] = BottleneckBlock
blocks = ResNet.make_stage(**stage_kargs)
in_channels = out_channels
out_channels *= 2
bottleneck_channels *= 2
stages.append(blocks)
backbone = ResNet(stem, stages, out_features=out_features, freeze_at=freeze_at)
if cfg['MODEL']['BACKBONE']['LOAD_PRETRAINED'] is True:
filename = cfg['MODEL']['BACKBONE']['PRETRAINED']
with PathManager.open(filename, "rb") as f:
ckpt = pickle.load(f, encoding="latin1")['model']
_convert_ndarray_to_tensor(ckpt)
ckpt.pop('stem.fc.weight')
ckpt.pop('stem.fc.bias')
backbone.load_state_dict(ckpt)
return backbone
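# --- Hedged usage sketch (not part of the original file) -------------------
# A minimal config dictionary covering the keys read by get_resnet_backbone
# above. The values shown (ResNet-50, BN, no deformable conv, no pretrained
# weights) are illustrative assumptions, not the project's actual defaults.
def _example_resnet_backbone_cfg():
    return {
        'MODEL': {
            'BACKBONE': {
                'LOAD_PRETRAINED': False,
                'PRETRAINED': '',
                'RESNETS': {
                    'NORM': 'BN',
                    'STEM_IN_CHANNELS': 3,
                    'STEM_OUT_CHANNELS': 64,
                    'FREEZE_AT': 0,
                    'OUT_FEATURES': ['res2', 'res3', 'res4', 'res5'],
                    'DEPTH': 50,
                    'NUM_GROUPS': 1,
                    'WIDTH_PER_GROUP': 64,
                    'RES2_OUT_CHANNELS': 256,
                    'STRIDE_IN_1X1': False,
                    'RES5_DILATION': 1,
                    'DEFORM_ON_PER_STAGE': [False, False, False, False],
                    'DEFORM_MODULATED': False,
                    'DEFORM_NUM_GROUPS': 1,
                },
            },
        },
    }
# backbone = get_resnet_backbone(_example_resnet_backbone_cfg())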
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/xdecoder/backbone/resnet.py |
"""SAMPLING ONLY."""
import torch
import numpy as np
from tqdm import tqdm
from functools import partial
from .util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
alphas_cumprod = self.model.alphas_cumprod
assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
self.register_buffer('betas', to_torch(self.model.betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
# ddim sampling parameters
ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
ddim_timesteps=self.ddim_timesteps,
eta=ddim_eta,verbose=verbose)
self.register_buffer('ddim_sigmas', ddim_sigmas)
self.register_buffer('ddim_alphas', ddim_alphas)
self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
(1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
1 - self.alphas_cumprod / self.alphas_cumprod_prev))
self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
@torch.no_grad()
def sample(self,
S,
batch_size,
shape,
conditioning=None,
callback=None,
normals_sequence=None,
img_callback=None,
quantize_x0=False,
eta=0.,
mask=None,
x0=None,
temperature=1.,
noise_dropout=0.,
score_corrector=None,
corrector_kwargs=None,
verbose=True,
x_T=None,
log_every_t=100,
unconditional_guidance_scale=1.,
unconditional_conditioning=None,
# this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
**kwargs
):
if conditioning is not None:
if isinstance(conditioning, dict):
cbs = conditioning[list(conditioning.keys())[0]].shape[0]
if cbs != batch_size:
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
else:
if conditioning.shape[0] != batch_size:
print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
# sampling
C, H, W = shape
size = (batch_size, C, H, W)
print(f'Data shape for DDIM sampling is {size}, eta {eta}')
samples, intermediates = self.ddim_sampling(conditioning, size,
callback=callback,
img_callback=img_callback,
quantize_denoised=quantize_x0,
mask=mask, x0=x0,
ddim_use_original_steps=False,
noise_dropout=noise_dropout,
temperature=temperature,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
x_T=x_T,
log_every_t=log_every_t,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
)
return samples, intermediates
@torch.no_grad()
def ddim_sampling(self, cond, shape,
x_T=None, ddim_use_original_steps=False,
callback=None, timesteps=None, quantize_denoised=False,
mask=None, x0=None, img_callback=None, log_every_t=100,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None,):
device = self.model.betas.device
b = shape[0]
if x_T is None:
img = torch.randn(shape, device=device)
else:
img = x_T
if timesteps is None:
timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
elif timesteps is not None and not ddim_use_original_steps:
subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
timesteps = self.ddim_timesteps[:subset_end]
intermediates = {'x_inter': [img], 'pred_x0': [img]}
time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)
total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
print(f"Running DDIM Sampling with {total_steps} timesteps")
iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
for i, step in enumerate(iterator):
index = total_steps - i - 1
ts = torch.full((b,), step, device=device, dtype=torch.long)
if mask is not None:
assert x0 is not None
img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
img = img_orig * mask + (1. - mask) * img
outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
quantize_denoised=quantize_denoised, temperature=temperature,
noise_dropout=noise_dropout, score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning)
img, pred_x0 = outs
if callback: callback(i)
if img_callback: img_callback(pred_x0, i)
if index % log_every_t == 0 or index == total_steps - 1:
intermediates['x_inter'].append(img)
intermediates['pred_x0'].append(pred_x0)
return img, intermediates
@torch.no_grad()
def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None):
b, *_, device = *x.shape, x.device
if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
e_t = self.model.apply_model(x, t, c)
else:
x_in = torch.cat([x] * 2)
t_in = torch.cat([t] * 2)
c_in = torch.cat([unconditional_conditioning, c])
e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
if score_corrector is not None:
assert self.model.parameterization == "eps"
e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
# direction pointing to x_t
dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
if noise_dropout > 0.:
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
return x_prev, pred_x0
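# --- Hedged usage sketch (not part of the original file) -------------------
# How this sampler is typically driven. `model` is assumed to be a
# LatentDiffusion-style module exposing num_timesteps, betas, alphas_cumprod,
# alphas_cumprod_prev, device and apply_model, as used above; `cond` is the
# conditioning that apply_model expects and `latent_shape` is (C, H, W).
def _example_ddim_sampling(model, cond, batch_size=4, latent_shape=(4, 64, 64)):
    sampler = DDIMSampler(model)
    samples, intermediates = sampler.sample(
        S=50,                   # number of DDIM steps
        batch_size=batch_size,
        shape=latent_shape,     # (C, H, W) without the batch dimension
        conditioning=cond,
        eta=0.0,                # eta=0 gives deterministic DDIM sampling
        verbose=False,
    )
    return samples, intermediates['pred_x0'][-1]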
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/utils/ddim.py |
import math
import random
import numpy as np
def get_prompt_templates():
prompt_templates = [
'{}.',
'a photo of a {}.',
'a bad photo of a {}.',
'a photo of many {}.',
'a sculpture of a {}.',
'a photo of the hard to see {}.',
'a low resolution photo of the {}.',
'a rendering of a {}.',
'graffiti of a {}.',
'a bad photo of the {}.',
'a cropped photo of the {}.',
'a tattoo of a {}.',
'the embroidered {}.',
'a photo of a hard to see {}.',
'a bright photo of a {}.',
'a photo of a clean {}.',
'a photo of a dirty {}.',
'a dark photo of the {}.',
'a drawing of a {}.',
'a photo of my {}.',
'the plastic {}.',
'a photo of the cool {}.',
'a close-up photo of a {}.',
'a black and white photo of the {}.',
'a painting of the {}.',
'a painting of a {}.',
'a pixelated photo of the {}.',
'a sculpture of the {}.',
'a bright photo of the {}.',
'a cropped photo of a {}.',
'a plastic {}.',
'a photo of the dirty {}.',
'a jpeg corrupted photo of a {}.',
'a blurry photo of the {}.',
'a photo of the {}.',
'a good photo of the {}.',
'a rendering of the {}.',
'a {} in a video game.',
'a photo of one {}.',
'a doodle of a {}.',
'a close-up photo of the {}.',
'the origami {}.',
'the {} in a video game.',
'a sketch of a {}.',
'a doodle of the {}.',
'a origami {}.',
'a low resolution photo of a {}.',
'the toy {}.',
'a rendition of the {}.',
'a photo of the clean {}.',
'a photo of a large {}.',
'a rendition of a {}.',
'a photo of a nice {}.',
'a photo of a weird {}.',
'a blurry photo of a {}.',
'a cartoon {}.',
'art of a {}.',
'a sketch of the {}.',
'a embroidered {}.',
'a pixelated photo of a {}.',
'itap of the {}.',
'a jpeg corrupted photo of the {}.',
'a good photo of a {}.',
'a plushie {}.',
'a photo of the nice {}.',
'a photo of the small {}.',
'a photo of the weird {}.',
'the cartoon {}.',
'art of the {}.',
'a drawing of the {}.',
'a photo of the large {}.',
'a black and white photo of a {}.',
'the plushie {}.',
'a dark photo of a {}.',
'itap of a {}.',
'graffiti of the {}.',
'a toy {}.',
'itap of my {}.',
'a photo of a cool {}.',
'a photo of a small {}.',
'a tattoo of the {}.',
]
return prompt_templates
def prompt_engineering(classnames, topk=1, suffix='.'):
prompt_templates = get_prompt_templates()
temp_idx = np.random.randint(min(len(prompt_templates), topk))
if isinstance(classnames, list):
classname = random.choice(classnames)
else:
classname = classnames
return prompt_templates[temp_idx].replace('.', suffix).format(classname.replace(',', '').replace('+', ' '))
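# --- Hedged usage sketch (not part of the original file) -------------------
# prompt_engineering picks one of the first `topk` templates at random and
# strips commas / turns '+' into spaces in the class name. With topk=1 only
# the first template '{}.' can be chosen, so the output is deterministic.
def _example_prompt_engineering():
    prompt = prompt_engineering('traffic+light', topk=1, suffix='.')
    assert prompt == 'traffic light.'
    return prompt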
class AverageMeter(object):
"""Computes and stores the average and current value."""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1, decay=0):
self.val = val
if decay:
            alpha = math.exp(-n / decay)  # exponential moving average with time constant `decay`
self.sum = alpha * self.sum + (1 - alpha) * val * n
self.count = alpha * self.count + (1 - alpha) * n
else:
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
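# --- Hedged usage sketch (not part of the original file) -------------------
# Typical use of AverageMeter for tracking a running loss; with decay=0 (the
# default) `avg` is the count-weighted mean of everything passed to update().
def _example_average_meter():
    meter = AverageMeter()
    meter.update(2.0, n=4)  # e.g. mean loss 2.0 over a batch of 4 samples
    meter.update(1.0, n=4)
    assert abs(meter.avg - 1.5) < 1e-9
    return meter.avg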
| Segment-Everything-Everywhere-All-At-Once-main | demo_code/utils/misc.py |
from fvcore.common.config import CfgNode as _CfgNode
class CfgNode(_CfgNode):
"""
The same as `fvcore.common.config.CfgNode`, but different in:
1. Use unsafe yaml loading by default.
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
2. Support config versioning.
When attempting to merge an old config, it will convert the old config automatically.
.. automethod:: clone
.. automethod:: freeze
.. automethod:: defrost
.. automethod:: is_frozen
.. automethod:: load_yaml_with_base
.. automethod:: merge_from_list
.. automethod:: merge_from_other_cfg
"""
    def merge_from_dict(self, dict):
        # No-op placeholder: merging from a plain Python dict is not implemented here.
        pass
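# --- Hedged usage sketch (not part of the original file) -------------------
# CfgNode inherits the standard fvcore/yacs behaviour referenced in the class
# docstring (merge_from_list, merge_from_other_cfg, freeze, ...). Assuming
# that behaviour, a minimal round trip looks like:
def _example_cfg_usage():
    cfg = CfgNode({'MODEL': {'DEPTH': 50}})
    cfg.merge_from_list(['MODEL.DEPTH', 101])
    assert cfg.MODEL.DEPTH == 101
    return cfg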
node = CfgNode()
 | Segment-Everything-Everywhere-All-At-Once-main | demo_code/utils/Config.py |