import torch
from torch import nn


class ConvNeXtBlock(nn.Module):
    """ConvNeXt block adapted to 1D sequences (B, C, T), with an optional
    adaptive scale/shift applied after the LayerNorm and an optional
    per-channel output gate."""

    def __init__(
        self,
        dim: int,
        intermediate_dim: int | None = None,
        layer_scale_init_value: float = 0.0,
        elementwise_affine_ln: bool = True,
        kernel_size: int = 5,
    ):
        super().__init__()
        intermediate_dim = intermediate_dim if intermediate_dim is not None else dim * 3
        self.dwconv = nn.Conv1d(
            dim, dim, kernel_size=kernel_size, padding=kernel_size // 2, groups=dim
        )  # depthwise conv; "same" padding for odd kernel sizes
        self.norm = nn.LayerNorm(
            dim, eps=1e-6, elementwise_affine=elementwise_affine_ln
        )
        self.pwconv1 = nn.Linear(
            dim, intermediate_dim
        )  # pointwise/1x1 convs, implemented with linear layers
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(intermediate_dim, dim)
        # Per-channel layer scale (gamma); disabled when the init value is 0.
        self.gamma = (
            nn.Parameter(layer_scale_init_value * torch.ones(dim), requires_grad=True)
            if layer_scale_init_value > 0
            else None
        )

    def forward(
        self,
        x: torch.Tensor,
        scale_shift: tuple[torch.Tensor, torch.Tensor] | None = None,
        gate: torch.Tensor | None = None,
    ) -> torch.Tensor:
        residual = x
        x = self.dwconv(x)
        x = x.transpose(1, 2)  # (B, C, T) -> (B, T, C)
        x = self.norm(x)
        if scale_shift is not None:
            # Adaptive modulation: (B, C) tensors broadcast over the time axis.
            scale, shift = scale_shift
            x = x * scale[:, None] + shift[:, None]
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.gamma is not None:
            x = self.gamma * x
        if gate is not None:
            x = gate[:, None] * x  # per-channel gate, broadcast over time
        x = x.transpose(1, 2)  # (B, T, C) -> (B, C, T)
        x = residual + x
        return x
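

# A minimal usage sketch (not part of the original file) checking the expected
# tensor shapes. The (B, C) shapes for scale/shift/gate are an assumption
# inferred from the broadcasting in forward(); the values here are
# illustrative placeholders.
if __name__ == "__main__":
    block = ConvNeXtBlock(dim=64, layer_scale_init_value=1e-6)
    x = torch.randn(2, 64, 100)  # (B, C, T)
    scale = torch.ones(2, 64)    # per-channel scale
    shift = torch.zeros(2, 64)   # per-channel shift
    gate = torch.ones(2, 64)     # per-channel output gate
    y = block(x, scale_shift=(scale, shift), gate=gate)
    assert y.shape == x.shape    # residual block preserves the input shape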