# IndiSent/model_configs/custom_transformer.py
# Duplicated from ai4bharat/IndicTrans-Indic2English.
from fairseq.models import register_model_architecture
from fairseq.models.transformer import base_architecture
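
# Each function below registers a named architecture for fairseq's
# "transformer" model. Every override goes through getattr, so a value
# already set on the command line takes precedence; base_architecture()
# then fills in the remaining transformer_base defaults. The 2x/4x/9x
# suffixes presumably refer to the rough parameter count relative to the
# base model (an assumption from the names; the file itself does not say).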


@register_model_architecture("transformer", "transformer_2x")
def transformer_big(args):
    # ~2x base: 1024-dim embeddings, 4096-dim FFN, 16 attention heads,
    # post-norm encoder (normalize_before=False).
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
    args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
    base_architecture(args)


@register_model_architecture("transformer", "transformer_4x")
def transformer_huge(args):
    # ~4x base: wider 1536-dim embeddings with the same 4096-dim FFN
    # and 16 attention heads.
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1536)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
    args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1536)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
    base_architecture(args)


@register_model_architecture("transformer", "transformer_9x")
def transformer_xlarge(args):
    # ~9x base: 2048-dim embeddings, 8192-dim FFN, 16 attention heads.
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 2048)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 8192)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
    args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 2048)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 8192)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
    base_architecture(args)
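

if __name__ == "__main__":
    # Minimal usage sketch, assuming only that fairseq is installed: apply
    # the 4x overrides to an empty namespace and inspect the resolved sizes.
    # base_architecture() reads every field through getattr, so a bare
    # Namespace is sufficient here.
    import argparse

    args = argparse.Namespace()
    transformer_huge(args)
    print(args.encoder_embed_dim)      # 1536
    print(args.encoder_ffn_embed_dim)  # 4096
    print(args.decoder_layers)         # 6, inherited from transformer_base

    # During training, the registered names are selected with fairseq's
    # --arch flag (e.g. fairseq-train ... --arch transformer_4x), provided
    # this module has been imported first, e.g. via --user-dir pointing at
    # a package that imports this file.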