from typing import List

from transformers import PretrainedConfig, AutoTokenizer


def config_to_moe_args(config):
    """Translate a model config into megablocks MoE ``Arguments``.

    The imports are done lazily so that ``megablocks`` and ``torch`` are only
    required when the mixture-of-experts path is actually exercised.
    """
    from megablocks.layers.arguments import Arguments as MoEArgs
    import torch.nn.functional as F

    kwargs = {
        "activation_fn": F.silu,
        "mlp_type": "glu" if "glu" in config.activation_type.lower() else "mlp",
        "mlp_impl": "sparse",
        "hidden_size": config.d_model,
        "ffn_hidden_size": config.mlp_hidden_size,
        # Expert count and routing fan-out are fixed here to the MolmoE
        # defaults (64 experts, top-8 routing), matching MolmoeConfig below.
        "moe_num_experts": 64,
        "num_layers": config.n_layers,
        "moe_weight_parallelism": False,
        "moe_expert_model_parallelism": False,
        "moe_top_k": 8,
        "bf16": False,
        "fp16": False,
        "bias": False,
        "return_bias": False,
    }

    return MoEArgs(**kwargs)
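

# Usage sketch (an assumption, not defined in this file): the modeling code is
# expected to call `config_to_moe_args` with the inner model config -- an
# object exposing `d_model`, `mlp_hidden_size`, `activation_type`, and
# `n_layers` -- and hand the result to a megablocks expert layer, roughly:
#
#     moe_args = config_to_moe_args(model_config)
#     ffn = megablocks.layers.dmoe.dMoE(moe_args)  # hypothetical call site
#
# Both `model_config` and the dMoE construction above are illustrative only.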


class MolmoeConfig(PretrainedConfig):
    """Hugging Face configuration for the MolmoE (mixture-of-experts Molmo) model."""

    model_type = "molmoe"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=50304,
        embedding_size=50304,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        max_position_embeddings=2048,
        initializer_range=0.02,
        use_cache=True,
        layer_norm_eps: float = 1e-5,
        rope_theta=10000.0,
        clip_qkv=None,
        qkv_bias: bool = False,
        weight_tying: bool = False,
        use_position_ids: bool = True,
        tie_word_embeddings: bool = True,
        moe_num_experts: int = 64,
        moe_top_k: int = 8,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_eps = layer_norm_eps
        self.weight_tying = weight_tying
        self.use_position_ids = use_position_ids

        # Fall back to standard multi-head attention when no KV-head count is given.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.clip_qkv = clip_qkv
        self.qkv_bias = qkv_bias
        self.tie_word_embeddings = tie_word_embeddings
        # Store the MoE settings so they survive config serialization; the
        # original accepted these arguments but silently dropped them.
        self.moe_num_experts = moe_num_experts
        self.moe_top_k = moe_top_k

        super().__init__(
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


MolmoeConfig.register_for_auto_class()
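

if __name__ == "__main__":
    # Minimal usage sketch, not part of the library: build a config with the
    # defaults declared above (including the 64-expert / top-8 MoE settings)
    # and round-trip it through the standard PretrainedConfig serialization.
    config = MolmoeConfig(moe_num_experts=64, moe_top_k=8)
    print(config.to_json_string())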