from transformers import PretrainedConfig


class ThetaConfig(PretrainedConfig):
    model_type = "theta"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
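

# --- Usage sketch (illustrative, not part of the original listing) ---
# Assuming the ThetaConfig class above is importable, it can be instantiated
# with overrides, serialized to disk, and registered with AutoConfig so that
# the custom "theta" model_type resolves automatically on reload. The
# "theta-config" directory name below is a hypothetical example path.
from transformers import AutoConfig

theta_config = ThetaConfig(num_hidden_layers=16)  # override any default field
theta_config.save_pretrained("theta-config")      # writes config.json with model_type="theta"

AutoConfig.register("theta", ThetaConfig)         # map the model_type string to the class
reloaded = AutoConfig.from_pretrained("theta-config")
assert reloaded.num_hidden_layers == 16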