aquiffoo committed on
Commit 0bf67d2 · verified · 1 Parent(s): f7db63c

Update meshconfig.py

Files changed (1)
meshconfig.py +63 -2
meshconfig.py CHANGED
@@ -1,3 +1,64 @@
- # Source code for MeshConfig from cell VExhmWA0lXA_
- # Please replace this with the actual code from the notebook cell.
+ from transformers import PretrainedConfig, PreTrainedModel, AutoModelForCausalLM
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import math
+ from transformers.modeling_outputs import CausalLMOutputWithPast

+ class MeshConfig(PretrainedConfig):
+     model_type = "mesh"
+
+     def __init__(
+         self,
+         vocab_size=32000,
+         hidden_size=768,
+         intermediate_size=2048,
+         num_hidden_layers=12,
+         num_attention_heads=12,
+         num_key_value_heads=12,
+         max_position_embeddings=4096,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=2,
+         tie_word_embeddings=False,
+         # Mesh-specific configuration
+         mesh_grid_size=(2, 2),  # 2x2 grid of experts
+         expert_intermediate_size=256,  # example size for the expert intermediate layer
+         routing_k=2,  # top-k routing
+         neighbor_exchange_enabled=True,
+         cross_expert_attention_enabled=True,
+         **kwargs
+     ):
+         super().__init__(
+             vocab_size=vocab_size,
+             hidden_size=hidden_size,
+             intermediate_size=intermediate_size,
+             num_hidden_layers=num_hidden_layers,
+             num_attention_heads=num_attention_heads,
+             num_key_value_heads=num_key_value_heads,
+             max_position_embeddings=max_position_embeddings,
+             initializer_range=initializer_range,
+             rms_norm_eps=rms_norm_eps,
+             use_cache=use_cache,
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+         self.mesh_grid_size = mesh_grid_size
+         # Derive expert_intermediate_size from the shared/expert parameter split:
+         #   total parameters = shared (embedding, norms, LM head) + experts + overhead.
+         # The exact split depends on the specific layer mapping, which is not fixed yet.
+         # Target A242M (top-2 routing): 100M shared + 135M for 2 active experts + 7M overhead = 242M,
+         # with the 135M assumed to sit mostly in the expert intermediate layers.
+         # How Gemma's intermediate size maps onto the expert intermediate size is still undetermined,
+         # so for now use a simple ratio: split the dense intermediate size evenly across the experts.
+         # Note that this overrides the expert_intermediate_size constructor argument.
+         self.expert_intermediate_size = intermediate_size // (mesh_grid_size[0] * mesh_grid_size[1])
+         self.routing_k = routing_k
+         self.neighbor_exchange_enabled = neighbor_exchange_enabled
+         self.cross_expert_attention_enabled = cross_expert_attention_enabled
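
For reference, a minimal usage sketch (not part of the commit): it assumes the file above is importable as meshconfig from the working directory, and the save directory name "mesh-config-demo" is arbitrary.

# Minimal usage sketch, assuming meshconfig.py from this commit is on the import path.
from meshconfig import MeshConfig

config = MeshConfig()

# A 2x2 mesh gives 4 experts; expert_intermediate_size is derived in __init__
# as intermediate_size // num_experts, i.e. 2048 // 4 == 512 with the defaults
# (the expert_intermediate_size constructor argument is overridden there).
num_experts = config.mesh_grid_size[0] * config.mesh_grid_size[1]
print(num_experts)                      # 4
print(config.expert_intermediate_size)  # 512
print(config.routing_k)                 # 2 (top-2 routing)

# The config round-trips through the standard Hugging Face config API.
# JSON serialization turns the (2, 2) tuple into a [2, 2] list on reload.
config.save_pretrained("mesh-config-demo")   # hypothetical local directory
reloaded = MeshConfig.from_pretrained("mesh-config-demo")
assert tuple(reloaded.mesh_grid_size) == config.mesh_grid_size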