Update neighborexchange.py
neighborexchange.py  CHANGED  +81 -2
@@ -1,3 +1,82 @@
from __future__ import annotations  # keep the MeshConfig annotation lazy; it is not imported in this file

import torch
import torch.nn as nn


# NOTE: MeshConfig is assumed to be defined elsewhere in the project. This module only
# relies on config.mesh_grid_size, config.hidden_size and config.neighbor_exchange_enabled.
class NeighborExchange(nn.Module):
    def __init__(self, config: MeshConfig):
        super().__init__()
        self.config = config
        self.num_experts_x = config.mesh_grid_size[0]
        self.num_experts_y = config.mesh_grid_size[1]
        self.num_experts = self.num_experts_x * self.num_experts_y

        # Parameters for neighbor communication. Alternatives would be a learned weight per
        # neighbor direction (up/down/left/right) or a linear layer over the concatenated
        # neighbor features; in the grid each expert has at most four neighbors (exactly two
        # in a 2x2 grid). The simplest option is used here: aggregate the neighbor features
        # and pass the result through a single linear projection before adding it to the
        # expert's own output.
        self.exchange_projection = nn.Linear(config.hidden_size, config.hidden_size)

        # Optional: learned weights for the different neighbor directions, e.g.
        # self.neighbor_weights = nn.Parameter(torch.ones(4))  # 4 directions (N, S, E, W)

    def forward(self, expert_outputs, expert_indices=None):
        # expert_outputs: (batch_size, sequence_length, num_experts, hidden_size)
        # expert_indices: (batch_size, sequence_length, k) - indices of the selected experts;
        # not used here, since this simple exchange operates on all experts in the grid.

        if not self.config.neighbor_exchange_enabled:
            return expert_outputs

        batch_size, seq_length, num_experts, hidden_size = expert_outputs.shape

        # Reshape to expose the grid structure: (batch_size, seq_length, grid_x, grid_y, hidden_size)
        reshaped_outputs = expert_outputs.view(
            batch_size, seq_length, self.num_experts_x, self.num_experts_y, hidden_size
        )

        # Aggregated (and projected) neighbor information for every expert position
        aggregated_neighbor_info = torch.zeros_like(reshaped_outputs)

        # Neighbor exchange: for each grid position, average the features of its
        # in-bounds neighbors (up, down, left, right) and project the result.
        for i in range(self.num_experts_x):
            for j in range(self.num_experts_y):
                # Information accumulated from this expert's neighbors
                neighbor_info = torch.zeros_like(reshaped_outputs[:, :, i, j, :])

                neighbors = []
                if i > 0:  # up neighbor
                    neighbors.append(reshaped_outputs[:, :, i - 1, j, :])
                if i < self.num_experts_x - 1:  # down neighbor
                    neighbors.append(reshaped_outputs[:, :, i + 1, j, :])
                if j > 0:  # left neighbor
                    neighbors.append(reshaped_outputs[:, :, i, j - 1, :])
                if j < self.num_experts_y - 1:  # right neighbor
                    neighbors.append(reshaped_outputs[:, :, i, j + 1, :])

                # Aggregate the neighbor features (simple mean as an example)
                if neighbors:
                    neighbor_stack = torch.stack(neighbors, dim=-2)     # (batch, seq, num_neighbors, hidden)
                    neighbor_info = torch.mean(neighbor_stack, dim=-2)  # (batch, seq, hidden)

                # Transform the aggregated neighbor information and store it at the
                # current expert's grid position
                aggregated_neighbor_info[:, :, i, j, :] = self.exchange_projection(neighbor_info)

        # Reshape back to (batch_size, sequence_length, num_experts, hidden_size)
        aggregated_neighbor_info = aggregated_neighbor_info.view(
            batch_size, seq_length, num_experts, hidden_size
        )

        # Combine each expert's own output with its neighbors' information (additive/residual)
        exchanged_expert_outputs = expert_outputs + aggregated_neighbor_info

        return exchanged_expert_outputs
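
    # --- Editor's sketch (not part of the original change) -------------------
    # The Python loop in forward() is fine for small meshes such as 2x2, but the
    # same "mean over in-bounds neighbors" aggregation can also be vectorized.
    # The helper below is a hypothetical, unwired alternative; it assumes the
    # (batch, seq, grid_x, grid_y, hidden) layout produced inside forward().
    def _aggregate_neighbors_vectorized(self, reshaped_outputs):
        # Zero-pad both grid dimensions so border cells simply see zeros.
        padded = nn.functional.pad(reshaped_outputs, (0, 0, 1, 1, 1, 1))
        neighbor_sum = (
            padded[:, :, :-2, 1:-1, :]    # up neighbors
            + padded[:, :, 2:, 1:-1, :]   # down neighbors
            + padded[:, :, 1:-1, :-2, :]  # left neighbors
            + padded[:, :, 1:-1, 2:, :]   # right neighbors
        )
        # Count how many in-bounds neighbors each grid cell actually has (2, 3 or 4).
        ones = torch.ones(
            1, 1, self.num_experts_x, self.num_experts_y, 1,
            device=reshaped_outputs.device, dtype=reshaped_outputs.dtype,
        )
        padded_ones = nn.functional.pad(ones, (0, 0, 1, 1, 1, 1))
        counts = (
            padded_ones[:, :, :-2, 1:-1, :]
            + padded_ones[:, :, 2:, 1:-1, :]
            + padded_ones[:, :, 1:-1, :-2, :]
            + padded_ones[:, :, 1:-1, 2:, :]
        )
        # Mean over existing neighbors, then the same learned projection as forward().
        return self.exchange_projection(neighbor_sum / counts.clamp(min=1))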
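

# --- Editor's sketch (not part of the original change) ------------------------
# Minimal smoke test showing the expected shapes. MeshConfig is assumed to be
# defined elsewhere in the project; the SimpleNamespace below is only a stand-in
# exposing the three attributes this module reads.
if __name__ == "__main__":
    from types import SimpleNamespace

    config = SimpleNamespace(
        mesh_grid_size=(2, 2),           # 2x2 expert grid -> 4 experts
        hidden_size=16,
        neighbor_exchange_enabled=True,
    )

    exchange = NeighborExchange(config)
    expert_outputs = torch.randn(3, 5, 4, 16)  # (batch, seq, num_experts, hidden)
    exchanged = exchange(expert_outputs)
    print(exchanged.shape)  # torch.Size([3, 5, 4, 16])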