---
license: mit
datasets:
- uoft-cs/cifar10
language:
- en
metrics:
- accuracy
---


# QST-CIFAR10: evaluating the transformer ensemble

The script below downloads two checkpoints of a custom patch-based transformer (`QST-CIFAR10-Epoch125` and `QST-CIFAR10-BestModel`) from the Hugging Face Hub, collects their softmax probabilities on the CIFAR-10 test set, and trains an XGBoost meta-learner on the stacked probabilities, reaching roughly 96.7% accuracy. It requires `torch`, `torchvision`, `numpy`, `xgboost`, `scikit-learn`, `matplotlib`, and `huggingface_hub` to be installed.

```python
# Import necessary libraries
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import xgboost as xgb
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from huggingface_hub import hf_hub_download

# Set device to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f'Using device: {device}')

# Define your Hugging Face username and repository names
username = "Vijayendra"  # Replace with your actual Hugging Face username
model_name_epoch_125 = "QST-CIFAR10-Epoch125"
model_name_best = "QST-CIFAR10-BestModel"

# Directory where the models will be downloaded
save_dir = './hf_models'
os.makedirs(save_dir, exist_ok=True)

# Data normalization for CIFAR-10
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616))
])

# Load CIFAR-10 test set
cifar10_test = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
test_loader = DataLoader(cifar10_test, batch_size=128, shuffle=False, num_workers=4)

# Define Patch Embedding with optional convolutional layers
class PatchEmbedding(nn.Module):
    def __init__(self, img_size=32, patch_size=4, in_channels=3, embed_dim=256):
        super(PatchEmbedding, self).__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = (img_size // patch_size) ** 2
        self.embed_dim = embed_dim
        self.conv_layers = nn.Sequential(
            nn.Conv2d(in_channels, embed_dim // 2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(embed_dim // 2),
            nn.ReLU(),
            nn.Conv2d(embed_dim // 2, embed_dim, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(embed_dim),
            nn.ReLU(),
        )
        self.proj = nn.Conv2d(embed_dim, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        x = self.conv_layers(x)
        x = self.proj(x)       # Shape: [batch_size, embed_dim, num_patches_root, num_patches_root]
        x = x.flatten(2)       # Shape: [batch_size, embed_dim, num_patches]
        x = x.transpose(1, 2)  # Shape: [batch_size, num_patches, embed_dim]
        return x

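# Optional sanity check of the patch-embedding output shape (illustrative only,
# not part of the original training script):
# with torch.no_grad():
#     print(PatchEmbedding(embed_dim=512)(torch.randn(2, 3, 32, 32)).shape)
#     # -> torch.Size([2, 64, 512]): a 32x32 image yields 8x8 = 64 patches of dimension 512
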
# Sequential Attention Block
class SequentialAttentionBlock(nn.Module):
    def __init__(self, embed_dim, num_heads, dropout=0.1):
        super(SequentialAttentionBlock, self).__init__()
        self.attention = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout)
        self.norm = nn.LayerNorm(embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # x shape: [seq_length, batch_size, embed_dim]
        seq_length = x.size(0)
        # Upper-triangular (causal) mask: each patch position attends only to itself
        # and to earlier positions in the sequence
        attn_mask = torch.triu(torch.ones(seq_length, seq_length), diagonal=1).bool().to(x.device)
        attn_output, _ = self.attention(x, x, x, attn_mask=attn_mask)
        x = self.norm(x + attn_output)
        return self.dropout(x)

# Cyclic Attention Block with CRF
class CyclicAttentionBlockCRF(nn.Module):
    def __init__(self, embed_dim, num_heads, dropout=0.1):
        super(CyclicAttentionBlockCRF, self).__init__()
        self.attention = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout)
        self.norm = nn.LayerNorm(embed_dim)
        self.dropout = nn.Dropout(dropout)
        self.cyclic_operator = nn.Linear(embed_dim, embed_dim, bias=False)

    def forward(self, x):
        attn_output, _ = self.attention(x, x, x)
        x = self.norm(x + attn_output)
        cyclic_term = self.cyclic_alignment(attn_output)
        x = self.norm(x + cyclic_term)
        return self.dropout(x)

    def cyclic_alignment(self, attn_output):
        # Linear projection of the attention output, cyclically shifted by one
        # position along the sequence dimension
        cyclic_term = self.cyclic_operator(attn_output)
        cyclic_term = torch.roll(cyclic_term, shifts=1, dims=0)
        return cyclic_term

# Combined Transformer Block with additional multi-headed self-attention and sequential attention
class CombinedTransformerBlock(nn.Module):
    def __init__(self, embed_dim, num_heads, ff_dim, dropout=0.1, dropconnect_p=0.5):
        super(CombinedTransformerBlock, self).__init__()
        self.initial_attention = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropconnect_p)
        self.norm0 = nn.LayerNorm(embed_dim)

        self.attention1 = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropconnect_p)
        self.norm1 = nn.LayerNorm(embed_dim)
        self.dropconnect = nn.Dropout(dropconnect_p)
        self.cyclic_attention = CyclicAttentionBlockCRF(embed_dim, num_heads, dropout)
        self.sequential_attention = SequentialAttentionBlock(embed_dim, num_heads, dropout)
        self.attention2 = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropconnect_p)
        self.norm2 = nn.LayerNorm(embed_dim)
        self.ff = nn.Sequential(
            nn.Linear(embed_dim, ff_dim),
            nn.ReLU(),
            nn.Linear(ff_dim, embed_dim)
        )
        self.norm3 = nn.LayerNorm(embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        attn_output, _ = self.initial_attention(x, x, x)
        x = self.norm0(x + attn_output)

        attn_output, _ = self.attention1(x, x, x)
        x = self.norm1(x + attn_output)
        x = self.dropconnect(x)
        x = self.cyclic_attention(x)
        x = self.sequential_attention(x)
        attn_output, _ = self.attention2(x, x, x)
        x = self.norm2(x + attn_output)
        ff_output = self.ff(x)
        x = self.norm3(x + self.dropout(ff_output))
        return x

# Decoder Block
class DecoderBlock(nn.Module):
    def __init__(self, embed_dim, num_heads, ff_dim, dropout=0.1):
        super(DecoderBlock, self).__init__()
        self.attention = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout)
        self.norm1 = nn.LayerNorm(embed_dim)
        self.cyclic_attention = CyclicAttentionBlockCRF(embed_dim, num_heads, dropout)
        self.ff = nn.Sequential(
            nn.Linear(embed_dim, ff_dim),
            nn.ReLU(),
            nn.Linear(ff_dim, embed_dim)
        )
        self.norm2 = nn.LayerNorm(embed_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, encoder_output):
        attn_output, _ = self.attention(x, encoder_output, encoder_output)
        x = self.norm1(x + attn_output)
        x = self.cyclic_attention(x)
        ff_output = self.ff(x)
        x = self.norm2(x + self.dropout(ff_output))
        return x

# Custom Transformer Model with increased depth and learnable positional encodings
class CustomTransformer(nn.Module):
    def __init__(self, embed_dim, num_heads, ff_dim, num_classes, num_layers=6, dropconnect_p=0.5):
        super(CustomTransformer, self).__init__()
        self.embed_dim = embed_dim
        self.num_patches = (32 // 4) ** 2  # Assuming patch_size=4
        self.patch_embedding = PatchEmbedding(embed_dim=embed_dim)
        self.positional_encoding = nn.Parameter(torch.zeros(1, self.num_patches, embed_dim))
        nn.init.trunc_normal_(self.positional_encoding, std=0.02)

        # Create multiple encoder blocks
        self.encoder_blocks = nn.ModuleList([
            CombinedTransformerBlock(embed_dim, num_heads, ff_dim, dropconnect_p=dropconnect_p)
            for _ in range(num_layers)
        ])

        # Create multiple decoder blocks
        self.decoder_blocks = nn.ModuleList([
            DecoderBlock(embed_dim, num_heads, ff_dim)
            for _ in range(num_layers)
        ])

        self.fc = nn.Linear(embed_dim, num_classes)

    def forward(self, x):
        x = self.patch_embedding(x)  # Shape: [batch_size, num_patches, embed_dim]
        x += self.positional_encoding
        x = x.transpose(0, 1)  # Shape: [num_patches, batch_size, embed_dim]

        encoder_output = x
        for encoder in self.encoder_blocks:
            encoder_output = encoder(encoder_output)

        decoder_output = encoder_output
        for decoder in self.decoder_blocks:
            decoder_output = decoder(decoder_output, encoder_output)

        decoder_output = decoder_output.mean(dim=0)  # Shape: [batch_size, embed_dim]
        logits = self.fc(decoder_output)
        return logits

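# Optional end-to-end shape check before loading the pretrained weights (illustrative
# only, not part of the original script):
# with torch.no_grad():
#     print(CustomTransformer(512, 32, 1024, 10, num_layers=10)(torch.randn(2, 3, 32, 32)).shape)
#     # -> torch.Size([2, 10])
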
# Initialize two instances of the model for 'model_epoch_125' and 'best_model'
embed_dim = 512
num_heads = 32
ff_dim = 1024
num_classes = 10
num_layers = 10  # Must match the architecture used when the checkpoints were trained

model_epoch_125 = CustomTransformer(embed_dim, num_heads, ff_dim, num_classes, num_layers=num_layers).to(device)
model_best = CustomTransformer(embed_dim, num_heads, ff_dim, num_classes, num_layers=num_layers).to(device)

# Download the model weights from the Hugging Face Hub
model_epoch_125_path = hf_hub_download(repo_id=f"{username}/{model_name_epoch_125}", filename="model_epoch_125.pth")
model_best_path = hf_hub_download(repo_id=f"{username}/{model_name_best}", filename="model_best.pth")

# Load the downloaded weights
model_epoch_125.load_state_dict(torch.load(model_epoch_125_path, map_location=device))
model_best.load_state_dict(torch.load(model_best_path, map_location=device))

# Set both models to evaluation mode
model_epoch_125.eval()
model_best.eval()

# Prepare the feature and label arrays
test_preds_epoch_125 = []
test_preds_best = []
test_labels = []

with torch.no_grad():
    for images_test, labels_test in test_loader:
        images_test = images_test.to(device)

        # Get predictions from model_epoch_125
        logits_epoch_125 = model_epoch_125(images_test)
        probs_epoch_125 = F.softmax(logits_epoch_125, dim=1).cpu().numpy()  # Convert to probabilities

        # Get predictions from model_best
        logits_best = model_best(images_test)
        probs_best = F.softmax(logits_best, dim=1).cpu().numpy()  # Convert to probabilities

        # Store predictions and labels
        test_preds_epoch_125.extend(probs_epoch_125)
        test_preds_best.extend(probs_best)
        test_labels.extend(labels_test.numpy())

# Convert predictions to NumPy arrays
test_preds_epoch_125 = np.array(test_preds_epoch_125)
test_preds_best = np.array(test_preds_best)
test_labels = np.array(test_labels)

# Stack the predictions from both models to create meta-features
meta_features = np.hstack((test_preds_epoch_125, test_preds_best))  # Shape: (num_samples, 20)
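# Each row of meta_features concatenates the 10 class probabilities from model_epoch_125
# with the 10 class probabilities from model_best, so the meta-learner sees both models'
# full probability vectors side by side.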

# Split the data for training and validation of the XGBoost meta-learner
X_train, X_val, y_train, y_val = train_test_split(meta_features, test_labels, test_size=0.2, random_state=42)

# Train an XGBoost classifier as a meta-learner
xgb_model = xgb.XGBClassifier(
    objective='multi:softmax',
    num_class=10,
    eval_metric='mlogloss',
    use_label_encoder=False  # Only needed for older XGBoost 1.x releases
)

xgb_model.fit(X_train, y_train)

# Validate the XGBoost model
y_pred_val = xgb_model.predict(X_val)
val_accuracy = accuracy_score(y_val, y_pred_val)
print(f'Validation Accuracy of Meta-learner: {val_accuracy * 100:.2f}%')

# Test the XGBoost model on the entire test set
y_pred_test = xgb_model.predict(meta_features)
test_accuracy = accuracy_score(test_labels, y_pred_test)
print(f'Test Accuracy of Meta-learner: {test_accuracy * 100:.2f}%')

# Plot the confusion matrix for the test set predictions
cm = confusion_matrix(test_labels, y_pred_test)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=cifar10_test.classes)
disp.plot(cmap=plt.cm.Blues)

# Rotate the x-axis labels to prevent overlapping
plt.xticks(rotation=45, ha='right')
plt.title('Confusion Matrix for XGBoost Meta-learner on CIFAR-10 Test Set')
plt.savefig(os.path.join(save_dir, 'xgboost_meta_confusion_matrix.png'))
plt.show()

# Save the XGBoost model
xgb_model.save_model(os.path.join(save_dir, 'xgboost_meta_learner.json'))
print('Meta-learner model saved.')
```
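
Once the evaluation script has been run, the saved meta-learner can be reloaded for inference without retraining. The snippet below is a minimal, hypothetical sketch of single-image inference with the stacked ensemble; it assumes the script above has already been executed in the same session, so that `model_epoch_125`, `model_best`, `cifar10_test`, `save_dir`, and `device` are still in scope, and that the meta-learner was saved to the path used above.

```python
import os

import numpy as np
import torch
import torch.nn.functional as F
import xgboost as xgb

# Reload the meta-learner saved by the evaluation script above
meta_learner = xgb.XGBClassifier()
meta_learner.load_model(os.path.join(save_dir, 'xgboost_meta_learner.json'))

# Take one already-normalized test image and add a batch dimension
image, label = cifar10_test[0]
batch = image.unsqueeze(0).to(device)  # Shape: [1, 3, 32, 32]

with torch.no_grad():
    # Class probabilities from both transformer checkpoints
    probs_epoch_125 = F.softmax(model_epoch_125(batch), dim=1).cpu().numpy()
    probs_best = F.softmax(model_best(batch), dim=1).cpu().numpy()

# Same meta-feature layout as during training: [10 probs, 10 probs]
meta_input = np.hstack((probs_epoch_125, probs_best))  # Shape: [1, 20]
predicted_class = int(meta_learner.predict(meta_input)[0])

print(f'Predicted: {cifar10_test.classes[predicted_class]}, '
      f'ground truth: {cifar10_test.classes[label]}')
```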