chakshu12345 committed on
Commit 8ee4c5d · 1 Parent(s): 74dbaff

Update app.py

Files changed (1):
  app.py +186 -342

app.py CHANGED
@@ -1,375 +1,219 @@
  # -*- coding: utf-8 -*-
- """CNN(mnist).ipynb

  Automatically generated by Colaboratory.

  Original file is located at
- https://colab.research.google.com/drive/1_4EIIBRbLBfS5tDz6VSgPIIpSv1t03Y7
-
- CNNs are mainly used to classify images. <br>
- A basic CNN adds two extra layer types, convolution and pooling, before the FNN. <br>
- A CNN involves a kernel.<br>
- The kernel is a matrix that slides (convolves) across the image, performing two operations:<br>
- 1. element-wise multiplication
- 2. summation
- <br>
- Next comes pooling; there are two main types:<br>
- 1. Max pooling - taking the maximum element under the kernel at each position
- 2. Average pooling - taking the average of all elements under the kernel
-
- Stride is the number of steps the kernel moves per slide; by default it is 1. <br>
- A larger stride shrinks the output, so we pad zeros symmetrically around the matrix so the output keeps the same dimension as the input. <br>
-
- The output dimension after applying all of these: <br>
- O = (W - K + 2P)/S + 1 <br>
- W = input size <br>
- K = kernel size <br>
- P = padding = (K - 1)/2 <br>
- S = stride
-
- # Importing libraries
- """
-
- import torch
- import torch.nn as nn
- from torchvision import transforms, datasets
- from torch.utils.data import Dataset, DataLoader
- import torchvision.datasets as dsets
-
- """# Loading the data"""
-
- # we will be using the MNIST dataset for this purpose
- train_dataset = dsets.MNIST(root='./data',
-                             train=True,
-                             transform=transforms.ToTensor(),
-                             download=True)
-
- test_dataset = dsets.MNIST(root='./data',
-                            train=False,
-                            transform=transforms.ToTensor())
-
- # making our dataset iterable
- batch_size = 100
- n_iters = 3000
- num_epochs = n_iters / (len(train_dataset) / batch_size)
- num_epochs = int(num_epochs)
-
- train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
-                                            batch_size=batch_size,
-                                            shuffle=True)
-
- test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
-                                           batch_size=batch_size,
-                                           shuffle=False)
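To confirm the loaders yield what the model below expects, a quick check of our own (it assumes the train_loader defined above): each batch should hold 100 grayscale 28x28 images.

    images, labels = next(iter(train_loader))
    print(images.shape)  # torch.Size([100, 1, 28, 28])
    print(labels.shape)  # torch.Size([100])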
-
- """# Defining our model"""
-
- class CNN(nn.Module):
-     def __init__(self):
-         super(CNN, self).__init__()
-
-         # defining the layers
-         self.block1 = nn.Sequential(nn.Conv2d(1, 16, kernel_size=(5, 5), stride=1, padding=2),
-                                     nn.ReLU(),
-                                     nn.MaxPool2d(kernel_size=2))
-         # output after this operation:
-         # (28 - 5 + 2*2)/1 + 1 = 28, then max pooling: 28/2 = 14
-
-         self.block2 = nn.Sequential(nn.Conv2d(16, 32, kernel_size=(5, 5), stride=1, padding=2),
-                                     nn.ReLU(),
-                                     nn.MaxPool2d(kernel_size=2))
-
-         # output after this:
-         # (14 - 5 + 2*2)/1 + 1 = 14, then 14/2 = 7
-
-         self.layer = nn.Linear(32*7*7, 10)
-
-     def forward(self, x):
-         x = self.block1(x)
-         x = self.block2(x)
-         # flattening the output
-         x = x.view(x.size(0), -1)
-         # now feeding into the linear layer
-         x = self.layer(x)
-
-         return x
-
- # making an instance
- model = CNN()
- print(model)
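A shape trace of our own (it assumes the model instance above) confirming the 32*7*7 figure fed to the linear layer:

    x = torch.zeros(1, 1, 28, 28)   # dummy MNIST-sized input
    x = model.block1(x)
    print(x.shape)                  # torch.Size([1, 16, 14, 14])
    x = model.block2(x)
    print(x.shape)                  # torch.Size([1, 32, 7, 7]) -> 32*7*7 = 1568 features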
-
- """# Training the model"""
-
- # initialising the loss and optimizer
- criterion = nn.CrossEntropyLoss()
- learning_rate = 0.01
- optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
-
- print(model.parameters())
- print(len(list(model.parameters())))  # 6 tensors: weight and bias for each conv and the linear layer
-
- # Convolution 1: 16 kernels
- print(list(model.parameters())[0].size())  # torch.Size([16, 1, 5, 5])
-
- # Convolution 1 bias: 16 kernels
- print(list(model.parameters())[1].size())  # torch.Size([16])
-
- # Convolution 2: 32 kernels with depth = 16
- print(list(model.parameters())[2].size())  # torch.Size([32, 16, 5, 5])
-
- # Convolution 2 bias: 32 kernels
- print(list(model.parameters())[3].size())  # torch.Size([32])
-
- # Fully connected layer 1
- print(list(model.parameters())[4].size())  # torch.Size([10, 1568])
-
- # Fully connected layer bias
- print(list(model.parameters())[5].size())  # torch.Size([10])
-
- # let's begin the training
- iter = 0
-
- for epoch in range(num_epochs):
-     for i, (images, labels) in enumerate(train_loader):
-
-         # loading the images
-         images.requires_grad_()
-
-         # first clearing the gradients
-         optimizer.zero_grad()
-
-         # calculating the output and loss
-         output = model(images)
-         loss = criterion(output, labels)
-
-         # backpropagating the loss
-         loss.backward()
-
-         # updating the parameters
-         optimizer.step()
-
-         iter += 1
-
-         # printing for every 500 iterations
-         if iter % 500 == 0:
-             # Calculate accuracy
-             correct = 0
-             total = 0
-
-             # now iterate through the test dataset
-             for images, labels in test_loader:
-                 images = images.requires_grad_()
-                 outputs = model(images)
-                 _, predicted = torch.max(outputs.data, 1)
-                 total += labels.size(0)
-                 correct += (predicted == labels).sum()
-
-             accuracy = 100 * correct / total
-
-             # Print loss
-             print('Iteration: {}. Loss: {}. Accuracy: {}'.format(iter, loss.item(), accuracy))
-
- """Accuracy came out to 96.63%
-
- # Model 2
-
- This involves an average pooling layer
- """
-
- class CNN2(nn.Module):
-     def __init__(self):
-         super().__init__()
-
-         # defining the layers
-         self.block1 = nn.Sequential(nn.Conv2d(1, 16, kernel_size=(5, 5), stride=1, padding=2),
-                                     nn.ReLU(),
-                                     nn.AvgPool2d(kernel_size=2))
-         # output after this operation:
-         # (28 - 5 + 2*2)/1 + 1 = 28, then average pooling: 28/2 = 14
-
-         self.block2 = nn.Sequential(nn.Conv2d(16, 32, kernel_size=(5, 5), stride=1, padding=2),
-                                     nn.ReLU(),
-                                     nn.AvgPool2d(kernel_size=2))
-
-         # output after this:
-         # (14 - 5 + 2*2)/1 + 1 = 14, then 14/2 = 7
-
-         self.layer = nn.Linear(32*7*7, 10)
-
-     def forward(self, x):
-         x = self.block1(x)
-         x = self.block2(x)
-         # flattening the output
-         x = x.view(x.size(0), -1)
-         # now feeding into the linear layer
-         x = self.layer(x)
-
-         return x
-
- # making an instance
- model2 = CNN2()
- print(model2)
-
- learning_rate = 0.01
- optimizer = torch.optim.SGD(model2.parameters(), lr=learning_rate)
-
- # let's begin the training
- iter = 0
-
- for epoch in range(num_epochs):
-     for i, (images, labels) in enumerate(train_loader):
-
-         # loading the images
-         images.requires_grad_()
-
-         # first clearing the gradients
-         optimizer.zero_grad()
-
-         # calculating the output and loss
-         output = model2(images)
-         loss = criterion(output, labels)
-
-         # backpropagating the loss
-         loss.backward()
-
-         # updating the parameters
-         optimizer.step()
-
-         iter += 1
-
-         # printing for every 500 iterations
-         if iter % 500 == 0:
-             # Calculate accuracy
-             correct = 0
-             total = 0
-
-             # now iterate through the test dataset
-             for images, labels in test_loader:
-                 images = images.requires_grad_()
-                 outputs = model2(images)
-                 _, predicted = torch.max(outputs.data, 1)
-                 total += labels.size(0)
-                 correct += (predicted == labels).sum()
-
-             accuracy = 100 * correct / total
-
-             # Print loss
-             print('Iteration: {}. Loss: {}. Accuracy: {}'.format(iter, loss.item(), accuracy))
-
- """Accuracy came out to be 93%
-
- # Model 3
- This involves valid (zero) padding, which means a smaller output size
- """
-
- class CNN3(nn.Module):
      def __init__(self):
-         super(CNN3, self).__init__()
-
-         # Convolution 1
-         self.cnn1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1, padding=0)
-         self.relu1 = nn.ReLU()
-
-         # Max pool 1
-         self.maxpool1 = nn.MaxPool2d(kernel_size=2)
-
-         # Convolution 2
-         self.cnn2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=0)
-         self.relu2 = nn.ReLU()
-
-         # Max pool 2
-         self.maxpool2 = nn.MaxPool2d(kernel_size=2)
-
-         # Fully connected 1 (readout)
-         self.fc1 = nn.Linear(32 * 4 * 4, 10)

      def forward(self, x):
-         # Convolution 1
-         out = self.cnn1(x)
-         out = self.relu1(out)
-
-         # Max pool 1
-         out = self.maxpool1(out)
-
-         # Convolution 2
-         out = self.cnn2(out)
-         out = self.relu2(out)
-
-         # Max pool 2
-         out = self.maxpool2(out)
-
-         # Resize
-         # Size before flattening: (100, 32, 4, 4)
-         # out.size(0): 100
-         # New out size: (100, 32*4*4)
-         out = out.view(out.size(0), -1)
-
-         # Linear function (readout)
-         out = self.fc1(out)
-
-         return out
-
- # making an instance
- model3 = CNN3()
- print(model3)
-
- learning_rate = 0.01
- optimizer = torch.optim.SGD(model3.parameters(), lr=learning_rate)
-
- # let's begin the training
- iter = 0
-
- for epoch in range(num_epochs):
-     for i, (images, labels) in enumerate(train_loader):
-
-         # loading the images
-         images.requires_grad_()
-
-         # first clearing the gradients
-         optimizer.zero_grad()
-
-         # calculating the output and loss
-         output = model3(images)
-         loss = criterion(output, labels)
-
-         # backpropagating the loss
-         loss.backward()
-
-         # updating the parameters
-         optimizer.step()
-
-         iter += 1
-
-         # printing for every 500 iterations
-         if iter % 500 == 0:
-             # Calculate accuracy
-             correct = 0
-             total = 0
-
-             # now iterate through the test dataset
-             for images, labels in test_loader:
-                 images = images.requires_grad_()
-                 outputs = model3(images)
-                 _, predicted = torch.max(outputs.data, 1)
-                 total += labels.size(0)
-                 correct += (predicted == labels).sum()
-
-             accuracy = 100 * correct / total
-
-             # Print loss
-             print('Iteration: {}. Loss: {}. Accuracy: {}'.format(iter, loss.item(), accuracy))
-
- """Accuracy is 96% for model 3
-
- We can see from the above models that the one with max pooling and same padding (padding=2) gave the best accuracy
- """
-
  # -*- coding: utf-8 -*-
+ """B20AI006_MNIST_Trial.ipynb

  Automatically generated by Colaboratory.

  Original file is located at
+ https://colab.research.google.com/drive/1xG500b51pcVvYpP_fgsQ2IgEPQ3TLIup

+ ###Importing Libraries
+ """
+
+ # Commented out IPython magic to ensure Python compatibility.
+ import os
+ import sys
+ import numpy as np
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torch.optim as optim
+
+ import torch.profiler
+ from torch.utils.data import DataLoader, TensorDataset
+
+ import torchvision.utils
+ from torchvision import models
+ import torchvision.datasets as dsets
+ import torchvision.transforms as transforms
+ from torchvision.models import resnet18, ResNet18_Weights
+
+ from sklearn.manifold import TSNE
+ from sklearn.metrics import accuracy_score
+ import matplotlib.pyplot as plt
+ # %matplotlib inline
+
+ import warnings
+ warnings.filterwarnings('ignore')
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ print(device)  # a bare `device` expression only displays its value in a notebook
+
+ """##Q1
+
+ ###Loading MNIST
+ """
+
+ # Define the transformations to apply to the images
+ transform_train = transforms.Compose([
+     transforms.ToTensor()
+ ])
+
+ # Note: this transform uses 3-channel stats and is never applied below;
+ # MNIST is single-channel, so suitable stats would be (0.1307,), (0.3081,)
+ transform = transforms.Compose(
+     [transforms.ToTensor(),
+      transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
+
+ transform_test = transforms.Compose([
+     transforms.ToTensor()
+ ])
+
+ mnist_train = dsets.MNIST(root='./', train=True,
+                           download=True, transform=transform_train)
+ mnist_test = dsets.MNIST(root='./', train=False,
+                          download=True, transform=transform_test)
+
+ train_loader = torch.utils.data.DataLoader(mnist_train, batch_size=64,
+                                            shuffle=True, num_workers=1)
+ test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=64,
+                                           shuffle=False, num_workers=1)
+
+ """###Defining CNN model as mentioned in question"""
+
+ import torch
+ import torch.nn as nn
+
+ class MNISTModel(nn.Module):
      def __init__(self):
+         super(MNISTModel, self).__init__()
+         self.conv_layers = nn.Sequential(
+             nn.Conv2d(in_channels=1, out_channels=8, kernel_size=3, stride=1, padding=1),
+             nn.ReLU(inplace=True),
+             nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, stride=1, padding=1),
+             nn.ReLU(inplace=True),
+             nn.MaxPool2d(kernel_size=2),
+             # nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),
+             # nn.ReLU(inplace=True),
+             # nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
+             # nn.ReLU(inplace=True),
+             # nn.MaxPool2d(kernel_size=2),
+             # nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
+             # nn.ReLU(inplace=True),
+             # nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
+             # nn.ReLU(inplace=True),
+             # nn.MaxPool2d(kernel_size=2)
+         )
+         self.fc_layers = nn.Sequential(
+             # 3136 = 16 channels * 14 * 14 spatial, after one 2x2 max pool on 28x28 input
+             nn.Linear(in_features=3136, out_features=10)
+         )

      def forward(self, x):
+         x = self.conv_layers(x)
+         x = torch.flatten(x, 1)
+         # print(x.shape)
+         x = self.fc_layers(x)
+         return x
+
+ model = MNISTModel()
+ criterion = nn.CrossEntropyLoss()
+ optimizer = optim.Adam(model.parameters(), lr=0.001)
+
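A shape check of our own (it assumes the model instance above) confirming the 3136 input features of the linear layer:

    with torch.no_grad():
        feats = model.conv_layers(torch.zeros(1, 1, 28, 28))
    print(feats.shape)                    # torch.Size([1, 16, 14, 14])
    print(torch.flatten(feats, 1).shape)  # torch.Size([1, 3136])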
+ """###Training the model"""
+
+ def train_main(train_loader, test_loader, num_epochs, optimizer, model, device='cpu'):
+     # Lists to store the train and test losses and accuracies
+     train_losses = []
+     test_losses = []
+     train_accs = []
+     test_accs = []
+
+     criterion = nn.CrossEntropyLoss()
+
+     model.to(device)
+
+     for epoch in range(num_epochs):
+         train_loss = 0
+         train_correct = 0
+         train_total = 0
+
+         for i, (images, labels) in enumerate(train_loader):
+             # Send inputs and targets to GPU if available
+             images = images.to(device)
+             labels = labels.to(device)
+
+             optimizer.zero_grad()  # zero the gradients
+             outputs = model(images)
+             loss = criterion(outputs, labels)  # calculate the loss
+             loss.backward()  # backpropagation
+             optimizer.step()  # update weights
+             train_loss += loss.item()
+
+             # calculate the training accuracy
+             _, predicted = torch.max(outputs.data, 1)
+             train_total += labels.size(0)
+             train_correct += (predicted == labels).sum().item()
+
+             # prof.step()  # taking a step in the tensorboard profiler
+
+         # evaluate the model on the test set
+         test_loss = 0
+         test_correct = 0
+         test_total = 0
+         with torch.no_grad():
+             for images, labels in test_loader:
+                 # Send inputs and targets to GPU if available
+                 images = images.to(device)
+                 labels = labels.to(device)
+
+                 outputs = model(images)
+                 loss = criterion(outputs, labels)
+                 test_loss += loss.item()
+
+                 # calculate the testing accuracy
+                 _, predicted = torch.max(outputs.data, 1)
+                 test_total += labels.size(0)
+                 test_correct += (predicted == labels).sum().item()
+
+         # append the average loss and accuracy for the epoch to the lists
+         train_loss /= len(train_loader)
+         test_loss /= len(test_loader)
+         train_acc = 100.0 * train_correct / train_total
+         test_acc = 100.0 * test_correct / test_total
+         train_losses.append(train_loss)
+         test_losses.append(test_loss)
+         train_accs.append(train_acc)
+         test_accs.append(test_acc)
+         print('Epoch: {}, train loss: {:.4f}, test loss: {:.4f}, train accuracy: {:.2f}%, test accuracy: {:.2f}%'.format(epoch+1, train_loss, test_loss, train_acc, test_acc))
+
+     # save the results to a text file
+     with open("results.txt", "w") as f:
+         for epoch in range(num_epochs):
+             f.write("Epoch: {}, train loss: {:.4f}, test loss: {:.4f}, train accuracy: {:.2f}%, test accuracy: {:.2f}%\n".format(epoch+1, train_losses[epoch], test_losses[epoch], train_accs[epoch], test_accs[epoch]))
+
+     # plot the loss and accuracy curves side by side
+     fig, axs = plt.subplots(1, 2, figsize=(12, 4))
+     axs[0].plot(train_losses, label='Train Loss')
+     axs[0].plot(test_losses, label='Test Loss')
+     axs[0].set_xlabel('Epoch')
+     axs[0].set_ylabel('Loss')
+     axs[0].legend()
+     axs[1].plot(train_accs, label='Train Accuracy')
+     axs[1].plot(test_accs, label='Test Accuracy')
+     axs[1].set_xlabel('Epoch')
+     axs[1].set_ylabel('Accuracy')
+     axs[1].legend()
+     plt.savefig('loss_and_accuracy.png')
+     torch.save(model, 'trained_model.pt')
+     plt.show()
+
+ num_epochs = 3
+ train_main(train_loader, test_loader, num_epochs, optimizer, model, device)
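train_main saves the whole module object, not just a state dict; a reload sketch of our own for later inference (torch.load of a full model needs the class definition importable, and newer PyTorch may require weights_only=False):

    restored = torch.load('trained_model.pt', map_location=device)
    restored.eval()  # switch off training-only behaviour for inference
    with torch.no_grad():
        logits = restored(torch.zeros(1, 1, 28, 28).to(device))
    print(logits.argmax(dim=1))  # predicted class for the dummy input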
+
+ """###ResNet18 model"""
+
+ # model = models.resnet18(pretrained=False)
+ # num_ftrs = model.fc.in_features
+ # model.fc = nn.Linear(num_ftrs, 10)
+
+ # # model.to(device)
+ # criterion = nn.CrossEntropyLoss()
+ # optimizer = optim.Adam(model.parameters(), lr=0.001)
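The commented-out block above swaps the classifier head but never adapts ResNet18's 3-channel input stem to single-channel MNIST. A minimal sketch of one common adaptation, ours rather than the author's, using the resnet18 and ResNet18_Weights already imported:

    resnet = resnet18(weights=None)  # or weights=ResNet18_Weights.DEFAULT for pretrained
    # replace the 3-channel stem with a 1-channel one so MNIST images fit
    resnet.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
    resnet.fc = nn.Linear(resnet.fc.in_features, 10)  # 10 MNIST classes
    resnet_optimizer = optim.Adam(resnet.parameters(), lr=0.001)
    # could then reuse the same loop, e.g.:
    # train_main(train_loader, test_loader, num_epochs, resnet_optimizer, resnet, device)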