# -*- coding: utf-8 -*-
"""B20AI006_MNIST_Trial.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1xG500b51pcVvYpP_fgsQ2IgEPQ3TLIup

###Importing Libraries
"""

# Commented out IPython magic to ensure Python compatibility.
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.profiler
from torch.utils.data import DataLoader, TensorDataset
import torchvision.utils
from torchvision import models
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torchvision.models import resnet18, ResNet18_Weights
from sklearn.manifold import TSNE
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# %matplotlib inline

import warnings
warnings.filterwarnings('ignore')

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
"""##Q1 | |
###Loading CIFAR10 | |
""" | |
# Define the transformations to apply to the images
transform_train = transforms.Compose([
    transforms.ToTensor()
])

# Note: this transform uses 3-channel normalization statistics and is not
# passed to the single-channel MNIST datasets below; it is left here unused.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

transform_test = transforms.Compose([
    transforms.ToTensor()
])
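# If normalization were desired for MNIST, the images have a single channel, so a
# one-value mean/std pair is needed. A minimal sketch, not used below; the
# 0.1307/0.3081 values are the commonly quoted MNIST training-set statistics,
# not something computed in this notebook.
transform_mnist_norm = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])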
mnist_train = dsets.MNIST(root='./', train=True,
                          download=True, transform=transform_train)
mnist_test = dsets.MNIST(root='./', train=False,
                         download=True, transform=transform_test)

train_loader = torch.utils.data.DataLoader(mnist_train, batch_size=64,
                                           shuffle=True, num_workers=1)
test_loader = torch.utils.data.DataLoader(mnist_test, batch_size=64,
                                          shuffle=False, num_workers=1)
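# Quick sanity check (a small addition, not part of the original assignment code):
# pull one batch from the training loader and confirm the expected shapes,
# i.e. images of size [64, 1, 28, 28] and a label vector of size [64].
sample_images, sample_labels = next(iter(train_loader))
print(sample_images.shape, sample_labels.shape)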
"""###Defining CNN model as mentioned in question""" | |
import torch | |
import torch.nn as nn | |
class MNISTModel(nn.Module): | |
def __init__(self): | |
super(MNISTModel, self).__init__() | |
self.conv_layers = nn.Sequential( | |
nn.Conv2d(in_channels=1, out_channels=8, kernel_size=3, stride=1, padding=1), | |
nn.ReLU(inplace=True), | |
nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, stride=1,padding=1), | |
nn.ReLU(inplace=True), | |
nn.MaxPool2d(kernel_size=2), | |
# nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1,padding=1), | |
# nn.ReLU(inplace=True), | |
# nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1,padding=1), | |
# nn.ReLU(inplace=True), | |
# nn.MaxPool2d(kernel_size=2), | |
# nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1,padding=1), | |
# nn.ReLU(inplace=True), | |
# nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1,padding=1), | |
# nn.ReLU(inplace=True), | |
# nn.MaxPool2d(kernel_size=2) | |
) | |
self.fc_layers = nn.Sequential( | |
nn.Linear(in_features=3136, out_features=10) | |
) | |
def forward(self, x): | |
x = self.conv_layers(x) | |
x = torch.flatten(x, 1) | |
# print(x.shape) | |
x = self.fc_layers(x) | |
return x | |
model = MNISTModel() | |
criterion = nn.CrossEntropyLoss() | |
optimizer = optim.Adam(model.parameters(), lr=0.001) | |
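# A small sanity check (not part of the original notebook): run a dummy batch
# through the untrained model to confirm the flattened feature size matches the
# Linear layer's in_features (3136) and that each image yields 10 logits.
with torch.no_grad():
    dummy = torch.zeros(2, 1, 28, 28)
    print(model(dummy).shape)  # expected: torch.Size([2, 10])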
"""###Training the model""" | |
def train_main(train_loader, test_loader, num_epochs, optimizer, model, device='cpu'): | |
# Lists to store the train and test losses and accuracies | |
train_losses = [] | |
test_losses = [] | |
train_accs = [] | |
test_accs = [] | |
criterion = nn.CrossEntropyLoss() | |
model.to(device) | |
for epoch in range(num_epochs): | |
train_loss = 0 | |
train_correct = 0 | |
train_total = 0 | |
for i, (images, labels) in enumerate(train_loader): | |
# Send inputs and targets to GPU if available | |
images = images.to(device) | |
labels = labels.to(device) | |
optimizer.zero_grad() # zero the gradients | |
outputs = model(images) | |
loss = criterion(outputs, labels) # calculate the loss | |
loss.backward() # backpropagation | |
optimizer.step() # update weights | |
train_loss += loss.item() | |
# calculate the training accuracy | |
_, predicted = torch.max(outputs.data, 1) | |
train_total += labels.size(0) | |
train_correct += (predicted == labels).sum().item() | |
# prof.step() ##Taking step in tensorboard profiler | |
# evaluate the model on the test set | |
test_loss = 0 | |
test_correct = 0 | |
test_total = 0 | |
with torch.no_grad(): | |
for images, labels in test_loader: | |
# Send inputs and targets to GPU if available | |
images = images.to(device) | |
labels = labels.to(device) | |
outputs = model(images) | |
loss = criterion(outputs, labels) | |
test_loss += loss.item() | |
# calculate the testing accuracy | |
_, predicted = torch.max(outputs.data, 1) | |
test_total += labels.size(0) | |
test_correct += (predicted == labels).sum().item() | |
# append the average loss and accuracy for the epoch to the lists | |
train_loss /= len(train_loader) | |
test_loss /= len(test_loader) | |
train_acc = 100.0 * train_correct / train_total | |
test_acc = 100.0 * test_correct / test_total | |
train_losses.append(train_loss) | |
test_losses.append(test_loss) | |
train_accs.append(train_acc) | |
test_accs.append(test_acc) | |
print('Epoch: {}, train loss: {:.4f}, test loss: {:.4f}, train accuracy: {:.2f}%, test accuracy: {:.2f}%'.format(epoch+1, train_loss, test_loss, train_acc, test_acc)) | |
print("Saving the model") | |
torch.save(model, './trained_model.pt') | |
print("Model saved successfully") | |
# save the results to a text file | |
print("Saving training logs") | |
with open("./results.txt", "w") as f: | |
for epoch in range(num_epochs): | |
f.write("Epoch: {}, train loss: {:.4f}, test loss: {:.4f}, train accuracy: {:.2f}%, test accuracy: {:.2f}%\n".format(epoch+1, train_losses[epoch], test_losses[epoch], train_accs[epoch], test_accs[epoch])) | |
print("Logs saved") | |
num_epochs=1 | |
train_main(train_loader, test_loader, num_epochs, optimizer, model, device) | |
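# A minimal usage sketch (assumption: the cell above has run, so './trained_model.pt'
# exists): reload the saved model object and re-check its accuracy on the test set
# with sklearn's accuracy_score. Depending on the PyTorch version, torch.load may
# need weights_only=False to unpickle a full model object.
loaded_model = torch.load('./trained_model.pt', map_location=device)
loaded_model.eval()
all_preds, all_labels = [], []
with torch.no_grad():
    for images, labels in test_loader:
        outputs = loaded_model(images.to(device))
        all_preds.extend(outputs.argmax(dim=1).cpu().tolist())
        all_labels.extend(labels.tolist())
print("Reloaded model test accuracy:", accuracy_score(all_labels, all_preds))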
"""###ResNet18 model""" | |
# model = models.resnet18(pretrained=False) | |
# num_ftrs = model.fc.in_features | |
# model.fc = nn.Linear(num_ftrs, 10) | |
# # model.to(device) | |
# criterion = nn.CrossEntropyLoss() | |
# optimizer = optim.Adam(model.parameters(), lr=0.001) |
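# A hedged sketch (not from the original notebook) of how the commented-out ResNet18
# setup could be adapted to MNIST: torchvision's resnet18 expects 3-channel input,
# so either repeat the grayscale channel in the transform or, as here, swap the
# first convolution for a 1-channel version. The names resnet_model and
# resnet_optimizer are illustrative, left commented out like the block above.
# resnet_model = models.resnet18(weights=None)
# resnet_model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
# resnet_model.fc = nn.Linear(resnet_model.fc.in_features, 10)
# resnet_optimizer = optim.Adam(resnet_model.parameters(), lr=0.001)
# train_main(train_loader, test_loader, num_epochs, resnet_optimizer, resnet_model, device)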