In [1]:
import torch
import torchvision
from torchvision.datasets import MNIST
In [2]:
MNIST??
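The cell that actually creates the `dataset` object is not included in this export. A minimal sketch of what it presumably looked like, given the later cells (the `root='data/'` path matches the rest of the notebook; `download=True` is an assumption and only matters on the first run):

# Assumed reconstruction of the missing cell that defines `dataset`
dataset = MNIST(root='data/', download=True)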
In [8]:
len(dataset)
Out[8]:
60000
In [9]:
test_dataset = MNIST(root='data', train=False)
In [10]:
len(test_dataset)
Out[10]:
10000
In [ ]:
!pip install matplotlib
In [16]:
import matplotlib.pyplot as plt
In [19]:
image, label = dataset[0]
plt.imshow(image)
# Printing the label directly works whether it is a plain int or a 0-dim tensor
print('Label:', label)
Label: 5
[Output image: the first training example rendered with plt.imshow]
In [20]:
import torchvision.transforms as transforms
In [21]:
dataset = MNIST(root='data/', 
                train=True,
                transform=transforms.ToTensor())
In [22]:
test_dataset = MNIST(root='data/',
                     train=False,
                     transform=transforms.ToTensor())
In [23]:
print(len(dataset))
print(len(test_dataset))
60000
10000
In [35]:
img_tensor, label = dataset[0]
print(img_tensor.shape, label.item())
print(torch.max(img_tensor), torch.min(img_tensor))
plt.imshow(img_tensor[0], cmap='gray');
torch.Size([1, 28, 28]) 5
tensor(1.) tensor(0.)
[Output image: the same digit plotted from the tensor in grayscale]
In [36]:
import numpy as np

def split_indices(n, val_pct=0.2):
    """Shuffle the indices 0..n-1 and split them into training and validation arrays."""
    n_val = int(val_pct * n)
    idxs = np.random.permutation(n)
    return idxs[n_val:], idxs[:n_val]
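A quick sanity check of the split. The seed below is only there to make this illustration repeatable; it is not part of the original notebook:

np.random.seed(42)                      # assumed seed, purely for a reproducible illustration
tr_idx, val_idx = split_indices(60000, 0.2)
print(len(tr_idx), len(val_idx))        # 48000 12000
print(val_idx[:5])                      # a few shuffled validation indices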
In [37]:
train_indices, val_indices = split_indices(len(dataset), 0.2)
In [38]:
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data.dataloader import DataLoader

train_sampler = SubsetRandomSampler(train_indices)
val_sampler = SubsetRandomSampler(val_indices)
batch_size = 100

train_loader = DataLoader(dataset, 
                          batch_size, 
                          sampler=train_sampler)

val_loader = DataLoader(dataset,
                        batch_size, 
                        sampler=val_sampler)
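An alternative way to get the same effect (a random validation split) is torch.utils.data.random_split. This is a sketch of that approach, not what the notebook above uses; the names train_ds, val_ds and the *_alt loaders are illustrative only:

from torch.utils.data import random_split

train_ds, val_ds = random_split(dataset, [48000, 12000])
train_loader_alt = DataLoader(train_ds, batch_size, shuffle=True)
val_loader_alt = DataLoader(val_ds, batch_size)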
In [39]:
import torch.nn as nn

input_size = 28*28
num_classes = 10

# Logistic regression model
model = nn.Linear(input_size, num_classes)
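The nn.Linear layer holds a weight matrix and a bias vector, one row/entry per output class. A quick check of their shapes (not shown in the original notebook):

print(model.weight.shape)   # torch.Size([10, 784])
print(model.bias.shape)     # torch.Size([10])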
In [40]:
preds = None
for images, labels in train_loader:
    inputs = images.reshape(-1, 28*28)
    targets = labels
    preds = model(inputs)
    break
print(preds.shape)
print(preds[0])
torch.Size([100, 10])
tensor([-0.4368, 0.4233, -0.2336, -0.3424, -0.2398, -0.0105, -0.6721, 0.2514, 0.1005, -0.4405], grad_fn=<SelectBackward>)
In [41]:
import torch.nn.functional as F

probs = F.softmax(preds, dim=1)
print(probs[0].data)
print("Sum: ", torch.sum(probs[0]).item())
tensor([0.0718, 0.1697, 0.0880, 0.0789, 0.0875, 0.1100, 0.0568, 0.1429, 0.1229, 0.0716])
Sum: 1.0
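The predicted class for each image is simply the index of the largest probability. A brief sketch (not in the original notebook) using the same torch.max call that the evaluation loop relies on later:

max_probs, pred_labels = torch.max(probs, dim=1)
print(pred_labels[:10])     # predicted digits for the first 10 images in the batch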
In [43]:
loss_fn = F.cross_entropy
loss = loss_fn(preds, targets)
loss
Out[43]:
tensor(2.3115, grad_fn=<NllLossBackward>)
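F.cross_entropy applies log_softmax and the negative log-likelihood loss in one call, which is why it is given the raw model outputs (preds) rather than the softmax probabilities. A small equivalence check, added here as an illustration only:

loss_manual = F.nll_loss(F.log_softmax(preds, dim=1), targets)
print(loss_manual)          # matches loss_fn(preds, targets) up to floating-point error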
In [52]:
num_epochs = 5
learning_rate = 0.001

optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Flatten each 1x28x28 image into a 784-dimensional vector
        inputs = images.reshape(-1, 28*28)

        # Forward + Backward + Optimize
        preds = model(inputs)
        loss = loss_fn(preds, labels)
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if (i+1) % 200 == 0:
            # len(train_loader) is the number of batches per epoch
            print('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
                  % (epoch+1, num_epochs, i+1, len(train_loader), loss.item()))
Epoch: [1/5], Step: [200/480], Loss: 0.7393
Epoch: [1/5], Step: [400/480], Loss: 0.8346
Epoch: [2/5], Step: [200/480], Loss: 0.7746
Epoch: [2/5], Step: [400/480], Loss: 0.7865
Epoch: [3/5], Step: [200/480], Loss: 0.7471
Epoch: [3/5], Step: [400/480], Loss: 0.8132
Epoch: [4/5], Step: [200/480], Loss: 0.6073
Epoch: [4/5], Step: [400/480], Loss: 0.6430
Epoch: [5/5], Step: [200/480], Loss: 0.7454
Epoch: [5/5], Step: [400/480], Loss: 0.6255
In [53]:
correct = 0
total = 0
for images, labels in val_loader:
    images = images.reshape(-1, 28*28)
    preds = model(images)
    _, predicted = torch.max(preds.data, 1)
    total += labels.size(0)
    correct += (predicted == labels).sum().item()

print('Accuracy of the model on the validation images: %d %%' % (100 * correct / total))
Accuracy of the model on the validation images: 85 %
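The loop above measures accuracy on the 12,000 held-out validation images, not on the separate test set loaded earlier. To evaluate on test_dataset as well, a DataLoader can be built for it; a sketch following the same pattern (not part of the original notebook):

test_loader = DataLoader(test_dataset, batch_size)

correct, total = 0, 0
with torch.no_grad():                   # no gradients needed for evaluation
    for images, labels in test_loader:
        preds = model(images.reshape(-1, 28*28))
        _, predicted = torch.max(preds, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Test accuracy: %.2f %%' % (100 * correct / total))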
In [ ]:
import jovian
jovian.commit()
[jovian] Saving notebook..