In [4]:
import numpy as np
import torch
import jovian
In [5]:
# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43],
                   [91, 88, 64],
                   [87, 134, 58],
                   [102, 43, 37],
                   [69, 96, 70]], dtype='float32')
In [6]:
# Targets (apples, oranges)
targets = np.array([[56, 70],
                    [81, 101],
                    [119, 133],
                    [22, 37],
                    [103, 119]], dtype='float32')
In [7]:
# Convert the NumPy arrays to PyTorch tensors
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
print(inputs)
print(targets)
tensor([[ 73.,  67.,  43.],
        [ 91.,  88.,  64.],
        [ 87., 134.,  58.],
        [102.,  43.,  37.],
        [ 69.,  96.,  70.]])
tensor([[ 56.,  70.],
        [ 81., 101.],
        [119., 133.],
        [ 22.,  37.],
        [103., 119.]])
In [8]:
# Weights and biases, initialized randomly
w = torch.randn(2, 3, requires_grad=True)
b = torch.randn(2, requires_grad=True)
print(w)
print(b)
tensor([[ 0.1715,  0.6757, -0.3921],
        [-0.1155,  1.7360,  1.1818]], requires_grad=True)
tensor([ 0.3256, -0.8298], requires_grad=True)
In [9]:
# Linear model: y = x @ w^T + b
def model(x):
    return x @ w.t() + b
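Here `w.t()` transposes the 2×3 weight matrix so the input batch can be matrix-multiplied against it, and the bias vector is broadcast across the rows. A minimal shape check (a sketch using the tensors defined above, not part of the original notebook) illustrates the broadcasting:
In [ ]:
# Sketch: inputs is (5, 3) and w.t() is (3, 2), so x @ w.t() is (5, 2);
# b has shape (2,) and is broadcast across all 5 rows.
print(inputs.shape, w.t().shape, model(inputs).shape)
# torch.Size([5, 3]) torch.Size([3, 2]) torch.Size([5, 2])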
In [10]:
preds = model(inputs)
print(preds)
tensor([[ 41.2560, 157.8669],
        [ 50.2985, 217.0611],
        [ 83.0460, 290.2899],
        [ 32.3661, 105.7618],
        [ 49.5782, 240.5811]], grad_fn=<AddBackward0>)
In [11]:
print(targets)
tensor([[ 56.,  70.],
        [ 81., 101.],
        [119., 133.],
        [ 22.,  37.],
        [103., 119.]])
In [12]:
# Mean Squared Error (MSE)
def mse(t1, t2):
    diff = t1 - t2
    return torch.sum(diff * diff) / diff.numel()
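Since this averages the squared differences over all elements, it should agree with PyTorch's built-in MSE loss (which this notebook switches to later). A quick sanity check, as a sketch:
In [ ]:
# Sketch: the hand-written mse should match torch.nn.functional.mse_loss,
# whose default reduction is also the mean over all elements.
import torch.nn.functional as F
print(torch.allclose(mse(preds, targets), F.mse_loss(preds, targets)))
# True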
In [13]:
loss = mse(preds, targets)
print(loss)
tensor(7085.5049, grad_fn=<DivBackward0>)
In [14]:
loss.backward()
In [15]:
print(w)
tensor([[ 0.1715,  0.6757, -0.3921],
        [-0.1155,  1.7360,  1.1818]], requires_grad=True)
In [16]:
print(w.grad)
tensor([[-1925.3833, -2638.0352, -1608.0413],
        [ 9212.5742, 10361.1709,  6276.7734]])
In [17]:
# Reset the gradients to zero (they would otherwise accumulate)
w.grad.zero_()
b.grad.zero_()
print(w.grad)
print(b.grad)
tensor([[0., 0., 0.],
        [0., 0., 0.]])
tensor([0., 0.])
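The reset is needed because PyTorch accumulates gradients: each call to `.backward()` adds to `.grad` rather than overwriting it. A tiny standalone sketch of this behavior:
In [ ]:
# Sketch: gradients accumulate across backward() calls unless zeroed.
x = torch.tensor(3.0, requires_grad=True)
(x * x).backward()
print(x.grad)    # tensor(6.)
(x * x).backward()
print(x.grad)    # tensor(12.) - the new gradient was added to the old one
x.grad.zero_()
print(x.grad)    # tensor(0.)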
In [18]:
preds = model(inputs)
print(preds)
tensor([[ 41.2560, 157.8669],
        [ 50.2985, 217.0611],
        [ 83.0460, 290.2899],
        [ 32.3661, 105.7618],
        [ 49.5782, 240.5811]], grad_fn=<AddBackward0>)
In [20]:
loss = mse(preds, targets)
print(loss)
tensor(7085.5049, grad_fn=<DivBackward0>)
In [21]:
loss.backward()
print(w.grad)
print(b.grad)
tensor([[-1925.3833, -2638.0352, -1608.0413],
        [ 9212.5742, 10361.1709,  6276.7734]])
tensor([-24.8911, 110.3122])
In [23]:
# Adjust weights & biases via gradient descent, then reset the gradients.
# torch.no_grad() tells PyTorch not to track these updates.
with torch.no_grad():
    w -= w.grad * 1e-5
    b -= b.grad * 1e-5
    w.grad.zero_()
    b.grad.zero_()
In [24]:
print(w)
print(b)
tensor([[ 0.2100,  0.7284, -0.3599],
        [-0.2998,  1.5288,  1.0562]], requires_grad=True)
tensor([ 0.3261, -0.8320], requires_grad=True)
In [25]:
preds = model(inputs)
loss = mse(preds, targets)
print(loss)
tensor(3063.9771, grad_fn=<DivBackward0>)
In [27]:
# Train for 100 epochs
for i in range(100):
    preds = model(inputs)
    loss = mse(preds, targets)
    loss.backward()
    with torch.no_grad():
        w -= w.grad * 1e-5
        b -= b.grad * 1e-5
        w.grad.zero_()
        b.grad.zero_()
In [28]:
print(loss)
tensor(98.1174, grad_fn=<DivBackward0>)
In [29]:
preds
Out[29]:
tensor([[ 60.5600,  68.2216],
        [ 77.7041,  98.3389],
        [123.4771, 141.6139],
        [ 39.8118,  25.6020],
        [ 83.2209, 121.5778]], grad_fn=<AddBackward0>)
In [30]:
targets
Out[30]:
tensor([[ 56.,  70.],
        [ 81., 101.],
        [119., 133.],
        [ 22.,  37.],
        [103., 119.]])
In [31]:
import torch.nn as nn
In [44]:
# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43], [91, 88, 64], [87, 134, 58], 
                   [102, 43, 37], [69, 96, 70], [73, 67, 43], 
                   [91, 88, 64], [87, 134, 58], [102, 43, 37], 
                   [69, 96, 70], [73, 67, 43], [91, 88, 64], 
                   [87, 134, 58], [102, 43, 37], [69, 96, 70]], 
                  dtype='float32')

# Targets (apples, oranges)
targets = np.array([[56, 70], [81, 101], [119, 133], 
                    [22, 37], [103, 119], [56, 70], 
                    [81, 101], [119, 133], [22, 37], 
                    [103, 119], [56, 70], [81, 101], 
                    [119, 133], [22, 37], [103, 119]], 
                   dtype='float32')

inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
In [35]:
from torch.utils.data import TensorDataset
In [36]:
train_ds = TensorDataset(inputs, targets)
train_ds[0:3]
Out[36]:
(tensor([[ 73.,  67.,  43.],
         [ 91.,  88.,  64.],
         [ 87., 134.,  58.]]), tensor([[ 56.,  70.],
         [ 81., 101.],
         [119., 133.]]))
In [37]:
from torch.utils.data import DataLoader
In [38]:
# Data loader that yields shuffled batches of 5
batch_size = 5
train_dl = DataLoader(train_ds, batch_size, shuffle=True)
In [39]:
# Inspect one batch from the data loader
for xb, yb in train_dl:
    print(xb)
    print(yb)
    break
tensor([[ 91.,  88.,  64.],
        [ 91.,  88.,  64.],
        [ 73.,  67.,  43.],
        [ 69.,  96.,  70.],
        [ 87., 134.,  58.]])
tensor([[ 81., 101.],
        [ 81., 101.],
        [ 56.,  70.],
        [103., 119.],
        [119., 133.]])
In [48]:
# Define the model: a single linear layer (3 inputs -> 2 outputs)
model = nn.Linear(3, 2)
print(model.weight)
print(model.bias)
Parameter containing:
tensor([[-0.1633, -0.1732, -0.3919],
        [-0.5531, -0.2251, -0.2178]], requires_grad=True)
Parameter containing:
tensor([ 0.2345, -0.4235], requires_grad=True)
In [49]:
list(model.parameters())
Out[49]:
[Parameter containing:
 tensor([[-0.1633, -0.1732, -0.3919],
         [-0.5531, -0.2251, -0.2178]], requires_grad=True),
 Parameter containing:
 tensor([ 0.2345, -0.4235], requires_grad=True)]
In [50]:
preds = model(inputs)
preds
Out[50]:
tensor([[-40.1375, -65.2443],
        [-54.9423, -84.5007],
        [-59.9050, -91.3336],
        [-38.3643, -74.5754],
        [-55.0872, -75.4404],
        [-40.1375, -65.2443],
        [-54.9423, -84.5007],
        [-59.9050, -91.3336],
        [-38.3643, -74.5754],
        [-55.0872, -75.4404],
        [-40.1375, -65.2443],
        [-54.9423, -84.5007],
        [-59.9050, -91.3336],
        [-38.3643, -74.5754],
        [-55.0872, -75.4404]], grad_fn=<AddmmBackward>)
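nn.Linear stores a weight matrix of shape (out_features, in_features) and a bias vector, and computes the same x @ w.t() + b as the hand-written model above. A quick equivalence check (a sketch using the tensors already defined):
In [ ]:
# Sketch: nn.Linear applies x @ W.T + b, just like the manual model.
manual = inputs @ model.weight.t() + model.bias
print(torch.allclose(manual, preds))
# True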
In [52]:
import torch.nn.functional as F
In [53]:
# Use the built-in mean squared error loss
loss_fn = F.mse_loss
In [54]:
loss = loss_fn(model(inputs), targets)
print(loss)
tensor(24164.8379, grad_fn=<MseLossBackward>)
In [55]:
# Define the optimizer: stochastic gradient descent
opt = torch.optim.SGD(model.parameters(), lr=1e-5)
In [56]:
# Utility function to train the model
def fit(num_epochs, model, loss_fn, opt):
    for epoch in range(num_epochs):
        for xb, yb in train_dl:
            pred = model(xb)            # 1. Generate predictions
            loss = loss_fn(pred, yb)    # 2. Calculate the loss
            loss.backward()             # 3. Compute gradients
            opt.step()                  # 4. Update parameters
            opt.zero_grad()             # 5. Reset gradients to zero
        # Print the progress every 10th epoch
        if (epoch + 1) % 10 == 0:
            print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))
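The notebook stops before actually calling fit; a plausible invocation (the epoch count here is illustrative, mirroring the earlier manual loop) would be:
In [ ]:
# Hypothetical usage: train for 100 epochs; fit() prints the loss every 10 epochs.
fit(100, model, loss_fn, opt)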
In [ ]:
jovian.commit()
[jovian] Saving notebook..