Jovian
⭐️
Sign In

System setup

If you want to follow along and run the code as you read, you can clone this notebook, install the required dependencies, and start Jupyter by running the following commands on the terminal:

pip install jovian --upgrade    # Install the jovian library 
jovian clone <notebook_id>      # Download notebook & dependencies
cd 02-linear-regression         # Enter the created directory 
jovian install                  # Install the dependencies
conda activate 02-linear-regression # Activate virtual environment
jupyter notebook                # Start Jupyter

You can find the notebook_id by clicking the Clone button at the top of this page on Jovian. On older versions of conda, you might need to run source activate 02-linear-regression to activate the environment. For a more detailed explanation of the above steps, check out the System setup section in the previous notebook.

In [1]:
import numpy as np
import torch
In [2]:
# Input features: one row per region, columns = (temp, rainfall, humidity).
# float32 matches PyTorch's default floating-point dtype.
inputs = np.array(
    [[73., 67., 43.],
     [91., 88., 64.],
     [87., 134., 58.],
     [102., 43., 37.],
     [69., 96., 70.]],
    dtype='float32',
)
In [3]:
# Target yields: one row per region, columns = (apples, oranges).
# float32 matches PyTorch's default floating-point dtype.
targets = np.array(
    [[56., 70.],
     [81., 101.],
     [119., 133.],
     [22., 37.],
     [103., 119.]],
    dtype='float32',
)
In [4]:
# Wrap the NumPy arrays as PyTorch tensors (torch.tensor copies the data)
# so they can take part in autograd-tracked computations below.
inputs = torch.tensor(inputs)
targets = torch.tensor(targets)
# Display both tensors to confirm the conversion.
print(inputs)
print(targets)
tensor([[ 73., 67., 43.], [ 91., 88., 64.], [ 87., 134., 58.], [102., 43., 37.], [ 69., 96., 70.]]) tensor([[ 56., 70.], [ 81., 101.], [119., 133.], [ 22., 37.], [103., 119.]])
In [5]:
# Model parameters, randomly initialised (no manual seed, so the printed
# values differ between runs).
# w: (2 outputs x 3 features) weight matrix; b: one bias per output.
# requires_grad=True tells autograd to accumulate gradients into .grad.
w = torch.randn(2, 3, requires_grad=True)
b = torch.randn(2, requires_grad=True)
# Show the starting values of both parameters.
print(w)
print(b)
tensor([[-0.5682, 0.6484, -0.5517], [-0.0951, -0.9186, -0.3934]], requires_grad=True) tensor([-0.0505, 0.4707], requires_grad=True)
In [6]:
def model(x):
    """Linear model: returns x @ w.T + b using the module-level w and b.

    x is expected to have 3 columns (matching w's 3 feature weights);
    the result has 2 columns, one per output variable.
    """
    return torch.matmul(x, w.t()) + b
In [7]:
# Generate predictions with the randomly initialised parameters.
# The values below are far from the targets, as expected before any training.
preds = model(inputs)
print(preds)
tensor([[ -21.8116, -84.9344], [ -30.0094, -114.1981], [ 5.4001, -153.7126], [ -50.5406, -63.2865], [ -15.6322, -121.8144]], grad_fn=<AddBackward0>)
In [8]:
# Compare with the true targets to see how far off the predictions are.
print(targets)
tensor([[ 56., 70.], [ 81., 101.], [119., 133.], [ 22., 37.], [103., 119.]])
In [9]:
# MSE loss
def mse(t1, t2):
    """Mean squared error between two tensors of the same shape.

    Sums the element-wise squared differences and divides by the total
    number of elements, returning a scalar tensor.
    """
    diff = t1 - t2
    return (diff * diff).sum() / diff.numel()
In [10]:
# Compute the loss for the initial (untrained) predictions.
loss = mse(preds, targets)
print(loss)
tensor(27118.6348, grad_fn=<DivBackward0>)
In [11]:
# Compute gradients of the loss w.r.t. w and b.
# NOTE: backward() ACCUMULATES into .grad — it does not overwrite it.
loss.backward()
In [12]:
# Gradients for the weights (d loss / d w), same shape as w.
print(w)
print(w.grad)
tensor([[-0.5682, 0.6484, -0.5517], [-0.0951, -0.9186, -0.3934]], requires_grad=True) tensor([[ -8250.0127, -8942.5059, -5605.5103], [-16536.5293, -19033.6055, -11526.3594]])
In [13]:
# Generate predictions again (parameters are unchanged, so the values
# match the earlier prediction output exactly).
preds = model(inputs)
print(preds)
tensor([[ -21.8116, -84.9344], [ -30.0094, -114.1981], [ 5.4001, -153.7126], [ -50.5406, -63.2865], [ -15.6322, -121.8144]], grad_fn=<AddBackward0>)
In [14]:
# Calculate the loss — same value as before, for the same reason.
loss = mse(preds, targets)
print(loss)
tensor(27118.6348, grad_fn=<DivBackward0>)
In [15]:
# Compute gradients.
# NOTE(review): .grad was never reset after the previous backward() call,
# so these gradients are ADDED to the earlier ones — the w.grad printed
# below is exactly twice the values shown after the first backward()
# (e.g. -8250.0127 -> -16500.0254). Call w.grad.zero_() / b.grad.zero_()
# before backward() to avoid this accumulation.
loss.backward()
print(w.grad)
print(b.grad)
tensor([[-16500.0254, -17885.0117, -11211.0205], [-33073.0586, -38067.2109, -23052.7188]]) tensor([-197.4375, -399.1784])
In [16]:
# One gradient-descent step: nudge each parameter against its gradient,
# then clear the accumulated gradients ready for the next backward() pass.
# torch.no_grad() stops autograd from tracking these in-place updates.
with torch.no_grad():
    for param in (w, b):
        param -= param.grad * 1e-5
        param.grad.zero_()
In [17]:
# Parameter values after the single update step above.
print(w)
print(b)
tensor([[-0.4032, 0.8272, -0.4396], [ 0.2356, -0.5379, -0.1629]], requires_grad=True) tensor([-0.0485, 0.4747], requires_grad=True)
In [18]:
# Recompute the loss — lower than the initial 27118.63, confirming that
# the gradient step moved the parameters in the right direction.
preds = model(inputs)
loss = mse(preds, targets)
print(loss)
tensor(11550.0303, grad_fn=<DivBackward0>)
In [19]:
# Train for 100 epochs of full-batch gradient descent.
lr = 1e-5  # learning rate — small because the inputs are unscaled (~40-130)
for epoch in range(100):
    preds = model(inputs)
    loss = mse(preds, targets)
    # Populate w.grad / b.grad (accumulates, hence the zero_() calls below).
    loss.backward()
    # Update parameters and reset gradients outside autograd tracking.
    with torch.no_grad():
        w -= w.grad * lr
        b -= b.grad * lr
        w.grad.zero_()
        b.grad.zero_()
In [20]:
# Final loss after 100 epochs — down from 27118.63 to ~220.
preds = model(inputs)
loss = mse(preds, targets)
print(loss)
tensor(220.4561, grad_fn=<DivBackward0>)
In [21]:
# Trained predictions — now reasonably close to the targets below.
preds
Out[21]:
tensor([[ 58.3183,  76.5219],
        [ 76.2146, 101.2017],
        [130.4675, 121.7629],
        [ 27.1459,  72.2906],
        [ 87.9478,  99.5716]], grad_fn=<AddBackward0>)
In [22]:
# Targets, for side-by-side comparison with the predictions above.
targets
Out[22]:
tensor([[ 56.,  70.],
        [ 81., 101.],
        [119., 133.],
        [ 22.,  37.],
        [103., 119.]])
In [ ]:
# Save and upload this notebook to Jovian (third-party service; requires
# a Jovian account / API key and network access).
import jovian
jovian.commit()