# patdbro/02-linear-regression-pdb-ipynb-f8db0

2 years ago
In [1]:
``````import numpy as np
import torch``````
In [2]:
# Training inputs: 5 samples x 3 features (temperature, rainfall, humidity).
inputs = np.array(
    [[73.0, 67.0, 43.0],
     [91.0, 88.0, 64.0],
     [87.0, 134.0, 58.0],
     [102.0, 43.0, 37.0],
     [69.0, 96.0, 70.0]],
    dtype='float32',
)
In [6]:
# Training targets: 5 samples x 2 outputs (apples, oranges yield).
targets = np.array(
    [[56.0, 70.0],
     [81.0, 101.0],
     [119.0, 133.0],
     [22.0, 37.0],
     [103.0, 119.0]],
    dtype='float32',
)
In [7]:
``````# Wrap the numpy arrays as torch tensors (zero-copy: shares the array memory)
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
print(inputs)
print(targets)
``````
```tensor([[ 73., 67., 43.], [ 91., 88., 64.], [ 87., 134., 58.], [102., 43., 37.], [ 69., 96., 70.]]) tensor([[ 56., 70.], [ 81., 101.], [119., 133.], [ 22., 37.], [103., 119.]]) ```
In [8]:
# Initialize the model parameters randomly.  requires_grad=True makes
# autograd track operations on them so gradients can be computed.
w = torch.randn(2, 3, requires_grad=True)
# Bug fix: b was printed below (and used by model) but never defined in the
# pasted cell; the printed output shows a 2-element bias with requires_grad.
b = torch.randn(2, requires_grad=True)
print(w)
print(b)
```tensor([[ 0.4744, 1.6091, -1.0200], [ 0.5393, 0.4432, -0.9145]], requires_grad=True) tensor([1.1678, 0.0228], requires_grad=True) ```
In [9]:
def model(x):
    """Linear model: x @ w.T + b, using the global parameters w and b.

    x is expected to be a (N, 3) float tensor; returns a (N, 2) tensor.
    (Indentation was lost in the paste; restored here.)
    """
    return x @ w.t() + b
In [10]:
``````# Forward pass with the current (random) parameters
preds = model(inputs)
print(preds)
``````
```tensor([[ 99.7434, 29.7613], [120.6519, 29.5707], [198.8922, 53.2868], [ 81.0023, 40.2523], [116.9683, 15.7641]], grad_fn=<AddBackward0>) ```
In [11]:
``print(targets)``
```tensor([[ 56., 70.], [ 81., 101.], [119., 133.], [ 22., 37.], [103., 119.]]) ```
In [12]:
def mse(t1, t2):
    """Mean squared error between two tensors of the same shape.

    The return line was truncated in the paste; the recorded output
    (`grad_fn=<DivBackward0>`, loss 3728.86) confirms the standard
    sum-of-squares / numel form.
    """
    diff = t1 - t2
    return torch.sum(diff * diff) / diff.numel()
In [13]:
``````# Average squared error of the random-parameter predictions
loss = mse(preds, targets)
print(loss)
``````
```tensor(3728.8628, grad_fn=<DivBackward0>) ```
In [16]:
``loss.backward()``
In [17]:
# Inspect the weights and the gradient accumulated by loss.backward().
# The recorded output shows both w and a gradient tensor, so the pasted
# cell lost the print(w.grad) line; restored here.
print(w)
print(w.grad)
```tensor([[ 0.4744, 1.6091, -1.0200], [ 0.5393, 0.4432, -0.9145]], requires_grad=True) tensor([[ 4146.8521, 4200.7573, 2442.6606], [-4632.8164, -5886.8281, -3606.2563]]) ```
In [19]:
# Reset gradients to zero before the next backward pass — PyTorch
# accumulates gradients by default.  The recorded output shows both the
# (2,3) and (2,) grads zeroed, so b.grad.zero_() was lost in the paste.
w.grad.zero_()
b.grad.zero_()
```tensor([[0., 0., 0.], [0., 0., 0.]]) tensor([0., 0.]) ```
In [20]:
``````# Forward pass again — parameters unchanged, so same predictions as before
preds = model(inputs)
print(preds)
``````
```tensor([[ 99.7434, 29.7613], [120.6519, 29.5707], [198.8922, 53.2868], [ 81.0023, 40.2523], [116.9683, 15.7641]], grad_fn=<AddBackward0>) ```
In [22]:
``````# Recompute the loss for the fresh forward pass
loss = mse(preds, targets)
print(loss)
``````
```tensor(3728.8628, grad_fn=<DivBackward0>) ```
In [24]:
# Backward pass, then inspect the gradients.  The recorded output shows
# both gradient tensors, so the print lines were lost in the paste.
loss.backward()
print(w.grad)
print(b.grad)
```tensor([[ 4146.8521, 4200.7573, 2442.6606], [-4632.8164, -5886.8281, -3606.2563]]) tensor([ 47.2516, -58.2730]) ```
In [25]:
# One step of gradient descent.  The body of this cell was lost in the
# paste; the next cell's output shows updated w/b, which matches the
# standard tutorial update.  no_grad() stops autograd from tracking the
# in-place parameter updates; the grads are then cleared for the next pass.
with torch.no_grad():
    w -= w.grad * 1e-5
    b -= b.grad * 1e-5
    w.grad.zero_()
    b.grad.zero_()
In [26]:
``````# Parameters after one gradient-descent step
print(w)
print(b)
``````
```tensor([[ 0.4329, 1.5671, -1.0445], [ 0.5856, 0.5020, -0.8785]], requires_grad=True) tensor([1.1673, 0.0234], requires_grad=True) ```
In [27]:
``````# Loss after the update — lower than before (2727 vs 3729)
preds = model(inputs)
loss = mse(preds, targets)
print(loss)
``````
```tensor(2727.4968, grad_fn=<DivBackward0>) ```
In [45]:
# Train for 500 epochs of full-batch gradient descent.  The pasted cell
# lost the parameter-update step; as written, gradients would accumulate
# across iterations and the loss could never drop to the recorded 50.7.
for i in range(500):
    preds = model(inputs)
    loss = mse(preds, targets)
    loss.backward()
    with torch.no_grad():
        w -= w.grad * 1e-5
        b -= b.grad * 1e-5
        w.grad.zero_()
        b.grad.zero_()
In [46]:
``````# Final training loss after 500 epochs
preds = model(inputs)
loss = mse(preds, targets)
print(loss)
``````
```tensor(50.7088, grad_fn=<DivBackward0>) ```
In [47]:
``preds``
Out[47]:
``````tensor([[ 58.0058,  70.8741],
[ 76.7061,  96.0836],
[129.8559, 142.4814],
[ 24.2202,  39.7749],
In [48]:
``targets``
Out[48]:
``````tensor([[ 56.,  70.],
[ 81., 101.],
[119., 133.],
[ 22.,  37.],
[103., 119.]])``````
In [49]:
``import torch.nn as nn``
In [50]:
# A larger dataset for the nn.Linear version: the same five samples
# repeated three times (15 rows total), converted straight to tensors.
base_inputs = [[73, 67, 43], [91, 88, 64], [87, 134, 58],
               [102, 43, 37], [69, 96, 70]]
base_targets = [[56, 70], [81, 101], [119, 133],
                [22, 37], [103, 119]]
inputs = torch.from_numpy(np.array(base_inputs * 3, dtype='float32'))
targets = torch.from_numpy(np.array(base_targets * 3, dtype='float32'))
In [51]:
``from torch.utils.data import TensorDataset``
In [53]:
``````# TensorDataset pairs each input row with its target row; slicing
# returns an (inputs, targets) tuple of tensors.
train_ds = TensorDataset(inputs, targets)
train_ds[0:3]``````
Out[53]:
``````(tensor([[ 73.,  67.,  43.],
[ 91.,  88.,  64.],
[ 87., 134.,  58.]]), tensor([[ 56.,  70.],
[ 81., 101.],
[119., 133.]]))``````
In [54]:
``from torch.utils.data import DataLoader``
In [55]:
``````# DataLoader yields shuffled mini-batches of 5 samples each epoch
batch_size = 5
train_dl = DataLoader(train_ds, batch_size, shuffle = True)``````
In [57]:
# Peek at a single shuffled mini-batch from the loader.
first_batch = next(iter(train_dl))
xb, yb = first_batch
print(xb)
print(yb)
```tensor([[91., 88., 64.], [73., 67., 43.], [69., 96., 70.], [73., 67., 43.], [69., 96., 70.]]) tensor([[ 81., 101.], [ 56., 70.], [103., 119.], [ 56., 70.], [103., 119.]]) ```
In [58]:
``````# nn.Linear(3, 2) creates the (2x3) weight and (2,) bias automatically
model = nn.Linear(3,2)
print(model.weight)
print(model.bias)
``````
```Parameter containing: tensor([[-0.0537, -0.4990, 0.5435], [-0.3200, -0.1851, -0.3750]], requires_grad=True) Parameter containing: tensor([-0.5131, -0.0229], requires_grad=True) ```
In [59]:
``list(model.parameters())``
Out[59]:
``````[Parameter containing:
tensor([[-0.0537, -0.4990,  0.5435],
Parameter containing:
In [60]:
``````# Predictions with the untrained nn.Linear model
preds = model(inputs)
preds
``````
Out[60]:
``````tensor([[-14.4937, -51.9079],
[-14.5255, -69.4293],
[-40.5249, -74.4125],
[ -7.3354, -54.4968],
[-14.0758, -66.1195],
[-14.4937, -51.9079],
[-14.5255, -69.4293],
[-40.5249, -74.4125],
[ -7.3354, -54.4968],
[-14.0758, -66.1195],
[-14.4937, -51.9079],
[-14.5255, -69.4293],
[-40.5249, -74.4125],
[ -7.3354, -54.4968],
In [61]:
``import torch.nn.functional as F``
In [63]:
``loss_fn = F.mse_loss``
In [64]:
``````# Loss of the untrained nn.Linear model
loss = loss_fn(model(inputs), targets)
print(loss)
``````
```tensor(18367.8496, grad_fn=<MseLossBackward>) ```
In [65]:
``opt = torch.optim.SGD(model.parameters(), lr = 1e-5)``
In [66]:
def fit(num_epochs, model, loss_fn, opt):
    """Train `model` for `num_epochs` epochs over the global `train_dl`.

    Per mini-batch: forward pass, loss, backward pass, optimizer step,
    then opt.zero_grad() — without the zero_grad the gradients would
    accumulate across batches and training would diverge (the recorded
    converging output shows it was present originally).  Prints the last
    batch loss every 10 epochs.
    """
    for epoch in range(num_epochs):
        for xb, yb in train_dl:
            pred = model(xb)
            loss = loss_fn(pred, yb)
            loss.backward()
            opt.step()
            opt.zero_grad()
        if (epoch + 1) % 10 == 0:
            # Fixed typo in the log label: 'Epocj' -> 'Epoch'.
            print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))
In [69]:
``fit(500, model, loss_fn, opt)``
```Epocj [10/500], Loss: 13.4332 Epocj [20/500], Loss: 7.9562 Epocj [30/500], Loss: 7.6353 Epocj [40/500], Loss: 9.6656 Epocj [50/500], Loss: 5.8910 Epocj [60/500], Loss: 7.2819 Epocj [70/500], Loss: 2.0595 Epocj [80/500], Loss: 5.0670 Epocj [90/500], Loss: 7.3966 Epocj [100/500], Loss: 2.6466 Epocj [110/500], Loss: 5.5364 Epocj [120/500], Loss: 4.3563 Epocj [130/500], Loss: 4.7973 Epocj [140/500], Loss: 3.9083 Epocj [150/500], Loss: 2.7219 Epocj [160/500], Loss: 5.0317 Epocj [170/500], Loss: 4.3571 Epocj [180/500], Loss: 5.0892 Epocj [190/500], Loss: 2.5733 Epocj [200/500], Loss: 3.1147 Epocj [210/500], Loss: 2.5713 Epocj [220/500], Loss: 3.6608 Epocj [230/500], Loss: 2.6571 Epocj [240/500], Loss: 1.2790 Epocj [250/500], Loss: 2.0212 Epocj [260/500], Loss: 2.0507 Epocj [270/500], Loss: 3.0587 Epocj [280/500], Loss: 1.8138 Epocj [290/500], Loss: 1.5471 Epocj [300/500], Loss: 2.4758 Epocj [310/500], Loss: 1.7142 Epocj [320/500], Loss: 1.6540 Epocj [330/500], Loss: 1.8343 Epocj [340/500], Loss: 1.7114 Epocj [350/500], Loss: 1.7381 Epocj [360/500], Loss: 1.5822 Epocj [370/500], Loss: 1.8067 Epocj [380/500], Loss: 1.9285 Epocj [390/500], Loss: 1.4141 Epocj [400/500], Loss: 1.3890 Epocj [410/500], Loss: 1.0819 Epocj [420/500], Loss: 1.0841 Epocj [430/500], Loss: 1.3877 Epocj [440/500], Loss: 0.9794 Epocj [450/500], Loss: 1.3461 Epocj [460/500], Loss: 1.1157 Epocj [470/500], Loss: 0.9439 Epocj [480/500], Loss: 1.3156 Epocj [490/500], Loss: 1.2327 Epocj [500/500], Loss: 1.1181 ```
In [70]:
``````# Predictions after training — close to the targets shown below
preds = model(inputs)
preds
``````
Out[70]:
``````tensor([[ 57.0735,  70.3822],
[ 82.5073, 100.0089],
[118.2888, 134.3195],
[ 20.9807,  37.4039],
[102.3905, 117.7594],
[ 57.0735,  70.3822],
[ 82.5073, 100.0089],
[118.2888, 134.3195],
[ 20.9807,  37.4039],
[102.3905, 117.7594],
[ 57.0735,  70.3822],
[ 82.5073, 100.0089],
[118.2888, 134.3195],
[ 20.9807,  37.4039],
In [71]:
``targets``
Out[71]:
``````tensor([[ 56.,  70.],
[ 81., 101.],
[119., 133.],
[ 22.,  37.],
[103., 119.],
[ 56.,  70.],
[ 81., 101.],
[119., 133.],
[ 22.,  37.],
[103., 119.],
[ 56.,  70.],
[ 81., 101.],
[119., 133.],
[ 22.,  37.],
[103., 119.]])``````
In [72]:
``import jovian``
In [ ]:
``jovian.commit()``
```[jovian] Saving notebook.. ```
In [ ]:
`` ``