#!pip install jovian --upgrade
import jovian
import torch
import torch.nn
import numpy as np
torch.manual_seed(123)
<torch._C.Generator at 0x7efb9caef910>
For a scalar
# directly creating tensor
# A tensor is a number, vector, matrix or any n-dimensional array. Let's create a tensor with a single number:
t1 = torch.tensor([3.])
t1 , t1.dtype
(tensor([3.]), torch.float32)
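Note that `torch.tensor([3.])` is a one-element vector; a true scalar (0-dimensional) tensor is created by passing the number directly. A minimal sketch (using a new variable name, `t1_scalar`):
# a true scalar (0-dimensional) tensor, created by passing the number directly
t1_scalar = torch.tensor(3.)
t1_scalar.shape, t1_scalar.item()   # (torch.Size([]), 3.0)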
For a Vector
# create a tensor from a Python list
list1 = [1,2,3,4]
t2 = torch.tensor(list1)
t2, t2.dtype
(tensor([1, 2, 3, 4]), torch.int64)
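Because the list holds only integers, PyTorch infers `torch.int64`; the dtype can also be set explicitly (small sketch, new variable `t2_float`):
# force a floating-point tensor from the same integer list
t2_float = torch.tensor(list1, dtype=torch.float32)
t2_float, t2_float.dtype   # (tensor([1., 2., 3., 4.]), torch.float32)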
# create a tensor with a numpy array
# numpy array ---> tensor
arr = np.array([[1, 2], [3, 4.]])
arr.shape, arr.dtype
((2, 2), dtype('float64'))
t4 = torch.tensor(arr)
t4, t4.dtype
(tensor([[1., 2.],
[3., 4.]], dtype=torch.float64), torch.float64)
t3 = torch.from_numpy(arr)
t3, t3.dtype
(tensor([[1., 2.],
[3., 4.]], dtype=torch.float64), torch.float64)
# tensor ---> numpy array
t3.numpy(), t3.dtype
(array([[1., 2.],
[3., 4.]]), torch.float64)
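One difference worth noting: `torch.tensor(arr)` copies the data, while `torch.from_numpy(arr)` shares memory with the NumPy array, so in-place changes to the array show up in the tensor. A quick check reusing `arr`, `t3` and `t4` from above:
arr[0, 0] = 100.
t3[0, 0], t4[0, 0]   # t3 shares memory with arr (tensor(100., dtype=torch.float64)), t4 holds a copy (tensor(1., dtype=torch.float64))
arr[0, 0] = 1.       # restore the original value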
For a Matrix
# matrix
t5 = torch.tensor([[5., 6], [7, 8], [9, 10]])
t5, t5.dtype
(tensor([[ 5., 6.],
[ 7., 8.],
[ 9., 10.]]), torch.float32)
t5.shape
torch.Size([3, 2])
Reshape | View | Resize
# view: returns a new tensor with the same data as this tensor but of a different shape
t5.view(2,-1)
tensor([[ 5., 6., 7.],
[ 8., 9., 10.]])
# reshape: returns a tensor with the same data and number of elements but with the
# specified shape; it returns a view when the new shape is compatible with the
# current memory layout, otherwise it copies
t5.reshape(2,-1)
tensor([[ 5., 6., 7.],
[ 8., 9., 10.]])
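The practical difference shows up on non-contiguous tensors: `view` requires contiguous memory, while `reshape` falls back to copying when a view is impossible. A small sketch using the transpose of `t5`:
t5_t = t5.t()            # transpose is a non-contiguous view of the same data
t5_t.is_contiguous()     # False
t5_t.reshape(-1)         # works: reshape copies the data when a view is not possible
# t5_t.view(-1)          # would raise a RuntimeError, because view needs contiguous memory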
q = torch.tensor(4., requires_grad=True) # requires_grad=True tells autograd to track operations on this tensor so gradients can be computed
q1 = torch.tensor(5., requires_grad=True)
r = torch.tensor(2., requires_grad=True)
r1 = torch.tensor(3., requires_grad=True)
b1 = torch.tensor(5., requires_grad=True)
z = q*r+b1
s = z*r1+q1
s.backward()
s
tensor(44., grad_fn=<AddBackward0>)
q.grad, r.grad, b1.grad
(tensor(6.), tensor(12.), tensor(3.))
z.grad, r1.grad, q1.grad # z is an intermediate (non-leaf) tensor, so its gradient is not retained by default
(None, tensor(13.), tensor(1.))
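If the gradient of an intermediate value like z is needed, `retain_grad()` can be called on it before `backward()`. A minimal sketch (the graph is rebuilt, since the first `backward()` freed it):
z = q*r + b1
z.retain_grad()          # ask autograd to keep the gradient of this non-leaf tensor
s = z*r1 + q1
s.backward()
z.grad                   # ds/dz = r1 = 3 (note: the leaf .grad values above accumulate a second pass)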
# simple linear function with input(x), weight(w), bias(b)
x = torch.randn(1,3, requires_grad=True) # 3 input values
w = torch.randn(3,1, requires_grad=True) # torch.randn_like(x).t() would also give a random (3, 1) weight tensor
b = torch.randn(3,1 ,requires_grad=True)
x, w, b
(tensor([[-0.1115, 0.1204, -0.3696]], requires_grad=True), tensor([[-0.2404],
[-1.1969],
[ 0.2093]], requires_grad=True), tensor([[-0.9724],
[-0.7550],
[ 0.3239]], requires_grad=True))
Apply a simple linear network:
y = x*w+b
y = torch.mm(x, w)+ b
y # check the gradient function
tensor([[-1.1670],
[-0.9497],
[ 0.1293]], grad_fn=<AddBackward0>)
y.backward() # cal grad
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-25-308d2d002a01> in <module>
----> 1 y.backward() # cal grad
/srv/conda/envs/notebook/lib/python3.7/site-packages/torch/tensor.py in backward(self, gradient, retain_graph, create_graph)
164 products. Defaults to ``False``.
165 """
--> 166 torch.autograd.backward(self, gradient, retain_graph, create_graph)
167
168 def register_hook(self, hook):
/srv/conda/envs/notebook/lib/python3.7/site-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
91 grad_tensors = list(grad_tensors)
92
---> 93 grad_tensors = _make_grads(tensors, grad_tensors)
94 if retain_graph is None:
95 retain_graph = create_graph
/srv/conda/envs/notebook/lib/python3.7/site-packages/torch/autograd/__init__.py in _make_grads(outputs, grads)
32 if out.requires_grad:
33 if out.numel() != 1:
---> 34 raise RuntimeError("grad can be implicitly created only for scalar outputs")
35 new_grads.append(torch.ones_like(out))
36 else:
RuntimeError: grad can be implicitly created only for scalar outputs
y2 = torch.sum(y) # summing the elements gives a scalar output, so backward() works without arguments
y2.backward()
for each in zip(x.grad, w.grad, b.grad): # ------> dy2/dx, dy2/dw, dy2/db (first row of each gradient)
    print(each)
(tensor([-0.7213, -3.5908, 0.6278]), tensor([-0.3344]), tensor([1.]))
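An alternative to summing is to pass an explicit gradient tensor to `backward()`; supplying `torch.ones_like(y)` weights every output element by 1 and yields the same gradients as `torch.sum(y).backward()`. A sketch (gradients accumulate, so they are cleared first):
x.grad.zero_(); w.grad.zero_(); b.grad.zero_()   # clear the accumulated gradients
y = torch.mm(x, w) + b
y.backward(gradient=torch.ones_like(y))          # vector-Jacobian product with a vector of ones
x.grad, w.grad, b.grad                           # same values as from y2.backward()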
Using an activation function:
sigmoid()
def activation(x):
    """ Sigmoid activation function

    Arguments
    ---------
    x: torch.Tensor
    """
    return 1/(1+torch.exp(-x))
activation(y)
tensor([[0.2374],
[0.2790],
[0.5323]], grad_fn=<MulBackward0>)
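The same result is available from PyTorch's built-in `torch.sigmoid`, which is also numerically safer than writing the formula by hand:
torch.sigmoid(y)   # built-in sigmoid, same values as activation(y)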
# Features are 3 random normal variables
features = torch.randn((1, 3))
# Define the size of each layer in our network
n_input = features.shape[1] # Number of input units, must match number of input features
n_hidden = 2 # Number of hidden units
n_output = 1 # Number of output units
# Weights for inputs to hidden layer
W1 = torch.randn(n_input, n_hidden)
# Weights for hidden layer to output layer
W2 = torch.randn(n_hidden, n_output)
# and bias terms for hidden and output layers
B1 = torch.randn((1, n_hidden))
B2 = torch.randn((1, n_output))
This is a simple single-hidden-layer architecture.
Now compute the hidden-layer activations H1 and feed them to the output layer to get the output H2:
H1 = torch.matmul(features, W1) + B1
H1 = activation(H1)
H2 = torch.matmul(H1, W2) + B2
activation(H2)
tensor([[0.2690]])
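For comparison, the same 3 -> 2 -> 1 forward pass can be written with `torch.nn` modules. This is only a sketch of the equivalent structure; the modules initialize their own random weights, so the numeric output will differ from activation(H2) above.
import torch.nn as nn

# nn-module version of the same network: 3 inputs -> 2 hidden units -> 1 output
model = nn.Sequential(
    nn.Linear(n_input, n_hidden),
    nn.Sigmoid(),
    nn.Linear(n_hidden, n_output),
    nn.Sigmoid(),
)
model(features)   # shape torch.Size([1, 1]), like activation(H2)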
jovian.commit()
[jovian] Saving notebook..