In [5]:
import torch

Create a tensor and set requires_grad=True to track its computation history.

In [9]:
x = torch.ones(2, 2, requires_grad=True)
print(x)
tensor([[1., 1.],
        [1., 1.]], requires_grad=True)
In [10]:
y = x + 2
print(y)
tensor([[3., 3.],
        [3., 3.]], grad_fn=<AddBackward0>)
In [11]:
print(y.grad_fn)
<AddBackward0 object at 0x7f9258b42be0>
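Tensors produced by an operation carry a grad_fn referencing that operation, while tensors created directly by the user are graph leaves and have no grad_fn. A quick check (a sketch, not executed in this notebook):

print(x.grad_fn)  # None: x was created directly by the user, not by an operation
print(x.is_leaf)  # True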
In [12]:
z = y * y * 3
z
Out[12]:
tensor([[27., 27.],
        [27., 27.]], grad_fn=<MulBackward0>)
In [14]:
out = z.mean()
print(z, out)  # print both tensors
# MulBackward0 records the multiplication, MeanBackward0 the mean
tensor([[27., 27.],
        [27., 27.]], grad_fn=<MulBackward0>) tensor(27., grad_fn=<MeanBackward0>)

.requires_grad_( ... ) changes an existing tensor's requires_grad flag in place. If not specified, the flag defaults to False.

In [15]:
a = torch.randn(2, 2)
In [16]:
a = ((a * 3) / (a - 1))
In [17]:
print(a.requires_grad)
False
In [18]:
a.requires_grad_(True)
Out[18]:
tensor([[-0.8642,  1.1833],
        [ 1.7708,  6.3330]], requires_grad=True)
In [19]:
b = (a * a).sum()
In [20]:
print(b.grad_fn)
<SumBackward0 object at 0x7f92588567b8>
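Since b is a scalar, calling backward on it would fill a.grad with d(b)/d(a) = 2 * a. A sketch of that check (not run in the original notebook):

b.backward()   # b = (a * a).sum(), so d(b)/d(a) = 2 * a
print(a.grad)  # should equal 2 * a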

Gradients

In [27]:
out.backward()  # out is a scalar, so this is equivalent to out.backward(torch.tensor(1.))
In [28]:
print(x.grad)  # grad is short for gradient
tensor([[4.5000, 4.5000],
        [4.5000, 4.5000]])
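The value 4.5 follows from the chain rule: out = (1/4) * Σᵢ zᵢ with zᵢ = 3(xᵢ + 2)², so ∂out/∂xᵢ = (3/2)(xᵢ + 2), which equals 4.5 at xᵢ = 1.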
In [29]:
x = torch.randn(3, requires_grad=True)
y = x * 2
while y.data.norm() < 1000:
    y = y * 2
print(y)
tensor([ 598.5151, -414.5598, -1321.2048], grad_fn=<MulBackward0>)
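Here y is no longer a scalar, so autograd cannot hand back the full Jacobian directly; instead, backward accepts a vector v and computes the vector-Jacobian product vᵀ·J.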
In [30]:
gradients = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float)
y.backward(gradients)

print(x.grad)
tensor([1.0240e+02, 1.0240e+03, 1.0240e-01])
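In this run the loop doubled y until y = 2¹⁰ · x = 1024 · x, so each entry of x.grad is just 1024 times the corresponding entry of gradients: [102.4, 1024.0, 0.1024].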
In [31]:
import jovian
In [ ]:
jovian.commit()
[jovian] Saving notebook..