In [1]:
import numpy as np
In [2]:
import torch
from torch.autograd import Variable
In [3]:
x = Variable(torch.ones(2, 2), requires_grad=True) # requires_grad=True: track operations on x so gradients can be computed
print(x)
In [4]:
print(x.data) # the raw tensor wrapped by the Variable
In [5]:
print(x.grad) # no gradient yet; it is populated by backward()
In [6]:
y = x + 2
print(y)
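Because y was created by an operation on a Variable, it keeps a reference to the function that produced it, while a leaf Variable like x does not. A minimal check (the attribute is grad_fn in PyTorch 0.4+; older releases called it creator):

print(y.grad_fn) # e.g. <AddBackward0 ...>: y records that it came from an addition
print(x.grad_fn) # None: x was created by the user, not by an operation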
In [7]:
z = y * y * 3 # element-wise: z = 3(x+2)^2
out = z.sum() # scalar output, so backward() needs no arguments
print(z, out)
In [8]:
out.backward() # backpropagation
print(x.grad)
$z_i = 3(x_i + 2)^2$
$out = \sum_i z_i = \sum_i 3(x_i + 2)^2$
$\frac{\partial out}{\partial x_i} = 6(x_i + 2)$
$= 6 \cdot 3 = 18 \quad \text{(at } x_i = 1\text{)}$
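A quick numerical check that autograd matches the hand derivation (a minimal sketch reusing x from the cells above):

expected = 6 * (x.data + 2) # 6(x+2), element-wise
print(torch.equal(x.grad.data, expected)) # True: every entry is 18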
Tensors and Variables expose the same API. Variables additionally support autograd: they keep track of how they were created, which lets PyTorch build the computation graph needed for backpropagation.
If you want to use autograd, you have to wrap your tensors in Variables.
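As a minimal sketch of that workflow (values chosen only for illustration): wrap a plain tensor in a Variable, build a scalar from it, and call backward() to fill in the gradient.

t = torch.Tensor([1.0, 2.0, 3.0]) # a plain tensor: no history is tracked
v = Variable(t, requires_grad=True) # wrapping it enables autograd
loss = (v * v).sum() # builds a small computation graph
loss.backward() # populates v.grad with d(loss)/dv = 2v
print(v.grad) # contains [2, 4, 6]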
In [9]:
Q = torch.eye(3) # 3x3 identity matrix
In [10]:
Q
Out[10]:
 1  0  0
 0  1  0
 0  0  1
[torch.FloatTensor of size 3x3]