In [1]:
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
In [2]:
torch.rand(5,3)  # 5x3 tensor of uniform random samples drawn from [0, 1)
Out[2]:
In [3]:
# torch.Tensor(5, 3) allocated an *uninitialized* 5x3 tensor through the
# legacy type-constructor API; torch.empty is the modern, explicit equivalent.
torch.empty(5, 3)
Out[3]:
In [4]:
# Two random 5x3 tensors used by the element-wise examples below.
x = torch.rand(5, 3)
y = torch.rand(5, 3)
In [5]:
x.size()  # the tensor's shape, a torch.Size (behaves like a tuple)
Out[5]:
In [6]:
x.size()[0]  # first dimension: number of rows (5 here)
Out[6]:
In [7]:
x + y  # element-wise addition of two same-shaped tensors
Out[7]:
In [8]:
y[:, :2]  # numpy-style slicing: all rows, first two columns
Out[8]:
In [9]:
x.numpy()  # view the CPU tensor as a numpy array (shares memory, per PyTorch docs)
Out[9]:
In [10]:
numpy_x = x.numpy()  # CPU tensor -> numpy array
type(numpy_x)  # numpy.ndarray
Out[10]:
In [11]:
# Round-trip back to torch. NOTE(review): from_numpy shares memory with the
# source array (per the PyTorch docs), so mutating one mutates the other.
torch_x = torch.from_numpy(numpy_x)
print(type(torch_x), type(x))
In [12]:
x + y  # same element-wise addition as before, computed on the CPU
Out[12]:
In [13]:
# Move both tensors to the GPU and add them there.
# NOTE(review): this cell requires a CUDA-enabled build and an available
# GPU device; it raises a RuntimeError otherwise.
x = x.cuda()
y = y.cuda()
x + y
Out[13]:
In [14]:
# torch.cuda.FloatTensor(5, 3) is the legacy per-type constructor and yields
# an *uninitialized* GPU tensor; the modern equivalent allocates it directly
# on the CUDA device. Still requires a CUDA-enabled build and a GPU.
x = torch.empty(5, 3, device='cuda')
These are the core components of PyTorch. If you get them right, you will have a solid foundation from day one.
In [15]:
# torch.tensor() is the preferred data factory; the legacy torch.Tensor(...)
# constructor is discouraged because it conflates size and data arguments
# (torch.Tensor(5) is a 5-element tensor, torch.tensor(5) is a scalar).
x = torch.tensor([12.3])
In [16]:
# Wrap x for autograd. NOTE(review): Variable is the pre-0.4 API and is
# deprecated — plain tensors now carry requires_grad themselves.
x = Variable(x, requires_grad=True)
print('x.grad: ', x.grad)  # None until backward() has run
print('x.data: ', x.data)  # the underlying values
In [17]:
# Build a small computation graph from x.
# NOTE(review): with x = 12.3, x ** x is astronomically large, so tanh
# saturates at 1.0 and the gradient printed later underflows to 0 — the
# demo would be more instructive with a small input, e.g. 0.5.
y = (x ** x) * (x - 2)
z = torch.tanh(y)  # F.tanh is deprecated in torch.nn.functional; use torch.tanh
In [18]:
# Backpropagate from the scalar z; this populates x.grad with dz/dx.
z.backward()
In [19]:
print('x.grad: ', x.grad)  # now filled in by z.backward() above
print('x.data: ', x.data)  # the underlying values
In [20]:
# Walk the autograd graph backwards from z.
# `.creator` / `.previous_functions` were removed from autograd long ago;
# the graph is now traversed via `.grad_fn` and its `.next_functions`.
print('grad_fn of z: ', z.grad_fn)
print('grad_fn of grad_fn of z: ', z.grad_fn.next_functions[0][0])
print('grad_fn of grad_fn of grad_fn of z: ', z.grad_fn.next_functions[0][0].next_functions[0][0])
In [ ]: