In [ ]:
# for Python 2.x users
from __future__ import print_function
import torch
In [ ]:
x = torch.Tensor(4, 3)  # construct an uninitialized 4 x 3 matrix
print(x)
In [ ]:
x = torch.rand(4, 3)  # construct a randomly initialized matrix
print(x)
In [ ]:
print(x.size())  # torch.Size is a tuple, so it supports all tuple operations
In [ ]:
y = torch.rand(4, 3)
print(x + y)  # addition: operator syntax
In [ ]:
print(torch.add(x, y))  # addition: function syntax
In [ ]:
result = torch.Tensor(4, 3)  # must match the size of x and y
torch.add(x, y, out=result)  # addition: giving an output tensor
print(result)
In [ ]:
y.add_(x)  # in-place addition: methods that mutate a tensor are post-fixed with _
print(y)
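Any operation that mutates a tensor in place is post-fixed with an underscore. A quick illustration on a throwaway tensor (not part of the running example):
In [ ]:
z = torch.ones(2, 3)
z.t_()           # in-place transpose; z.t() would return a new tensor instead
print(z.size())  # torch.Size([3, 2])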
In [ ]:
print(x[:, 1])  # numpy-like indexing: the second column of x
In [ ]:
x = torch.randn(4, 4)
y = x.view(16)  # reshape to a flat tensor with 16 elements
# the size -1 is inferred from the other dimensions
z = x.view(-1, 8)
print(x.size(), y.size(), z.size())
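view() returns a view, not a copy: y and z share the same underlying data as x. A quick check, assuming the tensors above are still in scope:
In [ ]:
x[0, 0] = 100
print(y[0])  # reflects the change made through x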
In [ ]:
if torch.cuda.is_available():
    x = x.cuda()             # move the tensors onto the GPU
    y = y.view(4, 4).cuda()  # reshape y back to match x before adding
    print(x + y)
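Results can be moved back to the CPU with .cpu(); a minimal sketch, only meaningful when the cell above actually ran on the GPU:
In [ ]:
if torch.cuda.is_available():
    z = (x + y).cpu()  # compute on the GPU, then copy the result back
    print(z)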
In [ ]:
import torch
from torch.autograd import Variable
# Variable wraps a Tensor and records the operations applied to it
x = Variable(torch.ones(2, 2), requires_grad=True)
print(x)
In [ ]:
y = x + 2  # y was created by an operation, so it has a grad_fn
print(y)
In [ ]:
print(x.grad_fn)  # None: x was created by the user, not by an operation
print(y.grad_fn)
In [ ]:
z = y * y * 3
out = z.mean()
print(z, out)
In [ ]:
out.backward()  # out is a scalar, so this is equivalent to out.backward(torch.Tensor([1.0]))
In [ ]:
print(x.grad)  # d(out)/dx
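You should see a 2 x 2 matrix of 4.5: out = (1/4) * sum_i 3 * (x_i + 2)^2, so d(out)/dx_i = (3/2) * (x_i + 2), which is 4.5 at x_i = 1.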
In [ ]:
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F

# inherit from nn.Module
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # 1 input image channel, 6 output channels, 5x5 square convolution
        # kernel
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # an affine operation: y = Wx + b
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # If the window is square, you can specify a single number instead
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

net = Net()
print(net)
In [ ]:
params = list(net.parameters())
print(len(params))
print(params[0].size())  # conv1's .weight
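To see which parameter is which, the shapes can be listed by name (a short sketch; assumes nn.Module.named_parameters is available in your PyTorch version):
In [ ]:
for name, p in net.named_parameters():
    print(name, p.size())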
In [ ]:
input = Variable(torch.randn(1, 1, 32, 32))  # dummy input: 1 sample, 1 channel, 32 x 32
out = net(input)
print(out)
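out is non-scalar, so backpropagating from it requires an explicit gradient argument of the same shape; for example, with random gradients:
In [ ]:
net.zero_grad()  # zero the gradient buffers of all parameters
out.backward(torch.randn(1, 10))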
In [ ]:
# unsqueeze(0) adds a fake batch dimension of size 1; useful because
# torch.nn only supports inputs that are a mini-batch of samples
y.unsqueeze(0).size()
In [ ]:
output = net(input)
target = Variable(torch.arange(1, 11))  # a dummy target, for example
target = target.view(1, -1)  # reshape it to the same shape as the output
criterion = nn.MSELoss()
loss = criterion(output, target)
print(loss)
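nn.MSELoss computes the mean squared error between the output and the target, so the value above can be checked by hand:
In [ ]:
print(((output - target) ** 2).mean())  # should match the loss printed above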
In [ ]:
# follow grad_fn a few hops back through the computation graph
loss.grad_fn.next_functions[0][0].next_functions[0][0].next_functions[0][0]
In [ ]:
net.zero_grad()  # zeroes the gradient buffers of all parameters
print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)
loss.backward()
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
In [ ]:
# vanilla SGD by hand: weight = weight - learning_rate * gradient
learning_rate = 0.01
for f in net.parameters():
    f.data.sub_(f.grad.data * learning_rate)
In [ ]:
import torch.optim as optim
# create the optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01)
# in the training loop:
optimizer.zero_grad()  # zero the gradient buffers
output = net(input)
loss = criterion(output, target)
loss.backward()
optimizer.step()  # does the update
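Putting it all together: a minimal sketch of a training loop over the same dummy input/target pair (a real loop would iterate over batches from a dataset):
In [ ]:
for step in range(10):
    optimizer.zero_grad()             # zero the gradient buffers
    output = net(input)               # forward pass
    loss = criterion(output, target)  # compute the loss
    loss.backward()                   # backprop
    optimizer.step()                  # update the parameters
    print(step, loss.data[0])         # loss is a Variable; .data[0] is its scalar value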