In [44]:
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F  # needed for F.relu, F.sigmoid, etc. used below
torch.manual_seed(1)  # reproducible
Out[44]:
In [45]:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
In [46]:
x = torch.linspace(-5, 5, 200)  # x data (tensor), shape=(200,)
x = Variable(x)
x_np = x.data.numpy() # numpy array for plotting
In [47]:
y_relu = F.relu(x).data.numpy()
y_sigmoid = F.sigmoid(x).data.numpy()
y_tanh = F.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy()
# y_softmax = F.softmax(x, dim=0)
# softmax is a special kind of activation function: it outputs a
# probability distribution, so its values sum to 1.
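As a quick sanity check (a sketch, assuming a PyTorch version where F.softmax takes a dim argument), the softmax outputs really do sum to 1:
In [ ]:
# softmax turns the 200 x values into a probability distribution
probs = F.softmax(x, dim=0)
print(probs.data.sum())  # very close to 1.0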
In [48]:
plt.figure(1, figsize=(8, 6))
plt.subplot(221)
plt.plot(x_np, y_relu, c='red', label='relu')
plt.ylim((-1, 5))
plt.legend(loc='best')
plt.subplot(222)
plt.plot(x_np, y_sigmoid, c='red', label='sigmoid')
plt.ylim((-0.2, 1.2))
plt.legend(loc='best')
plt.subplot(223)
plt.plot(x_np, y_tanh, c='red', label='tanh')
plt.ylim((-1.2, 1.2))
plt.legend(loc='best')
plt.subplot(224)
plt.plot(x_np, y_softplus, c='red', label='softplus')
plt.ylim((-0.2, 6))
plt.legend(loc='best')
plt.show()
In [117]:
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1) # x data (tensor), shape=(100, 1)
y = x.pow(2) + 0.2*torch.rand(x.size()) # noisy y data (tensor), shape=(100, 1)
x, y = Variable(x), Variable(y)
plt.scatter(x.data.numpy(), y.data.numpy())
plt.show()
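The unsqueeze call matters because nn.Linear expects 2-D (batch, features) input; a quick shape check (sketch):
In [ ]:
# unsqueeze adds a feature dimension: (100,) -> (100, 1)
print(torch.linspace(-1, 1, 100).size())                          # torch.Size([100])
print(torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1).size())  # torch.Size([100, 1])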
In [137]:
# Hypothesis: two linear layers with a ReLU activation in between
hidden = nn.Linear(1, 10, bias=True)
activation = nn.ReLU()
output = nn.Linear(10, 1, bias=True)
net = torch.nn.Sequential(hidden, activation, output)
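The same network can also be defined by subclassing nn.Module; a minimal equivalent sketch (the class name Net is just illustrative):
In [ ]:
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = nn.Linear(1, 10)   # hidden layer: 1 -> 10
        self.output = nn.Linear(10, 1)   # output layer: 10 -> 1

    def forward(self, x):
        x = F.relu(self.hidden(x))       # ReLU activation after hidden layer
        return self.output(x)

# net = Net()  # interchangeable with the Sequential version above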
In [131]:
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
loss_func = torch.nn.MSELoss() # this is for regression mean squared loss
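MSELoss is just the mean of the squared differences; a quick equivalence check (sketch, using the same .data[0] indexing as the rest of this notebook):
In [ ]:
# loss_func(pred, y) should match a hand-computed mean squared error
pred = net(x)
print(loss_func(pred, y).data[0])
print(((pred - y) ** 2).mean().data[0])  # same value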
In [133]:
for t in range(200):
    prediction = net(x)               # input x and predict based on x
    loss = loss_func(prediction, y)   # must be (1. nn output, 2. target)
    optimizer.zero_grad()             # clear gradients for next train
    loss.backward()                   # backpropagation, compute gradients
    optimizer.step()                  # apply gradients
    if t % 10 == 0:
        # plot and show learning process
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        plt.text(0.5, 0, 'Loss=%.4f' % loss.data[0], fontdict={'size': 20, 'color': 'red'})
        plt.show()
        plt.pause(0.2)
plt.ioff()
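After training, the net can be queried on new inputs; a small sketch (the query point 0.5 is arbitrary):
In [ ]:
# Predict y for one new x; shape (1, 1) matches the training data layout
x_new = Variable(torch.Tensor([[0.5]]))
print(net(x_new))  # should be near 0.5**2 + 0.1, i.e. about 0.35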
In [138]:
torch.save(net.state_dict(), 'nonlinear.pkl')
net.state_dict()
Out[138]:
In [139]:
net.load_state_dict(torch.load('nonlinear.pkl'))
net.state_dict()
Out[139]:
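Because the state_dict stores only parameters, loading it requires first building a net with the same architecture; a sketch:
In [ ]:
# A freshly constructed net with matching layers can absorb the saved weights
net2 = torch.nn.Sequential(nn.Linear(1, 10), nn.ReLU(), nn.Linear(10, 1))
net2.load_state_dict(torch.load('nonlinear.pkl'))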
In [ ]:
# Alternative: pickle the whole model object (loading then requires the
# original class/module definitions to be importable)
torch.save(net, 'net.pkl')
net = torch.load('net.pkl')