In [8]:
    
import torch
from torch.autograd import Variable
from torch import nn
    
In [9]:
    
import matplotlib.pyplot as plt
%matplotlib inline
torch.manual_seed(1)
    
In [41]:
    
# X and Y training data
x_train = torch.Tensor([[1], [2], [3]])
y_train = torch.Tensor([[1], [2], [3]])
# x_train = torch.Tensor([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168], 
#                     [9.779], [6.182], [7.59], [2.167], [7.042], 
#                     [10.791], [5.313], [7.997], [3.1]])
# y_train = torch.Tensor([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573], 
#                     [3.366], [2.596], [2.53], [1.221], [2.827], 
#                     [3.465], [1.65], [2.904], [1.3]])
x, y = Variable(x_train), Variable(y_train)
plt.scatter(x.data.numpy(), y.data.numpy())
plt.show()
    
    
In [13]:
    
W = Variable(torch.rand(1,1))
x, W, x.mm(W)
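
A quick shape check (my addition, not in the original notebook): x is (3, 1) and W is (1, 1), so x.mm(W) is (3, 1), one prediction per sample.

In [ ]:

print(x.size(), W.size(), x.mm(W).size())   # torch.Size([3, 1]) x [1, 1] -> [3, 1]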
    
In [17]:
    
plt.ion()   # interactive mode so the figure can be redrawn inside the loop
cost_func = torch.nn.MSELoss()                 # mean squared error (MSE) cost function
lr = 0.01
for step in range(300):
    prediction = x.mm(W)                       # our model: XW
    cost = cost_func(prediction, y)            # argument order is (prediction, target)
    gradient = 2 * (prediction - y).view(-1).dot(x.view(-1)) / len(x)  # dCost/dW = (2/N) * x^T (XW - y)
    W -= lr * gradient                         # gradient-descent update with learning rate lr
    
    if step % 5 == 0:
        # plot and show learning process
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        plt.title('cost=%.4f, w=%.4f, grad=%.4f' % (cost.data[0], W.data[0][0], gradient.data[0]), fontdict={'size': 20} )
        plt.show()
        plt.pause(0.1)
        
print('Linear Model Optimization is Done!')
plt.ioff()
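
The loop above hand-derives the gradient. A minimal sketch (my addition, not from the original notebook) of the same update using autograd instead, so the derivative need not be worked out by hand:

In [ ]:

W = Variable(torch.rand(1, 1), requires_grad=True)
lr = 0.01
for step in range(300):
    prediction = x.mm(W)
    cost = cost_func(prediction, y)
    cost.backward()                  # autograd computes dCost/dW into W.grad
    W.data -= lr * W.grad.data       # update the underlying tensor in place
    W.grad.data.zero_()              # gradients accumulate, so reset each step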
    
In [34]:
    
x_test = Variable(torch.Tensor([[5]]))
y_test = x_test.mm(W)
print(y_test)
    
    
In [30]:
    
model = nn.Linear(1, 1, bias=True)    # our model: XW + b
cost_func = nn.MSELoss()              # mean squared error (MSE) cost function
print(model)
model.weight, model.bias
    
    
In [31]:
    
plt.ion()   # interactive mode so the figure can be redrawn inside the loop
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
for step in range(300):
    
    prediction = model(x)               # forward pass: predict from x
    cost = cost_func(prediction, y)     # argument order is (prediction, target)
    optimizer.zero_grad()               # clear gradients from the previous iteration
    cost.backward()                     # compute gradients of the cost w.r.t. the parameters
    optimizer.step()                    # update the parameters
    #print ('dL/dw: ', model.weight.grad)
    #print ('dL/db: ', model.bias.grad)
    
    if step % 5 == 0:
        # plot and show learning process
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        plt.title('cost=%.4f, w=%.4f, b=%.4f' % (cost.data[0], model.weight.data[0][0],model.bias.data[0]), fontdict={'size': 20} )
        plt.show()
        plt.pause(0.1)
        
print('Linear Model Optimization is Done!')
plt.ioff()
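
For larger models the same XW + b network is usually written as an nn.Module subclass. A sketch (my addition, not from the original notebook) of that equivalent formulation:

In [ ]:

class LinearModel(nn.Module):
    def __init__(self):
        super(LinearModel, self).__init__()
        self.linear = nn.Linear(1, 1, bias=True)   # same XW + b as above

    def forward(self, x):
        return self.linear(x)

# trains exactly like the plain nn.Linear version:
# optimizer = torch.optim.SGD(LinearModel().parameters(), lr=0.01)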
    
In [33]:
    
x_test = Variable(torch.Tensor([[7]]))
y_test = model(x_test)
print('input: %.4f, output: %.4f' % (x_test.data[0][0], y_test.data[0][0]) )
    
    
In [44]:
    
W_val, cost_val = [], []
for i in range(-30, 51):
    W = i * 0.1
    model.weight.data.fill_(W)          # sweep W; the bias keeps its trained value (~0 here)
    cost = cost_func(model(x), y)

    #print('{:.2f}, {:.2f}'.format(W, cost.data[0]))
    W_val.append(W)
    cost_val.append(cost.data[0])
# ------------------------------------------ #
plt.plot(W_val, cost_val, 'ro')
plt.ylabel('Cost(W)')
plt.xlabel('W')
plt.show()
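
Since the trained bias is close to 0 for the y = x data, the sweep traces the parabola Cost(W) = (W - 1)^2 * mean(x_i^2). A sketch (my addition, assuming b ~ 0) overlaying that analytic curve:

In [ ]:

import numpy as np
W_axis = np.linspace(-3.0, 5.0, 81)
analytic = (W_axis - 1.0) ** 2 * np.mean(np.array([1.0, 2.0, 3.0]) ** 2)
plt.plot(W_val, cost_val, 'ro', W_axis, analytic, 'b--')
plt.ylabel('Cost(W)')
plt.xlabel('W')
plt.show()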
    
    
In [45]:
    
import numpy as np
    
In [47]:
    
xy = np.loadtxt('data-01-test-score.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]
# Make sure the shape and data are OK
print('shape: ', x_data.shape, '\nlength:', len(x_data), '\n', x_data )
print('shape: ', y_data.shape, '\nlength:', len(y_data), '\n', y_data )
x, y = Variable(torch.from_numpy(x_data)), Variable(torch.from_numpy(y_data))
    
    
In [54]:
    
# Our hypothesis XW+b
mv_model = nn.Linear(3, 1, bias=True)
print( mv_model )
print( 'weight: ', mv_model.weight ) 
print( 'bias: ', mv_model.bias )
    
    
In [57]:
    
# cost criterion
cost_func = nn.MSELoss()
# Minimize
optimizer = torch.optim.SGD(mv_model.parameters(), lr=1e-5)
# Train the model
for step in range(2001):
    optimizer.zero_grad()
    
    # Our model
    prediction = mv_model(x)
    cost = cost_func(prediction, y)
    cost.backward()    
    optimizer.step()
    if step % 50 == 0:
        print(step, "Cost: ", cost.data.numpy(), "\nPrediction:\n", prediction.data.t().numpy())
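
The raw scores are on the order of 100, which is why the learning rate above must be as small as 1e-5. A sketch (my addition, not from the original notebook) of standardizing each input column so a larger learning rate is stable:

In [ ]:

# standardize per column: zero mean, unit variance
x_data_std = (x_data - x_data.mean(axis=0)) / x_data.std(axis=0)
x_s = Variable(torch.from_numpy(x_data_std))
# a fresh nn.Linear(3, 1) could then train on x_s with e.g. lr=0.01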
    
    
In [61]:
    
mv_model.state_dict()
    
In [ ]:
    
# Predict my score
print("Your score will be ", mv_model(Variable(torch.Tensor([[100, 70, 101]]))).data.numpy())
print("Other scores will be ", mv_model(Variable(torch.Tensor([[60, 70, 110], [90, 100, 80]]))).data.numpy())
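
Finally, a sketch (my addition; the filename is hypothetical) of saving the learned parameters via state_dict and restoring them later:

In [ ]:

torch.save(mv_model.state_dict(), 'mv_model.pth')     # serialize the parameters
mv_model.load_state_dict(torch.load('mv_model.pth'))  # restore them into the model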