In [1]:
import numpy as np
import pandas as pd
import torch
In [2]:
X = torch.Tensor([[1, 0, 1, 0], [1, 0, 1, 1], [0, 1, 0, 1]])  # 3 samples, 4 binary features
In [3]:
y = torch.Tensor([[1], [1], [0]])  # one target label per sample
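The inputs and targets line up as a 3×4 matrix against a 3×1 column; a quick shape check (added here as a sketch, not from the original notebook):

print(X.shape, y.shape)  # torch.Size([3, 4]) torch.Size([3, 1])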
In [8]:
# sigmoid activation function
def sigmoid(x):
    return 1 / (1 + torch.exp(-x))
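As a quick spot check (a sketch, not part of the original notebook), the sigmoid should map 0 to 0.5 and saturate toward 0 and 1 at the extremes:

print(sigmoid(torch.Tensor([-10.0, 0.0, 10.0])))  # ~tensor([0.0000, 0.5000, 1.0000])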
In [10]:
# derivative of the sigmoid, written in terms of the sigmoid's output:
# if s = sigmoid(z), then ds/dz = s * (1 - s)
def derivative(x):
    return x * (1 - x)
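Note that derivative() expects the sigmoid's output, not its input; a small check (added as a sketch) confirms it matches the analytic derivative d/dz sigmoid(z) = exp(-z) / (1 + exp(-z))^2:

z = torch.linspace(-5, 5, steps=11)
s = sigmoid(z)
analytic = torch.exp(-z) / (1 + torch.exp(-z)) ** 2
assert torch.allclose(derivative(s), analytic, atol=1e-6)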
In [6]:
# hyperparameters
epoch = 5000                        # number of training iterations
lr = 0.1                            # learning rate
inputlayer_neurons = X.shape[1]     # input layer nodes (one per feature)
hiddenlayer_neurons = 3             # hidden layer nodes
output_neurons = 1                  # output layer nodes
In [7]:
# initialize weights & biases with random normal values
wh = torch.randn(inputlayer_neurons, hiddenlayer_neurons)   # input -> hidden weights
bh = torch.randn(1, hiddenlayer_neurons)                    # hidden layer bias
wout = torch.randn(hiddenlayer_neurons, output_neurons)     # hidden -> output weights
bout = torch.randn(1, output_neurons)                       # output layer bias
In [20]:
# training loop
for i in range(epoch):
    # forward propagation
    hidden_layer_input1 = torch.mm(X, wh)
    # print('X shape: ', X.shape)
    # print('Wh shape: ', wh.shape)
    # print('X.dot.Wh: ', hidden_layer_input1.shape)
    # print('bias: ', bh.shape)
    hidden_layer_input = hidden_layer_input1 + bh
    # print('shape for z', hidden_layer_input.shape)
    hidden_layer_activation = sigmoid(hidden_layer_input)
    # print('shape after sigmoid', hidden_layer_activation.shape)
    output_layer_input1 = torch.mm(hidden_layer_activation, wout)
    output_layer_input = output_layer_input1 + bout
    output = sigmoid(output_layer_input)

    # back propagation
    E = y - output                                              # error at the output
    slope_output_layer = derivative(output)                     # sigmoid slope at the output
    slope_hidden_layer = derivative(hidden_layer_activation)    # sigmoid slope at the hidden layer
    d_output = E * slope_output_layer                           # output delta = error * slope
    Error_at_hidden_layer = torch.mm(d_output, wout.t())        # propagate the output delta back through wout
    d_hiddenlayer = Error_at_hidden_layer * slope_hidden_layer  # hidden layer delta

    # gradient-descent updates for both layers
    wout += torch.mm(hidden_layer_activation.t(), d_output) * lr
    bout += d_output.sum() * lr
    wh += torch.mm(X.t(), d_hiddenlayer) * lr
    bh += d_hiddenlayer.sum(dim=0) * lr
print('actual:\n', y, '\n')
print('output:\n', output)
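For comparison, the same 4-3-1 sigmoid network can be trained with PyTorch's built-in autograd instead of the hand-derived gradients above. A minimal sketch reusing X, y, and the hyperparameter values from earlier cells (nn.Sequential, MSELoss, and SGD are substitutions of mine, not the notebook's code):

import torch.nn as nn

# autograd version of the same network (a sketch, assuming the layer
# sizes and learning rate defined above)
model = nn.Sequential(
    nn.Linear(4, 3),   # plays the role of wh/bh
    nn.Sigmoid(),
    nn.Linear(3, 1),   # plays the role of wout/bout
    nn.Sigmoid(),
)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = nn.MSELoss()  # squared error stands in for E = y - output

for i in range(5000):
    optimizer.zero_grad()
    pred = model(X)
    loss = loss_fn(pred, y)
    loss.backward()    # autograd computes the same gradients derived by hand above
    optimizer.step()

print('predicted:\n', model(X))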