In [8]:
import numpy as np
In [9]:
# Defining the sigmoid function for activations
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
In [10]:
# Derivative of the sigmoid function
def sigmoid_prime(x):
    return sigmoid(x) * (1 - sigmoid(x))
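A quick sanity check, not part of the original walkthrough: the analytic derivative f'(x) = f(x)(1 - f(x)) should agree with a centered finite-difference estimate at a few sample points.
In [ ]:
# Sketch: compare sigmoid_prime against a numerical derivative
eps = 1e-6
for test_x in [-2.0, 0.0, 0.07, 3.0]:
    numeric = (sigmoid(test_x + eps) - sigmoid(test_x - eps)) / (2 * eps)
    print(test_x, sigmoid_prime(test_x), numeric)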
In [23]:
# Input data
x = np.array([0.1, 0.3])
print(x, x.shape)
In [19]:
# Target
y = 0.2
In [24]:
# Input to output weights
weights = np.array([-0.8, 0.5])
print(weights, weights.shape)
In [25]:
# The learning rate, eta in the weight step equation
learnrate = 0.5
In [28]:
# The linear combination performed by the node (h in f(h) and f'(h))
h = x[0]*weights[0] + x[1]*weights[1]
# or, equivalently, as a dot product
h2 = np.dot(x, weights)
print(h, h2)
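Working the numbers by hand: h = 0.1*(-0.8) + 0.3*0.5 = -0.08 + 0.15 = 0.07 (up to floating-point rounding), and both forms agree.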
In [29]:
# The neural network output (y-hat)
nn_output = sigmoid(h)
print(nn_output)
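With h = 0.07 close to zero, the output sits near the middle of the sigmoid's range: sigmoid(0.07) ≈ 0.5175.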
In [30]:
# Output error (y - y-hat)
error = y - nn_output
print(y, error)
In [32]:
# Output gradient (f'(h))
output_grad = sigmoid_prime(h)
print(output_grad)
In [33]:
# Error term (lowercase delta)
error_term = error * output_grad
print(error_term)
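Plugging in the numbers above: error ≈ 0.2 - 0.5175 = -0.3175 and output_grad ≈ 0.5175 * (1 - 0.5175) ≈ 0.2497, so error_term ≈ -0.3175 * 0.2497 ≈ -0.0793.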
In [34]:
# Gradient descent step: delta_w_i = learnrate * error_term * x_i
del_w = [learnrate * error_term * x[0],
         learnrate * error_term * x[1]]
# or, vectorized: del_w = learnrate * error_term * x
print(del_w)
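With the values above, del_w works out to roughly [-0.0040, -0.0119]. The walkthrough stops at this single weight step; as an illustrative sketch (an extension, not part of the original), repeatedly applying the update to this one data point should slowly pull the output toward the target:
In [ ]:
# Sketch: repeat the gradient descent step on the single (x, y) pair
# (hypothetical extension; reuses x, y, weights, learnrate from above)
w = weights.copy()
for step in range(5):
    out = sigmoid(np.dot(x, w))
    delta = (y - out) * sigmoid_prime(np.dot(x, w))
    w += learnrate * delta * x
    print(step, out, y - out)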