In [8]:
import numpy as np

In [9]:
# Defining the sigmoid function for activations
def sigmoid(x):
    return 1/(1+np.exp(-x))

In [10]:
# Derivative of the sigmoid function
def sigmoid_prime(x):
    return sigmoid(x) * (1 - sigmoid(x))
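
A quick check, not part of the original walkthrough: sigmoid_prime can be compared against a central-difference estimate of the derivative. The names eps, x_check and numeric, and the step size, are introduced here purely for illustration.

In [ ]:
# Illustrative sanity check: finite-difference approximation of the derivative
eps = 1e-6                 # arbitrary small step (assumed value)
x_check = 0.07             # same value that h takes later in this notebook
numeric = (sigmoid(x_check + eps) - sigmoid(x_check - eps)) / (2 * eps)
print(sigmoid_prime(x_check), numeric)   # the two values should agree closely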

In [23]:
# Input data
x = np.array([0.1, 0.3])
print(x, x.shape)


[ 0.1  0.3] (2,)

In [19]:
# Target
y = 0.2

In [24]:
# Input to output weights
weights = np.array([-0.8, 0.5])
print(weights, weights.shape)


[-0.8  0.5] (2,)

In [25]:
# The learning rate, eta in the weight step equation
learnrate = 0.5

In [28]:
# the linear combination performed by the node (h in f(h) and f'(h))
h = x[0]*weights[0] + x[1]*weights[1]
h2 = np.dot(x,weights)
# or h = np.dot(x, weights)
print(h, h2)


0.07 0.07

In [29]:
# The neural network output (y-hat)
nn_output = sigmoid(h)
print(nn_output)


0.517492857666

In [30]:
# output error (y - y-hat)
error = y - nn_output
print(y, error)


0.2 -0.317492857666

In [32]:
# output gradient (f'(h))
output_grad = sigmoid_prime(h)
print(output_grad)


0.249693999931

In [33]:
# error term (lowercase delta)
error_term = error * output_grad
print(error_term)


-0.0792760615801

In [34]:
# Gradient descent step 
del_w = [ learnrate * error_term * x[0],
          learnrate * error_term * x[1]]
# or del_w = learnrate * error_term * x
print(del_w)


[-0.0039638030790068828, -0.011891409237020648]
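
As a follow-up sketch (not part of the original cells), the step can be applied and the forward pass re-run to confirm that the error magnitude shrinks. A plain update of the form w <- w + del_w is assumed; weights_new, h_new and nn_output_new are names introduced here for illustration.

In [ ]:
# Illustrative follow-up: apply the gradient descent step and recompute the output
weights_new = weights + np.array(del_w)   # del_w above is a Python list
h_new = np.dot(x, weights_new)
nn_output_new = sigmoid(h_new)
print(weights_new)
print(nn_output_new, y - nn_output_new)   # error should be slightly smaller in magnitude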
