In [1]:
import numpy as np
In [2]:
def sigmoid(x):
    """
    Calculate the sigmoid of x.
    """
    return 1 / (1 + np.exp(-x))
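A quick sanity check, added here for illustration (not part of the original exercise): the sigmoid should be exactly 0.5 at zero and saturate toward 1 and 0 for large positive and negative inputs.
In [ ]:
# Illustrative sanity check of sigmoid at a few sample points
print(sigmoid(0))    # 0.5
print(sigmoid(10))   # ~0.99995
print(sigmoid(-10))  # ~0.0000454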
In [3]:
def sigmoid_prime(x):
    """
    Calculate the derivative of the sigmoid function.
    """
    return sigmoid(x) * (1 - sigmoid(x))
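One way to convince yourself this derivative is correct, sketched here as an illustrative check rather than part of the original exercise, is to compare it against a centered finite-difference approximation.
In [ ]:
# Illustrative check: analytic derivative vs. a centered
# finite-difference approximation at a sample point x0
x0, eps = 0.8, 1e-6
numeric = (sigmoid(x0 + eps) - sigmoid(x0 - eps)) / (2 * eps)
print(sigmoid_prime(x0), numeric)  # both ~0.2139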
In [14]:
learnrate = 0.5
x = np.array([1, 2, 3, 4])
y = np.array(0.5)
print('x:', x, 'y:', y, 'learnrate:', learnrate)
In [15]:
# Initial weights
w = np.array([0.5, -0.5, 0.3, 0.1])
print('w:', w)
In [16]:
### Calculate one gradient descent step for each weight
### Note: Some steps have been consolidated, so there are
### fewer variable names than in the sample code above

# Calculate the node's linear combination of inputs and weights
h = np.dot(x, w)
print('h:', h)
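Worked by hand for illustration: h = 1(0.5) + 2(-0.5) + 3(0.3) + 4(0.1) = 0.5 - 1.0 + 0.9 + 0.4 = 0.8.
In [ ]:
# Confirm the hand calculation above (illustrative)
assert np.isclose(h, 0.8)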
In [17]:
# Calculate the output of the neural network
nn_output = sigmoid(h)
print('Neural network output:', nn_output)
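Worked by hand for illustration: with h = 0.8, the output is sigmoid(0.8) = 1 / (1 + e^(-0.8)) ≈ 0.6900.
In [ ]:
# Confirm the hand calculation above (illustrative)
assert np.isclose(nn_output, 0.6900, atol=1e-4)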
In [18]:
# Calculate the error of the neural network
error = y - nn_output
print('Amount of error:', error)
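Worked by hand for illustration: error = 0.5 - 0.6900 ≈ -0.1900. The negative sign means the network's output is too high, so the upcoming update should push the output down.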
In [19]:
# Calculate the error term
# Remember, this requires the output gradient, which we haven't
# specifically added a variable for
error_term = error * sigmoid_prime(h)
print('Error term:', error_term)
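Worked by hand for illustration: sigmoid_prime(0.8) = 0.6900 * (1 - 0.6900) ≈ 0.2139, so error_term ≈ -0.1900 * 0.2139 ≈ -0.0406.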
In [20]:
# Calculate the change in weights
del_w = learnrate * error_term * x
print('Change in weights:', del_w)
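Worked by hand for illustration: del_w = 0.5 * (-0.0406) * [1, 2, 3, 4] ≈ [-0.0203, -0.0406, -0.0610, -0.0813]. Below is a minimal sketch, added beyond the original exercise, of actually applying this single update: adding del_w to w should move the output closer to the target y = 0.5.
In [ ]:
# Illustrative sketch (not part of the original exercise):
# apply the weight update and check the output moves toward y
w_new = w + del_w
new_output = sigmoid(np.dot(x, w_new))
print('Old output:', nn_output, 'New output:', new_output)
# new_output (~0.547) is closer to y = 0.5 than the old ~0.690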