In [1]:
import numpy as np

In [2]:
def sigmoid(x):
    """
    Calculate the sigmoid of x.
    """
    return 1 / (1 + np.exp(-x))

In [3]:
def sigmoid_prime(x):
    """
    Derivative of the sigmoid function.
    """
    return sigmoid(x) * (1 - sigmoid(x))
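
A quick way to sanity-check sigmoid_prime is to compare it against a finite-difference approximation of the sigmoid's slope; the cell below is a small sketch of that check (x_test and eps are illustrative names, not part of the exercise).

In [ ]:
# Sketch: verify sigmoid_prime against a central finite-difference
# approximation of d/dx sigmoid(x).
x_test = np.array([-2.0, 0.0, 2.0])
eps = 1e-6
numeric = (sigmoid(x_test + eps) - sigmoid(x_test - eps)) / (2 * eps)
print(np.allclose(sigmoid_prime(x_test), numeric))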

In [14]:
learnrate = 0.5
x = np.array([1, 2, 3, 4])
y = np.array(0.5)
print('x: ', x, 'y: ', y, 'learnrate: ', learnrate)


x:  [1 2 3 4] y:  0.5 learnrate:  0.5

In [15]:
# Initial weights
w = np.array([0.5, -0.5, 0.3, 0.1])
print('w: ', w)


w:  [ 0.5 -0.5  0.3  0.1]

In [16]:
### Calculate one gradient descent step for each weight
### Note: Some steps have been consolidated, so there are
###       fewer variable names than in the above sample code

# TODO: Calculate the node's linear combination of inputs and weights
h = np.dot(x, w)
print('h: ', h)


h:  0.79999999999999982
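
In other words, h = (1)(0.5) + (2)(-0.5) + (3)(0.3) + (4)(0.1) = 0.5 - 1.0 + 0.9 + 0.4 = 0.8, up to floating-point rounding.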

In [17]:
# TODO: Calculate output of neural network
nn_output = sigmoid(h)
print('Neural Network output: ', nn_output)


Neural Network output:  0.6899744811276125
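
Passing h through the activation gives nn_output = sigmoid(0.8) = 1 / (1 + e^(-0.8)) ≈ 0.690.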

In [18]:
# TODO: Calculate error of neural network
error = y - nn_output
print('Amount of Error: ', error)


Amount of Error:  -0.1899744811276125
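
Here error = y - nn_output = 0.5 - 0.690 ≈ -0.190: the network's output is too high for this target, so the update will need to push it down.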

In [19]:
# TODO: Calculate the error term
#       Remember, this requires the output gradient, which we haven't
#       specifically added a variable for.
error_term = error * sigmoid_prime(h)
print('Error Term: ', error_term)


Error Term:  -0.040637383604607988
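
The error term is the error scaled by the output gradient: error_term = (y - nn_output) * sigmoid_prime(h). Since sigmoid_prime(0.8) = sigmoid(0.8) * (1 - sigmoid(0.8)) ≈ 0.690 * 0.310 ≈ 0.214, the error term is roughly -0.190 * 0.214 ≈ -0.0406, matching the output above.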

In [20]:
# TODO: Calculate change in weights
del_w = learnrate * error_term * x
print('Change in Weights: ', del_w)


Change in Weights:  [-0.02031869 -0.04063738 -0.06095608 -0.08127477]
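
Each input is scaled by the same error term, so inputs with larger magnitude receive proportionally larger weight adjustments. The exercise stops at computing the step, but applying it is one line; the cell below is a minimal sketch of that update (w_new is an illustrative name, not part of the original notebook).

In [ ]:
# Sketch: apply the gradient descent step computed above.
# This nudges each weight in the direction that reduces the
# squared error for this single example.
w_new = w + del_w
print('Updated weights: ', w_new)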

In [ ]: