In [4]:
##############################################################################
#
# Workshop: How to develop a personalised machine learning-based application
#
# Notebook 3: Introduction to Neural Networks
#
##############################################################################

In [5]:
# Jupyter notebook instructions:
# - Every cell can be executed separately from the rest.
# - You can execute cells in a non-sequential order (but be careful of
#   the dependencies between them).
# - Execute a cell by pressing the play button or Shift+Enter.

In [3]:
# Import necessary modules
import numpy as np

In [4]:
# Seed the random number generator so we get the same numbers
# every time we execute the code
np.random.seed(1)

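In [ ]:
# Quick demo (an extra cell, not part of the original workshop): with a
# fixed seed the "random" generator produces the same numbers every run.
np.random.seed(1)
print(np.random.random(3))
np.random.seed(1)
print(np.random.random(3))  # identical to the line above
np.random.seed(1)  # restore the seed so the next cell behaves as before
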
In [5]:
# We model a single neuron with 3 input connections and one output
# connection. We create a 3 x 1 matrix and assign random weights to
# the input connections. The weights are drawn from the interval
# [-1, 1], with mean 0.
weights = 2 * np.random.random((3, 1)) - 1

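In [ ]:
# Small aside (an extra cell, not in the original workshop):
# np.random.random draws from [0, 1), so 2 * r - 1 rescales the draws
# to [-1, 1) with mean 0.
sample = 2 * np.random.random(5) - 1
print(sample)
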
In [6]:
# Define the sigmoid function
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

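In [ ]:
# Quick illustration (an extra cell, not in the original workshop): the
# sigmoid squashes any real input into (0, 1), with sigmoid(0) = 0.5.
for z in [-5.0, -1.0, 0.0, 1.0, 5.0]:
    print("sigmoid(%5.1f) = %.4f" % (z, sigmoid(z)))
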
In [7]:
# And then its derivative, expressed in terms of the sigmoid's output:
# if s = sigmoid(z), then sigmoid'(z) = s * (1 - s). It indicates how
# confident we are in the neuron's output: the gradient is small when
# the output is close to 0 or 1.
def sigmoid_dz(s):
    return s * (1 - s)

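In [ ]:
# Sanity check (an extra cell, not from the original notebook): sigmoid_dz
# expects the sigmoid *output*, not the raw input z. Feeding it sigmoid(z)
# should match a numerical derivative of sigmoid at z.
z = 0.5
numeric = (sigmoid(z + 1e-6) - sigmoid(z - 1e-6)) / 2e-6
analytic = sigmoid_dz(sigmoid(z))
print("numeric: %.6f  analytic: %.6f" % (numeric, analytic))
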
In [11]:
# Predict the output y for a given input X and weight vector W
def predict(W, X):
    # Reminder: the dot product equals Σ(xi * wi)
    return sigmoid(np.dot(X, W))


# Train the neural network by adjusting the neuron weights
#   Tin: the input training set, a matrix whose rows are input vectors xi.
#   Tout: a vector y with the expected output for every vector xi.
#   w: the weights vector.
#   n: the number of training iterations.
def train(Tin, Tout, w, n):
    for i in range(n):
        # Pass the training set data through our network,
        # and calculate the predicted output Pout.
        Pout = predict(w, Tin)

        # Calculate the error as y - f(x, θ)
        e = Tout - Pout

        # Calculate the adjustment for the weights wi. The error is
        # multiplied by the derivative of the sigmoid, so confident
        # predictions (outputs near 0 or 1) are adjusted less and
        # uncertain ones more.
        # Reminder: the dot product equals Σ(xi * (ei * sigmoid'(Pouti)))
        d_theta = np.dot(Tin.T, e * sigmoid_dz(Pout))

        w += d_theta

    return w

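In [ ]:
# Worked example (an extra cell, not in the original workshop): one
# training iteration by hand on a tiny two-row batch, to make the update
# rule d_theta = Tin.T . (e * sigmoid'(Pout)) concrete before running
# the full loop below.
X = np.array([[0, 0, 1], [1, 1, 1]])
y = np.array([[0], [1]])
w = 2 * np.random.random((3, 1)) - 1
p = predict(w, X)
print("error before: %s" % (y - p).ravel())
w += np.dot(X.T, (y - p) * sigmoid_dz(p))
print("error after one step: %s" % (y - predict(w, X)).ravel())
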
In [12]:
# Let's try our network in action!
print("Initialize the weights with random values:\n %s \n" % weights)

# Let's use the following training set:
#     x0     x1    x2    y
#     0      0     1     0
#     1      1     1     1
#     1      0     1     1
#     0      1     1     0
training_set_X = np.array([[0,0,1], [1,1,1], [1,0,1], [0,1,1]])
training_set_y = np.array([[0, 1, 1, 0]]).T

weights = train(training_set_X, training_set_y, weights, 10000)

print("Updated weights:\n %s \n" % weights)


Initialize the weights with random values:
 [[ 9.67299303]
 [-0.2078435 ]
 [-4.62963669]] 

Updated weights:
 [[ 10.38040701]
 [ -0.20641179]
 [ -4.98452047]] 


In [13]:
# And let's try to make a prediction
print "[1, 0, 0] =  %s " % predict(weights, np.array([1, 0, 0]))[0]


[1, 0, 0] =  0.999968966337 

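In [ ]:
# Extra check (not in the original workshop): run the whole training set
# back through the trained network. The output should track x0, the only
# column correlated with y.
for x, t in zip(training_set_X, training_set_y):
    print("%s -> %.4f (expected %d)" % (x, predict(weights, x)[0], t[0]))
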
In [ ]: