In [1]:
import numpy as np

In [2]:
def nonlin(x, deriv=False):
    """Sigmoid activation, applied element-wise.

    Parameters
    ----------
    x : array_like
        Input value(s). IMPORTANT: when ``deriv=True``, ``x`` must already
        be a sigmoid *output*, because the derivative is computed via the
        shortcut ``x * (1 - x)`` (valid only for x = sigmoid(z)).
    deriv : bool, optional
        If True, return the sigmoid derivative evaluated from the sigmoid
        output ``x`` instead of the sigmoid itself.

    Returns
    -------
    ndarray or float
        ``1 / (1 + exp(-x))``, or ``x * (1 - x)`` when ``deriv`` is True.
    """
    if deriv:  # idiomatic truthiness test instead of `deriv == True`
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))

In [3]:
# Input dataset: 3 training samples, 3 features each (shape (3, 3)).
X = np.array([
    [0, 1, 1],
    [1, 1, 1],
    [1, 0, 0],
])

In [4]:
X


Out[4]:
array([[0, 1, 1],
       [1, 1, 1],
       [1, 0, 0]])

In [5]:
y = np.array([[0,1,1]]).T

In [6]:
y


Out[6]:
array([[0],
       [1],
       [1]])

In [7]:
np.random.seed(1)

In [8]:
syn0 = 2*np.random.random((3,1)) - 1

In [14]:
syn0


Out[14]:
array([[-0.16595599],
       [ 0.44064899],
       [-0.99977125]])

In [15]:
# Train the single-layer network with full-batch gradient descent.
# `range` replaces Python-2-only `xrange` (works on both 2 and 3), and
# `epoch` avoids shadowing the builtin `iter`.
for epoch in range(10000):

    # Forward propagation: layer 0 is the input, layer 1 the sigmoid output.
    l0 = X
    l1 = nonlin(np.dot(l0, syn0))

    # Error: distance of the prediction from the target.
    l1_error = y - l1

    # Back propagation: scale the error by the sigmoid slope at l1.
    # nonlin(l1, True) == l1 * (1 - l1), valid because l1 is a sigmoid output.
    l1_delta = l1_error * nonlin(l1, True)

    # Gradient-descent weight update (learning rate implicitly 1).
    syn0 += np.dot(l0.T, l1_delta)

In [16]:
l0


Out[16]:
array([[0, 1, 1],
       [1, 1, 1],
       [1, 0, 0]])

In [17]:
l1


Out[17]:
array([[ 0.01142146],
       [ 0.98979175],
       [ 0.99988086]])

In [18]:
l1_error


Out[18]:
array([[-0.01142146],
       [ 0.01020825],
       [ 0.00011914]])

In [19]:
l1_delta


Out[19]:
array([[ -1.28959798e-04],
       [  1.03144553e-04],
       [  1.41932018e-08]])

In [20]:
syn0


Out[20]:
array([[ 9.0351758 ],
       [-1.51020277],
       [-2.95062301]])

In [21]:
# Show the final predictions (should approximate y = [[0], [1], [1]]).
# Single-argument print() calls are valid in both Python 2 and 3,
# unlike the Python-2-only `print x` statement form.
print("Trained output:")
print(l1)


Trained output:
[[ 0.01142146]
 [ 0.98979175]
 [ 0.99988086]]

In [ ]: