XOR operation by NN


In [4]:
import numpy as np
import tensorflow as tf

# Load the XOR truth table; unpack=True gives one row per column of the file,
# so the last row holds the labels.
xy = np.loadtxt('data/xor_data.txt', unpack=True)

# Reshape into (samples, features) and (samples, 1) so the matmul shapes line up.
# THESE LINES ARE DIFFERENT FROM THE VIDEO, BUT THEY MAKE THIS CODE WORK!
x_data = np.transpose(xy[0:-1])
y_data = np.reshape(xy[-1], (4, 1))

# Placeholders with explicit shapes: 2 input features, 1 binary target.
X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])

# One hidden layer with 2 units: 2x2 hidden weights, 2x1 output weights.
W1 = tf.Variable(tf.random_uniform([2, 2], -1.0, 1.0))
W2 = tf.Variable(tf.random_uniform([2, 1], -1.0, 1.0))

b1 = tf.Variable(tf.zeros([2]), name="Bias1")
b2 = tf.Variable(tf.zeros([1]), name="Bias2")

# Hypothesis: sigmoid hidden layer followed by a sigmoid output unit.
L2 = tf.sigmoid(tf.matmul(X, W1) + b1)
hypothesis = tf.sigmoid(tf.matmul(L2, W2) + b2)

# Binary cross-entropy cost.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1. - hypothesis))

# Minimize cost. The learning rate is a plain Python constant: wrapping it in
# tf.Variable (as the original did) needlessly makes it a trainable variable.
learning_rate = 0.1
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

# Initialize all variables.
init = tf.global_variables_initializer()


# Launch the graph.
with tf.Session() as sess:
    sess.run(init)

    for step in range(8001):
        sess.run(train, feed_dict={X: x_data, Y: y_data})

        # Report cost and weights every 1000 steps.
        if step % 1000 == 0:
            print(
                step,
                sess.run(cost, feed_dict={X: x_data, Y: y_data}),
                sess.run(W1),
                sess.run(W2)
            )

    # Test model: threshold the sigmoid output at 0.5.
    correct_prediction = tf.equal(tf.floor(hypothesis + 0.5), Y)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    # Check accuracy on the four XOR cases.
    print(sess.run([hypothesis, tf.floor(hypothesis + 0.5), correct_prediction, accuracy],
                   feed_dict={X: x_data, Y: y_data}))
    print("Accuracy:", accuracy.eval({X: x_data, Y: y_data}))


0 0.691531 [[ 0.76818109  0.22193229]
 [ 0.63992149  0.03227983]] [[ 0.45615566]
 [-0.64007944]]
1000 0.674836 [[ 1.41733408  0.31909531]
 [ 1.34547555  0.11716747]] [[ 1.03060818]
 [-0.85131478]]
2000 0.575201 [[ 2.96146917  0.677468  ]
 [ 2.91184211  0.42641398]] [[ 2.69966912]
 [-1.64971578]]
3000 0.371596 [[ 4.52205801  1.45899916]
 [ 4.45863724  1.40490997]] [[ 4.59317017]
 [-3.73257446]]
4000 0.128313 [[ 5.38840342  2.87900662]
 [ 5.35865641  2.87475729]] [[ 6.33088064]
 [-6.46549416]]
5000 0.0625546 [[ 5.81085491  3.57242513]
 [ 5.79154778  3.56950641]] [[ 7.45453405]
 [-7.93570805]]
6000 0.0398186 [[ 6.06579685  3.94856215]
 [ 6.05098009  3.94617939]] [[ 8.19569016]
 [-8.81085968]]
7000 0.028854 [[ 6.24415302  4.19555235]
 [ 6.2318573   4.19349146]] [[ 8.73965359]
 [-9.42083168]]
8000 0.0225018 [[ 6.37982082  4.37598419]
 [ 6.36915922  4.37414885]] [[ 9.16725636]
 [-9.88607788]]
[array([[ 0.02553321],
       [ 0.97963244],
       [ 0.97965372],
       [ 0.02274577]], dtype=float32), array([[ 0.],
       [ 1.],
       [ 1.],
       [ 0.]], dtype=float32), array([[ True],
       [ True],
       [ True],
       [ True]], dtype=bool), 1.0]
Accuracy: 1.0

Wide NN


In [5]:
# Start from a clean graph so re-running this cell does not keep appending
# duplicate ops to the default graph.
tf.reset_default_graph()

# Load the XOR truth table; the last row of the unpacked array holds the labels.
xy = np.loadtxt('data/xor_data.txt', unpack=True)

# Reshape into (samples, features) and (samples, 1) so the matmul shapes line up.
# THESE LINES ARE DIFFERENT FROM THE VIDEO, BUT THEY MAKE THIS CODE WORK!
x_data = np.transpose(xy[0:-1])
y_data = np.reshape(xy[-1], (4, 1))

# Placeholders with explicit shapes: 2 input features, 1 binary target.
X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])

# "Wide" variant: a single hidden layer with 10 units instead of 2.
W1 = tf.Variable(tf.random_uniform([2, 10], -1.0, 1.0))
W2 = tf.Variable(tf.random_uniform([10, 1], -1.0, 1.0))

b1 = tf.Variable(tf.zeros([10]), name="Bias1")
b2 = tf.Variable(tf.zeros([1]), name="Bias2")

# Hypothesis: sigmoid hidden layer followed by a sigmoid output unit.
L2 = tf.sigmoid(tf.matmul(X, W1) + b1)
hypothesis = tf.sigmoid(tf.matmul(L2, W2) + b2)

# Binary cross-entropy cost.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1. - hypothesis))

# Minimize cost. Plain constant learning rate: wrapping it in tf.Variable
# (as the original did) needlessly makes it a trainable variable.
learning_rate = 0.1
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

# Initialize all variables.
init = tf.global_variables_initializer()


# Launch the graph.
with tf.Session() as sess:
    sess.run(init)

    for step in range(8001):
        sess.run(train, feed_dict={X: x_data, Y: y_data})

        # Report cost and weights every 1000 steps.
        if step % 1000 == 0:
            print(
                step,
                sess.run(cost, feed_dict={X: x_data, Y: y_data}),
                sess.run(W1),
                sess.run(W2)
            )

    # Test model: threshold the sigmoid output at 0.5.
    correct_prediction = tf.equal(tf.floor(hypothesis + 0.5), Y)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    # Check accuracy on the four XOR cases.
    print(sess.run([hypothesis, tf.floor(hypothesis + 0.5), correct_prediction, accuracy],
                   feed_dict={X: x_data, Y: y_data}))
    print("Accuracy:", accuracy.eval({X: x_data, Y: y_data}))


---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-5-01f246c8f5bd> in <module>()
     33 # Launch the graph
     34 with tf.Session() as sess:
---> 35     sess.run(init)
     36 
     37     for step in range(8001):

/home/jhdybpark/.virtualenvs/python36/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    765     try:
    766       result = self._run(None, fetches, feed_dict, options_ptr,
--> 767                          run_metadata_ptr)
    768       if run_metadata:
    769         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/home/jhdybpark/.virtualenvs/python36/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    963     if final_fetches or final_targets:
    964       results = self._do_run(handle, final_targets, final_fetches,
--> 965                              feed_dict_string, options, run_metadata)
    966     else:
    967       results = []

/home/jhdybpark/.virtualenvs/python36/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1013     if handle is None:
   1014       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1015                            target_list, options, run_metadata)
   1016     else:
   1017       return self._do_call(_prun_fn, self._session, handle, feed_dict,

/home/jhdybpark/.virtualenvs/python36/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
   1020   def _do_call(self, fn, *args):
   1021     try:
-> 1022       return fn(*args)
   1023     except errors.OpError as e:
   1024       message = compat.as_text(e.message)

/home/jhdybpark/.virtualenvs/python36/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1002         return tf_session.TF_Run(session, options,
   1003                                  feed_dict, fetch_list, target_list,
-> 1004                                  status, run_metadata)
   1005 
   1006     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 

Deep NN


In [ ]:
# Start from a clean graph so re-running this cell does not keep appending
# duplicate ops to the default graph.
tf.reset_default_graph()

# Load the XOR truth table; the last row of the unpacked array holds the labels.
xy = np.loadtxt('data/xor_data.txt', unpack=True)

# Reshape into (samples, features) and (samples, 1) so the matmul shapes line up.
x_data = np.transpose(xy[0:-1])
y_data = np.reshape(xy[-1], (4, 1))

# Placeholders with explicit shapes: 2 input features, 1 binary target.
X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])

# Deep network configuration: two hidden layers (2 -> 5 -> 4 -> 1).
W1 = tf.Variable(tf.random_uniform([2, 5], -1.0, 1.0))
W2 = tf.Variable(tf.random_uniform([5, 4], -1.0, 1.0))
W3 = tf.Variable(tf.random_uniform([4, 1], -1.0, 1.0))


b1 = tf.Variable(tf.zeros([5]), name="Bias1")
b2 = tf.Variable(tf.zeros([4]), name="Bias2")
b3 = tf.Variable(tf.zeros([1]), name="Bias3")


# Hypothesis: two sigmoid hidden layers followed by a sigmoid output unit.
L2 = tf.sigmoid(tf.matmul(X, W1) + b1)
L3 = tf.sigmoid(tf.matmul(L2, W2) + b2)
hypothesis = tf.sigmoid(tf.matmul(L3, W3) + b3)

# Binary cross-entropy cost.
cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1. - hypothesis))

# Minimize cost. Plain constant learning rate: wrapping it in tf.Variable
# (as the original did) needlessly makes it a trainable variable.
learning_rate = 0.1
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

# Initialize all variables.
init = tf.global_variables_initializer()


# Launch the graph.
with tf.Session() as sess:
    sess.run(init)

    for step in range(20001):
        sess.run(train, feed_dict={X: x_data, Y: y_data})

        # Report cost and ALL three weight matrices every 1000 steps
        # (the original printed only W1 and W2, silently omitting W3).
        if step % 1000 == 0:
            print(
                step,
                sess.run(cost, feed_dict={X: x_data, Y: y_data}),
                sess.run(W1),
                sess.run(W2),
                sess.run(W3)
            )

    # Test model: threshold the sigmoid output at 0.5.
    correct_prediction = tf.equal(tf.floor(hypothesis + 0.5), Y)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    # Check accuracy on the four XOR cases.
    print(sess.run([hypothesis, tf.floor(hypothesis + 0.5), correct_prediction, accuracy],
                   feed_dict={X: x_data, Y: y_data}))
    print("Accuracy:", accuracy.eval({X: x_data, Y: y_data}))