In [9]:
#!/usr/bin/env PYTHONIOENCODING="utf-8" python
"""
A simple neural network learning the AND function
"""
import tensorflow as tf
sess = tf.InteractiveSession()

# Desired input output mapping of XOR function:
x_ = [[0, 0], [0, 1], [1, 0], [1, 1]] # input
#labels=[0,      0,      0,      1]   # output =>
expect=[[1,0],  [0,1],  [0,1], [1,0]] # ONE HOT REPRESENTATION! 'class' [1,0]==0 [0,1]==1

# x = tf.Variable(x_)
x = tf.placeholder("float", [None,2]) #  can we feed directly?
y_ = tf.placeholder("float", [None, 2]) # two output classes

number_hidden_nodes = 20 # 20 outputs to create some room for negatives and positives

W = tf.Variable(tf.random_uniform([2, number_hidden_nodes], -.01, .01))
b = tf.Variable(tf.random_uniform([number_hidden_nodes], -.01, .01))
hidden  = tf.nn.relu(tf.matmul(x,W) + b) # first layer.

 # the XOR function is the first nontrivial function, for which a two layer network is needed.
W2 = tf.Variable(tf.random_uniform([number_hidden_nodes,2], -.1, .1))
b2 = tf.Variable(tf.zeros([2]))
hidden2 = tf.matmul(hidden, W2)#+b2

y = tf.nn.softmax(hidden2)


# Define loss and optimizer
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(cross_entropy)

# Train
tf.initialize_all_variables().run()
for step in range(1000):
    feed_dict={x: x_, y_:expect } # feed the net with our inputs and desired outputs.
    e,a=sess.run([cross_entropy,train_step],feed_dict)
    if e<1:break # early stopping yay
    print ("step %d : entropy %s" % (step,e)) # error/loss should decrease over time


# Test trained model
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) # argmax along dim-1
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) # [True, False, True, True] -> [1,0,1,1] -> 0.75.

print ("accuracy %s"%(accuracy.eval({x: x_, y_: expect})))

learned_output=tf.argmax(y,1)
print (learned_output.eval({x: x_}))


WARNING:tensorflow:From /Anaconda/anaconda3/lib/python3.6/site-packages/tensorflow/python/util/tf_should_use.py:170: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
step 0 : entropy 2.77246
step 1 : entropy 2.77143
step 2 : entropy 2.77085
step 3 : entropy 2.76959
step 4 : entropy 2.76784
step 5 : entropy 2.76634
step 6 : entropy 2.76553
step 7 : entropy 2.76255
step 8 : entropy 2.76144
step 9 : entropy 2.75904
step 10 : entropy 2.75606
step 11 : entropy 2.75315
step 12 : entropy 2.75048
step 13 : entropy 2.74479
step 14 : entropy 2.74183
step 15 : entropy 2.73555
step 16 : entropy 2.72891
step 17 : entropy 2.72262
step 18 : entropy 2.71409
step 19 : entropy 2.70208
step 20 : entropy 2.69415
step 21 : entropy 2.67871
step 22 : entropy 2.65813
step 23 : entropy 2.64826
step 24 : entropy 2.63156
step 25 : entropy 2.59343
step 26 : entropy 2.57653
step 27 : entropy 2.55426
step 28 : entropy 2.50167
step 29 : entropy 2.47054
step 30 : entropy 2.43869
step 31 : entropy 2.39575
step 32 : entropy 2.30526
step 33 : entropy 2.30555
step 34 : entropy 2.18424
step 35 : entropy 2.1783
step 36 : entropy 2.05667
step 37 : entropy 2.03237
step 38 : entropy 1.92592
step 39 : entropy 1.8155
step 40 : entropy 1.80992
step 41 : entropy 1.70388
step 42 : entropy 1.65694
step 43 : entropy 1.59074
step 44 : entropy 1.45755
step 45 : entropy 1.43315
step 46 : entropy 1.41658
step 47 : entropy 1.31404
step 48 : entropy 1.23503
step 49 : entropy 1.21564
step 50 : entropy 1.13755
step 51 : entropy 1.06169
step 52 : entropy 1.08159
step 53 : entropy 1.03929
accuracy 1.0
[0 1 1 0]

In [ ]: