In [1]:
# AND Gate 
import tensorflow as tf
import numpy as np
x=tf.placeholder(tf.float32,shape=[None,2])
y=tf.placeholder(tf.float32,shape=[None,1])

weights = tf.Variable(tf.random_normal([2, 1]))  # one weight per input
bias = tf.Variable(tf.random_normal([1]))

logits = tf.add(tf.matmul(x, weights), bias)  # linear combination of the inputs
z = tf.nn.sigmoid(logits)                     # sigmoid squashes the logits to (0, 1)

# mean binary cross-entropy loss
cost = tf.reduce_mean(-(y * tf.log(z) + (1 - y) * tf.log(1 - z)))
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

inp=np.array([[0,0],[0,1],[1,0],[1,1]])
op=np.array([[0],[0],[0],[1]])
with tf.Session() as sess:
   
    tf.global_variables_initializer().run()
    for i in range(12001):
        res,_=sess.run([cost,optimizer],feed_dict={x:inp,y:op})
        if i%1000==0:
            print ("iteration= ",i,"cost= ",res)
    print ("Validating output for AND GATE")
    result=sess.run(z,feed_dict={x:inp})
    print (result)


iteration=  0 cost=  0.801029
iteration=  1000 cost=  0.455339
iteration=  2000 cost=  0.354529
iteration=  3000 cost=  0.29426
iteration=  4000 cost=  0.253381
iteration=  5000 cost=  0.223296
iteration=  6000 cost=  0.199941
iteration=  7000 cost=  0.181143
iteration=  8000 cost=  0.165616
iteration=  9000 cost=  0.152541
iteration=  10000 cost=  0.14136
iteration=  11000 cost=  0.131683
iteration=  12000 cost=  0.123219
Validating output for AND GATE
[[ 0.00520014]
 [ 0.13042383]
 [ 0.1320622 ]
 [ 0.81363302]]
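For reference, the cost line above implements the mean binary cross-entropy between the target y and the sigmoid output z:

$$J(w, b) = -\frac{1}{N} \sum_{i=1}^{N} \left[ y_i \log z_i + (1 - y_i) \log(1 - z_i) \right]$$

With N = 4 training rows, gradient descent on J pushes z toward 0 for the first three inputs and toward 1 for (1, 1), which is exactly the pattern visible in the validation output above.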

In [2]:
#OR Gate
import tensorflow as tf
import numpy as np
x=tf.placeholder(tf.float32,shape=[None,2])
y=tf.placeholder(tf.float32,shape=[None,1])

weights = tf.Variable(tf.random_normal([2, 1]))  # one weight per input
bias = tf.Variable(tf.random_normal([1]))

logits = tf.add(tf.matmul(x, weights), bias)  # linear combination of the inputs
z = tf.nn.sigmoid(logits)                     # sigmoid squashes the logits to (0, 1)

# mean binary cross-entropy loss
cost = tf.reduce_mean(-(y * tf.log(z) + (1 - y) * tf.log(1 - z)))
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

inp=np.array([[0,0],[0,1],[1,0],[1,1]])
op=np.array([[0],[1],[1],[1]])
with tf.Session() as sess:
   
    tf.global_variables_initializer().run()
    for i in range(12001):
        res,_=sess.run([cost,optimizer],feed_dict={x:inp,y:op})
        if i%1000==0:
            print ("iteration= ",i,"cost= ",res)
    print ("Validating output for AND GATE")
    result=sess.run(z,feed_dict={x:inp})
    print (result)


iteration=  0 cost=  0.686643
iteration=  1000 cost=  0.322359
iteration=  2000 cost=  0.252791
iteration=  3000 cost=  0.207164
iteration=  4000 cost=  0.174845
iteration=  5000 cost=  0.150842
iteration=  6000 cost=  0.132355
iteration=  7000 cost=  0.117706
iteration=  8000 cost=  0.105835
iteration=  9000 cost=  0.0960342
iteration=  10000 cost=  0.0878172
iteration=  11000 cost=  0.080837
iteration=  12000 cost=  0.0748399
Validating output for OR GATE
[[ 0.1575034 ]
 [ 0.93999124]
 [ 0.93682128]
 [ 0.99919575]]
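The validation step prints raw sigmoid probabilities rather than hard gate outputs. A minimal post-processing sketch, assuming it is run right after the cell above so the result array is still in scope (predictions is a name introduced here for illustration):

# hedged sketch: threshold the sigmoid probabilities at 0.5 to get binary outputs
predictions = (result > 0.5).astype(int)
print (predictions)  # expected for OR: [[0] [1] [1] [1]]

The same two lines work after the AND cell, where they should print [[0] [0] [0] [1]].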

In [3]:
# XOR Gate
"""
A simple two-layer neural network learning the XOR function
"""
import tensorflow as tf
sess = tf.InteractiveSession()

# Desired input output mapping of XOR function:
x_ = [[0, 0], [0, 1], [1, 0], [1, 1]]  # input
# labels = [0, 1, 1, 0]                # output =>
expect = [[1,0],  [0,1],  [0,1], [1,0]]  # ONE-HOT REPRESENTATION: class [1,0]==0, [0,1]==1

x = tf.placeholder("float", [None, 2])  # inputs are fed directly via feed_dict
y_ = tf.placeholder("float", [None, 2]) # two output classes

number_hidden_nodes = 20  # 20 hidden units leave room for both positive and negative activations

W = tf.Variable(tf.random_uniform([2, number_hidden_nodes], -.01, .01))
b = tf.Variable(tf.random_uniform([number_hidden_nodes], -.01, .01))
hidden  = tf.nn.relu(tf.matmul(x,W) + b) # first layer.

# XOR is the simplest function that is not linearly separable, so a second layer is needed.
W2 = tf.Variable(tf.random_uniform([number_hidden_nodes,2], -.1, .1))
b2 = tf.Variable(tf.zeros([2]))
hidden2 = tf.matmul(hidden, W2) + b2  # second-layer logits

y = tf.nn.softmax(hidden2)


# Define loss and optimizer
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(cross_entropy)

# Train
tf.global_variables_initializer().run()
for step in range(1000):
    feed_dict = {x: x_, y_: expect}  # feed the net our inputs and desired outputs
    e, a = sess.run([cross_entropy, train_step], feed_dict)
    if e < 1: break  # early stopping once the loss is low enough
    print ("step %d : entropy %s" % (step, e))  # error/loss should decrease over time


# Test trained model
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) # argmax along dim-1
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) # [True, False, True, True] -> [1,0,1,1] -> 0.75.

print ("accuracy %s"%(accuracy.eval({x: x_, y_: expect})))

learned_output=tf.argmax(y,1)
print (learned_output.eval({x: x_}))


step 0 : entropy 2.77253
step 1 : entropy 2.77224
step 2 : entropy 2.77209
step 3 : entropy 2.77084
step 4 : entropy 2.77103
step 5 : entropy 2.76961
step 6 : entropy 2.76945
step 7 : entropy 2.76827
step 8 : entropy 2.76777
step 9 : entropy 2.76592
step 10 : entropy 2.76568
step 11 : entropy 2.76315
step 12 : entropy 2.76212
step 13 : entropy 2.75966
step 14 : entropy 2.75753
step 15 : entropy 2.75394
step 16 : entropy 2.75171
step 17 : entropy 2.74628
step 18 : entropy 2.74243
step 19 : entropy 2.73671
step 20 : entropy 2.72955
step 21 : entropy 2.7219
step 22 : entropy 2.71331
step 23 : entropy 2.7017
step 24 : entropy 2.68838
step 25 : entropy 2.67706
step 26 : entropy 2.65426
step 27 : entropy 2.64009
step 28 : entropy 2.61131
step 29 : entropy 2.59318
step 30 : entropy 2.54524
step 31 : entropy 2.53845
step 32 : entropy 2.47498
step 33 : entropy 2.4547
step 34 : entropy 2.41307
step 35 : entropy 2.31096
step 36 : entropy 2.33413
step 37 : entropy 2.20739
step 38 : entropy 2.20213
step 39 : entropy 2.14837
step 40 : entropy 2.01929
step 41 : entropy 1.9762
step 42 : entropy 1.9134
step 43 : entropy 1.85069
step 44 : entropy 1.75618
step 45 : entropy 1.66422
step 46 : entropy 1.66896
step 47 : entropy 1.55273
step 48 : entropy 1.39351
step 49 : entropy 1.35789
step 50 : entropy 1.36953
step 51 : entropy 1.20762
step 52 : entropy 1.14291
step 53 : entropy 1.10386
step 54 : entropy 1.00334
accuracy 1.0
[0 1 1 0]
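Why the hidden layer matters: a single-layer model can only compute a linear decision w1*x1 + w2*x2 + b and threshold it, and XOR is not linearly separable. Requiring the four truth-table rows to be classified correctly gives contradictory constraints:

$$\begin{aligned} b &< 0 && (0,0) \mapsto 0 \\ w_2 + b &> 0 && (0,1) \mapsto 1 \\ w_1 + b &> 0 && (1,0) \mapsto 1 \\ w_1 + w_2 + b &< 0 && (1,1) \mapsto 0 \end{aligned}$$

Adding the middle two inequalities gives w1 + w2 + 2b > 0, so w1 + w2 + b > -b > 0 (since b < 0), which contradicts the last constraint. No choice of weights works, so at least one hidden layer is required; the ReLU layer above provides it.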

In [4]:
#XNOR Gate
import tensorflow as tf
sess = tf.InteractiveSession()
x_ = [[0, 0], [0, 1], [1, 0], [1, 1]]
expect = [[0,1],  [1,0],  [1,0], [0,1]]  # one-hot XNOR labels: [1, 0, 0, 1]
x = tf.placeholder("float", [None,2]) 
y_ = tf.placeholder("float", [None, 2]) 

number_hidden_nodes = 20
W = tf.Variable(tf.random_uniform([2, number_hidden_nodes], -.01, .01))
b = tf.Variable(tf.random_uniform([number_hidden_nodes], -.01, .01))
hidden  = tf.nn.relu(tf.matmul(x,W) + b)

W2 = tf.Variable(tf.random_uniform([number_hidden_nodes,2], -.1, .1))
b2 = tf.Variable(tf.zeros([2]))
hidden2 = tf.matmul(hidden, W2) + b2

y = tf.nn.softmax(hidden2)


cross_entropy = -tf.reduce_sum(y_*tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(cross_entropy)


tf.global_variables_initializer().run()
for step in range(1000):
    feed_dict={x: x_, y_:expect }
    e,a=sess.run([cross_entropy,train_step],feed_dict)
    if e<1:break 
    print ("step %d : entropy %s" % (step,e)) 


correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) 

print ("accuracy %s"%(accuracy.eval({x: x_, y_: expect})))

learned_output=tf.argmax(y,1)
print (learned_output.eval({x: x_}))


step 0 : entropy 2.77246
step 1 : entropy 2.77255
step 2 : entropy 2.77229
step 3 : entropy 2.77204
step 4 : entropy 2.77211
step 5 : entropy 2.77175
step 6 : entropy 2.77177
step 7 : entropy 2.77134
step 8 : entropy 2.77131
step 9 : entropy 2.77077
step 10 : entropy 2.77067
step 11 : entropy 2.76999
step 12 : entropy 2.76979
step 13 : entropy 2.76888
step 14 : entropy 2.76854
step 15 : entropy 2.76732
step 16 : entropy 2.76677
step 17 : entropy 2.76511
step 18 : entropy 2.76428
step 19 : entropy 2.76196
step 20 : entropy 2.76078
step 21 : entropy 2.75751
step 22 : entropy 2.75591
step 23 : entropy 2.75121
step 24 : entropy 2.74919
step 25 : entropy 2.7423
step 26 : entropy 2.74006
step 27 : entropy 2.72934
step 28 : entropy 2.72717
step 29 : entropy 2.7111
step 30 : entropy 2.71074
step 31 : entropy 2.68586
step 32 : entropy 2.69014
step 33 : entropy 2.65086
step 34 : entropy 2.66415
step 35 : entropy 2.61398
step 36 : entropy 2.62989
step 37 : entropy 2.5917
step 38 : entropy 2.55227
step 39 : entropy 2.5679
step 40 : entropy 2.4953
step 41 : entropy 2.50599
step 42 : entropy 2.49525
step 43 : entropy 2.41718
step 44 : entropy 2.40789
step 45 : entropy 2.4412
step 46 : entropy 2.35696
step 47 : entropy 2.2917
step 48 : entropy 2.33969
step 49 : entropy 2.36504
step 50 : entropy 2.28082
step 51 : entropy 2.21853
step 52 : entropy 2.17547
step 53 : entropy 2.18795
step 54 : entropy 2.26867
step 55 : entropy 2.1802
step 56 : entropy 2.1178
step 57 : entropy 2.06725
step 58 : entropy 2.01068
step 59 : entropy 1.96132
step 60 : entropy 1.933
step 61 : entropy 2.00145
step 62 : entropy 1.87819
step 63 : entropy 1.79301
step 64 : entropy 1.70951
step 65 : entropy 1.60805
step 66 : entropy 1.5282
step 67 : entropy 1.43705
step 68 : entropy 1.39246
step 69 : entropy 1.34509
step 70 : entropy 1.20048
step 71 : entropy 1.10946
step 72 : entropy 1.01818
accuracy 1.0
[1 0 0 1]
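Note that XNOR is simply the complement of XOR, so the trained XOR network could have been reused instead of retraining from scratch. A minimal sketch, assuming the [0 1 1 0] predictions printed by the XOR cell (xor_out and xnor_out are names introduced here for illustration):

import numpy as np
xor_out = np.array([0, 1, 1, 0])  # predictions from the XOR cell
xnor_out = 1 - xor_out            # complementing the labels gives XNOR
print (xnor_out)                  # [1 0 0 1]

This matches the [1 0 0 1] output the retrained network learned above.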
