NOT GATE


In [7]:
import tensorflow as tf
import numpy as np
x=tf.placeholder(tf.float32,shape=[None,1])
y=tf.placeholder(tf.float32,shape=[None,1])

weights=tf.Variable(tf.random_normal([1,1]),dtype=tf.float32)
bias=tf.Variable(tf.random_normal([1]),dtype=tf.float32)



multiply1=tf.add(tf.matmul(x,weights),bias)
z=tf.nn.sigmoid(multiply1)


cost=tf.reduce_mean((y*tf.log(z)+(1-y)*tf.log(1-z))*-1)
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

inp=np.array([[0],[1]])
op=np.array([[1],[0]])
with tf.Session() as sess:
   
    tf.global_variables_initializer().run()
    for i in range(12001):
        res,_=sess.run([cost,optimizer],feed_dict={x:inp,y:op})
        if i%1000==0:
            print ("iteration= ",i,"cost= ",res)
    print ("Validating output for NOT GATE")
    result=sess.run(z,feed_dict={x:inp})
    print (result)


iteration=  0 cost=  0.959494
iteration=  1000 cost=  0.180884
iteration=  2000 cost=  0.134331
iteration=  3000 cost=  0.108417
iteration=  4000 cost=  0.090659
iteration=  5000 cost=  0.0777345
iteration=  6000 cost=  0.067937
iteration=  7000 cost=  0.0602722
iteration=  8000 cost=  0.0541226
iteration=  9000 cost=  0.0490855
iteration=  10000 cost=  0.0448881
iteration=  11000 cost=  0.0413388
iteration=  12000 cost=  0.0383002
Validating output for NOT GATE
[[ 0.95489216]
 [ 0.02997925]]

OR GATE


In [8]:
import tensorflow as tf
import numpy as np
x=tf.placeholder(tf.float32,shape=[None,2])
y=tf.placeholder(tf.float32,shape=[None,1])

weights=tf.Variable(tf.random_normal([2,1]),dtype=tf.float32)
bias=tf.Variable(tf.random_normal([1]),dtype=tf.float32)



multiply1=tf.add(tf.matmul(x,weights),bias)
z=tf.nn.sigmoid(multiply1)


cost=tf.reduce_mean((y*tf.log(z)+(1-y)*tf.log(1-z))*-1)
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

inp=np.array([[0,0],[0,1],[1,0],[1,1]])
op=np.array([[0],[1],[1],[1]])
with tf.Session() as sess:
   
    tf.global_variables_initializer().run()
    for i in range(12001):
        res,_=sess.run([cost,optimizer],feed_dict={x:inp,y:op})
        if i%1000==0:
            print ("iteration= ",i,"cost= ",res)
    print ("Validating output for OR GATE")
    result=sess.run(z,feed_dict={x:inp})
    print (result)


iteration=  0 cost=  0.797819
iteration=  1000 cost=  0.421295
iteration=  2000 cost=  0.316662
iteration=  3000 cost=  0.250593
iteration=  4000 cost=  0.206044
iteration=  5000 cost=  0.174242
iteration=  6000 cost=  0.150497
iteration=  7000 cost=  0.132145
iteration=  8000 cost=  0.117572
iteration=  9000 cost=  0.105744
iteration=  10000 cost=  0.0959697
iteration=  11000 cost=  0.0877697
iteration=  12000 cost=  0.0808008
Validating output for OR GATE
[[ 0.16933417]
 [ 0.93315065]
 [ 0.93478131]
 [ 0.99898213]]

AND GATE


In [ ]:
import tensorflow as tf
import numpy as np
x=tf.placeholder(tf.float32,shape=[None,2])
y=tf.placeholder(tf.float32,shape=[None,1])

weights=tf.Variable(tf.random_normal([2,1]),dtype=tf.float32)
bias=tf.Variable(tf.random_normal([1]),dtype=tf.float32)



multiply1=tf.add(tf.matmul(x,weights),bias)
z=tf.nn.sigmoid(multiply1)


cost=tf.reduce_mean((y*tf.log(z)+(1-y)*tf.log(1-z))*-1)
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

inp=np.array([[0,0],[0,1],[1,0],[1,1]])
op=np.array([[0],[0],[0],[1]])
with tf.Session() as sess:
   
    tf.global_variables_initializer().run()
    for i in range(12001):
        res,_=sess.run([cost,optimizer],feed_dict={x:inp,y:op})
        if i%1000==0:
            print ("iteration= ",i,"cost= ",res)
    print ("Validating output for AND GATE")
    result=sess.run(z,feed_dict={x:inp})
    print (result)

X-OR


In [26]:
import tensorflow as tf

x_ = tf.placeholder(tf.float32, shape=[4,2], name="x-input")
y_ = tf.placeholder(tf.float32, shape=[4,1], name="y-input")

w1 = tf.Variable(tf.random_uniform([2,2], -1, 1), name="weight1")
w2 = tf.Variable(tf.random_uniform([2,1], -1, 1), name="weight2")

Bias1 = tf.Variable(tf.zeros([2]), name="Bias1")
Bias2 = tf.Variable(tf.zeros([1]), name="Bias2")

A2 = tf.sigmoid(tf.matmul(x_, w1) + Bias1)
z = tf.sigmoid(tf.matmul(A2, w2) + Bias2)

cost = tf.reduce_mean(( (y_ * tf.log(z)) + ((1 - y_) * tf.log(1.0 - z)) ) * -1)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
XOR_X = [[0,0],[0,1],[1,0],[1,1]]
XOR_Y = [[0],[1],[1],[0]]

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(80000):
        sess.run(train_step, feed_dict={x_: XOR_X, y_: XOR_Y})
        if i % 4000 == 0:
            print('Iteration ', i)
            result=sess.run(z, feed_dict={x_: XOR_X, y_: XOR_Y})
            sess.run(w1)
            sess.run(Bias1)
            sess.run(w2)
            sess.run(Bias2)
print(result)


WARNING:tensorflow:From /home/mahantesh/anaconda3/lib/python3.6/site-packages/tensorflow/python/util/tf_should_use.py:170: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
Iteration  0
Iteration  4000
Iteration  8000
Iteration  12000
Iteration  16000
Iteration  20000
Iteration  24000
Iteration  28000
Iteration  32000
Iteration  36000
Iteration  40000
Iteration  44000
Iteration  48000
Iteration  52000
Iteration  56000
Iteration  60000
Iteration  64000
Iteration  68000
Iteration  72000
Iteration  76000
[[ 0.03452263]
 [ 0.94507432]
 [ 0.94481105]
 [ 0.0591119 ]]

X-NOR


In [28]:
import tensorflow as tf

x_ = tf.placeholder(tf.float32, shape=[4,2], name="x-input")
y_ = tf.placeholder(tf.float32, shape=[4,1], name="y-input")

w1 = tf.Variable(tf.random_uniform([2,2], -1, 1), name="weight1")
w2 = tf.Variable(tf.random_uniform([2,1], -1, 1), name="weight2")

Bias1 = tf.Variable(tf.zeros([2]), name="Bias1")
Bias2 = tf.Variable(tf.zeros([1]), name="Bias2")

A2 = tf.sigmoid(tf.matmul(x_, w1) + Bias1)
z = tf.sigmoid(tf.matmul(A2, w2) + Bias2)

cost = tf.reduce_mean(( (y_ * tf.log(z)) + ((1 - y_) * tf.log(1.0 - z)) ) * -1)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
XOR_X = [[0,0],[0,1],[1,0],[1,1]]
XOR_Y = [[1],[0],[0],[1]]

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(80000):
        sess.run(train_step, feed_dict={x_: XOR_X, y_: XOR_Y})
        if i % 4000 == 0:
            print('Iteration ', i)
            result=sess.run(z, feed_dict={x_: XOR_X, y_: XOR_Y})
            sess.run(w1)
            sess.run(Bias1)
            sess.run(w2)
            sess.run(Bias2)
print(result)


WARNING:tensorflow:From /home/mahantesh/anaconda3/lib/python3.6/site-packages/tensorflow/python/util/tf_should_use.py:170: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
Iteration  0
Iteration  4000
Iteration  8000
Iteration  12000
Iteration  16000
Iteration  20000
Iteration  24000
Iteration  28000
Iteration  32000
Iteration  36000
Iteration  40000
Iteration  44000
Iteration  48000
Iteration  52000
Iteration  56000
Iteration  60000
Iteration  64000
Iteration  68000
Iteration  72000
Iteration  76000
[[ 0.97785747]
 [ 0.02043997]
 [ 0.02881064]
 [ 0.9813813 ]]

In [ ]: