In [1]:
import tensorflow as tf
import numpy as np

# Placeholders for the two-bit input and the single XNOR label (TensorFlow 1.x API).
x = tf.placeholder(tf.float32, shape=[None, 2])
y = tf.placeholder(tf.float32, shape=[None, 1])

# A single logistic unit: one weight per input plus a bias.
weights = tf.Variable(tf.random_normal([2, 1]), dtype=tf.float32)
bias = tf.Variable(tf.random_normal([1]), dtype=tf.float32)

# Affine transform followed by a sigmoid activation.
logits = tf.add(tf.matmul(x, weights), bias)
z = tf.nn.sigmoid(logits)

# Binary cross-entropy cost, averaged over the batch.
# (tf.nn.sigmoid_cross_entropy_with_logits is the numerically stable alternative.)
cost = tf.reduce_mean(-(y * tf.log(z) + (1 - y) * tf.log(1 - z)))
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

# XNOR truth table: the output is 1 exactly when both inputs are equal.
inp = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
op = np.array([[1], [0], [0], [1]])

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for i in range(12001):
        res, _ = sess.run([cost, optimizer], feed_dict={x: inp, y: op})
        if i % 1000 == 0:
            print("iteration= ", i, "cost= ", res)
    print("Validating output for XNOR FOR 2 INPUT GATE")
    result = sess.run(z, feed_dict={x: inp})
    print(result)


iteration=  0 cost=  1.16796
iteration=  1000 cost=  0.725195
iteration=  2000 cost=  0.705397
iteration=  3000 cost=  0.698085
iteration=  4000 cost=  0.695208
iteration=  5000 cost=  0.694031
iteration=  6000 cost=  0.693534
iteration=  7000 cost=  0.693319
iteration=  8000 cost=  0.693224
iteration=  9000 cost=  0.693182
iteration=  10000 cost=  0.693163
iteration=  11000 cost=  0.693154
iteration=  12000 cost=  0.69315
Validating output for XNOR FOR 2 INPUT GATE
[[ 0.49793521]
 [ 0.49979982]
 [ 0.49955216]
 [ 0.50141674]]
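The cost plateaus at about 0.693, which is ln 2: the network ends up predicting roughly 0.5 for every input, as the printed probabilities confirm. This is expected, because a single sigmoid unit can only draw a linear decision boundary, and XNOR is not linearly separable. As a minimal sketch (not part of the original run), the cell below adds one hidden layer with two sigmoid units so the decision boundary can become non-linear; the hidden width of 2 and the learning rate of 0.5 are assumptions, and the outcome still depends on the random initialization.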

In [ ]:
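import tensorflow as tf
import numpy as np

x = tf.placeholder(tf.float32, shape=[None, 2])
y = tf.placeholder(tf.float32, shape=[None, 1])

# Hidden layer (2 units, an assumed size) gives the model a non-linear decision boundary.
w1 = tf.Variable(tf.random_normal([2, 2]), dtype=tf.float32)
b1 = tf.Variable(tf.random_normal([2]), dtype=tf.float32)
hidden = tf.nn.sigmoid(tf.matmul(x, w1) + b1)

# Output layer: a single sigmoid unit, as in the original model.
w2 = tf.Variable(tf.random_normal([2, 1]), dtype=tf.float32)
b2 = tf.Variable(tf.random_normal([1]), dtype=tf.float32)
z = tf.nn.sigmoid(tf.matmul(hidden, w2) + b2)

# Same binary cross-entropy cost; the 0.5 learning rate is an assumption.
cost = tf.reduce_mean(-(y * tf.log(z) + (1 - y) * tf.log(1 - z)))
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(cost)

inp = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
op = np.array([[1], [0], [0], [1]])

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for i in range(12001):
        sess.run(optimizer, feed_dict={x: inp, y: op})
    print(sess.run(z, feed_dict={x: inp}))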