In [9]:
import tensorflow as tf
import numpy as np
x_data = np.array([[0,0],[0,1],[1,0],[1,1]], dtype = np.float32)

In [10]:
y_data = np.array([[0],[1],[1],[0]], dtype = np.float32)


X = tf.placeholder(tf.float32, [None,2])
Y = tf.placeholder(tf.float32, [None,1])

In [11]:
W1 = tf.Variable(tf.random_normal([2,2]), name = 'weight1')
b1 = tf.Variable(tf.random_normal([2]), name = 'bias1')
layer1 = tf.sigmoid(tf.matmul(X,W1)+b1)

In [12]:
W2 = tf.Variable(tf.random_normal([2,1]), name = 'weight2')
b2 = tf.Variable(tf.random_normal([1]), name = 'bias2')

In [13]:
hypothesis = tf.sigmoid(tf.matmul(layer1,W2)+b2)

In [14]:
cost = -tf.reduce_mean(Y*tf.log(hypothesis)+(1-Y)*tf.log(1-hypothesis))

In [18]:
train = tf.train.GradientDescentOptimizer(learning_rate = 0.01).minimize(cost)

In [19]:
predicted = tf.cast(hypothesis>0.5, dtype = tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted,Y),dtype = tf.float32))

In [20]:
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    
    for step in range(2000):
        sess.run(train, feed_dict = {X:x_data, Y: y_data})
        if step %100 ==0:
            print(step, sess.run(cost,feed_dict = {X:x_data, Y:y_data}), sess.run([W1,W1]))
    h,p,a = sess.run([hypothesis, predicted, accuracy], feed_dict ={X:x_data, Y: y_data})
    print ('\nHypothsis:',h,'\npredicted:',p,'\naccuracy:',a)


0 0.838846 [array([[ 0.83711594,  0.01335154],
       [-0.22521698, -0.2171755 ]], dtype=float32), array([[ 0.83711594,  0.01335154],
       [-0.22521698, -0.2171755 ]], dtype=float32)]
100 0.757547 [array([[ 0.82539535,  0.02334603],
       [-0.22936451, -0.20665795]], dtype=float32), array([[ 0.82539535,  0.02334603],
       [-0.22936451, -0.20665795]], dtype=float32)]
200 0.720887 [array([[ 0.81774074,  0.03017185],
       [-0.22908856, -0.19897534]], dtype=float32), array([[ 0.81774074,  0.03017185],
       [-0.22908856, -0.19897534]], dtype=float32)]
300 0.705351 [array([[ 0.81258547,  0.03439552],
       [-0.22631201, -0.19361711]], dtype=float32), array([[ 0.81258547,  0.03439552],
       [-0.22631201, -0.19361711]], dtype=float32)]
400 0.698937 [array([[ 0.80891883,  0.03673161],
       [-0.22217759, -0.18994629]], dtype=float32), array([[ 0.80891883,  0.03673161],
       [-0.22217759, -0.18994629]], dtype=float32)]
500 0.696307 [array([[ 0.80614465,  0.03778163],
       [-0.21730696, -0.18742873]], dtype=float32), array([[ 0.80614465,  0.03778163],
       [-0.21730696, -0.18742873]], dtype=float32)]
600 0.69522 [array([[ 0.8039192 ,  0.0379835 ],
       [-0.21203369, -0.18567578]], dtype=float32), array([[ 0.8039192 ,  0.0379835 ],
       [-0.21203369, -0.18567578]], dtype=float32)]
700 0.694757 [array([[ 0.80204087,  0.03763609],
       [-0.20654052, -0.18442231]], dtype=float32), array([[ 0.80204087,  0.03763609],
       [-0.20654052, -0.18442231]], dtype=float32)]
800 0.694546 [array([[ 0.80038995,  0.03693682],
       [-0.20093049, -0.18349291]], dtype=float32), array([[ 0.80038995,  0.03693682],
       [-0.20093049, -0.18349291]], dtype=float32)]
900 0.694437 [array([[ 0.79889357,  0.03601391],
       [-0.19526316, -0.18277363]], dtype=float32), array([[ 0.79889357,  0.03601391],
       [-0.19526316, -0.18277363]], dtype=float32)]
1000 0.69437 [array([[ 0.79750717,  0.03494977],
       [-0.18957375, -0.18219133]], dtype=float32), array([[ 0.79750717,  0.03494977],
       [-0.18957375, -0.18219133]], dtype=float32)]
1100 0.69432 [array([[ 0.79620296,  0.03379714],
       [-0.18388341, -0.18169896]], dtype=float32), array([[ 0.79620296,  0.03379714],
       [-0.18388341, -0.18169896]], dtype=float32)]
1200 0.694278 [array([[ 0.7949633 ,  0.03258957],
       [-0.17820482, -0.18126662]], dtype=float32), array([[ 0.7949633 ,  0.03258957],
       [-0.17820482, -0.18126662]], dtype=float32)]
1300 0.694238 [array([[ 0.79377729,  0.03134842],
       [-0.17254558, -0.18087523]], dtype=float32), array([[ 0.79377729,  0.03134842],
       [-0.17254558, -0.18087523]], dtype=float32)]
1400 0.6942 [array([[ 0.79263759,  0.03008722],
       [-0.16691026, -0.18051271]], dtype=float32), array([[ 0.79263759,  0.03008722],
       [-0.16691026, -0.18051271]], dtype=float32)]
1500 0.694164 [array([[ 0.79153985,  0.02881453],
       [-0.16130139, -0.18017142]], dtype=float32), array([[ 0.79153985,  0.02881453],
       [-0.16130139, -0.18017142]], dtype=float32)]
1600 0.694127 [array([[ 0.79048061,  0.02753575],
       [-0.15572037, -0.17984642]], dtype=float32), array([[ 0.79048061,  0.02753575],
       [-0.15572037, -0.17984642]], dtype=float32)]
1700 0.694092 [array([[ 0.78945822,  0.02625432],
       [-0.15016785, -0.17953467]], dtype=float32), array([[ 0.78945822,  0.02625432],
       [-0.15016785, -0.17953467]], dtype=float32)]
1800 0.694057 [array([[ 0.78847104,  0.02497235],
       [-0.14464399, -0.17923425]], dtype=float32), array([[ 0.78847104,  0.02497235],
       [-0.14464399, -0.17923425]], dtype=float32)]
1900 0.694023 [array([[ 0.78751802,  0.02369123],
       [-0.1391487 , -0.17894386]], dtype=float32), array([[ 0.78751802,  0.02369123],
       [-0.1391487 , -0.17894386]], dtype=float32)]

Hypothsis: [[ 0.49143434]
 [ 0.49386162]
 [ 0.50438416]
 [ 0.50829643]] 
predicted: [[ 0.]
 [ 0.]
 [ 1.]
 [ 1.]] 
accuracy: 0.5

In [ ]: