In [1]:
import tensorflow as tf
import numpy as np
x=tf.placeholder(tf.float32,shape=[None,2])
y=tf.placeholder(tf.float32,shape=[None,1])

weights=tf.Variable(tf.random_normal([2,1]),dtype=tf.float32)
bias=tf.Variable(tf.random_normal([1]),dtype=tf.float32)



multiply1=tf.add(tf.matmul(x,weights),bias)
z=tf.nn.sigmoid(multiply1)


cost=tf.reduce_mean((y*tf.log(z)+(1-y)*tf.log(1-z))*-1)
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

inp=np.array([[0,0],[0,1],[1,0],[1,1]])
op=np.array([[0],[0],[0],[1]])
with tf.Session() as sess:
   
    tf.global_variables_initializer().run()
    for i in range(12001):
        res,_=sess.run([cost,optimizer],feed_dict={x:inp,y:op})
        if i%1000==0:
            print ("iteration= ",i,"cost= ",res)
    print ("Validating output for AND GATE")
    result=sess.run(z,feed_dict={x:inp})
    print (result)


iteration=  0 cost=  1.10337
iteration=  1000 cost=  0.450279
iteration=  2000 cost=  0.351274
iteration=  3000 cost=  0.291928
iteration=  4000 cost=  0.251623
iteration=  5000 cost=  0.221922
iteration=  6000 cost=  0.198836
iteration=  7000 cost=  0.180233
iteration=  8000 cost=  0.164853
iteration=  9000 cost=  0.151891
iteration=  10000 cost=  0.1408
iteration=  11000 cost=  0.131195
iteration=  12000 cost=  0.12279
Validating output for AND GATE
[[ 0.00514346]
 [ 0.13169937]
 [ 0.12997884]
 [ 0.81422639]]

In [24]:
import tensorflow as tf
import numpy as np
x=tf.placeholder(tf.float32,shape=[None,2])
y=tf.placeholder(tf.float32,shape=[None,1])

weights=tf.Variable(tf.random_normal([2,1]),dtype=tf.float32)
bias=tf.Variable(tf.random_normal([1]),dtype=tf.float32)



multiply1=tf.add(tf.matmul(x,weights),bias)
z=tf.nn.sigmoid(multiply1)


cost=tf.reduce_mean((y*tf.log(z)+(1-y)*tf.log(1-z))*-1)
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

inp=np.array([[0,0],[0,1],[1,0],[1,1]])
op=np.array([[0],[1],[1],[1]])
with tf.Session() as sess:
   
    tf.global_variables_initializer().run()
    for i in range(12001):
        res,_=sess.run([cost,optimizer],feed_dict={x:inp,y:op})
        if i%1000==0:
            print ("iteration= ",i,"cost= ",res)
    print ("Validating output for OR GATE")
    result=sess.run(z,feed_dict={x:inp})
    print (result)


iteration=  0 cost=  1.11266
iteration=  1000 cost=  0.353893
iteration=  2000 cost=  0.271132
iteration=  3000 cost=  0.218996
iteration=  4000 cost=  0.183017
iteration=  5000 cost=  0.156807
iteration=  6000 cost=  0.136898
iteration=  7000 cost=  0.121283
iteration=  8000 cost=  0.108723
iteration=  9000 cost=  0.098414
iteration=  10000 cost=  0.0898114
iteration=  11000 cost=  0.0825314
iteration=  12000 cost=  0.0762967
Validating output for OR GATE
[[ 0.16042228]
 [ 0.93982804]
 [ 0.93482447]
 [ 0.99914777]]

In [ ]:


In [54]:
import tensorflow as tf

x_ = tf.placeholder(tf.float32, shape=[4,2], name="x-input")
y_ = tf.placeholder(tf.float32, shape=[4,1], name="y-input")

w1 = tf.Variable(tf.random_uniform([2,2], -1, 1), name="weight1")
w2 = tf.Variable(tf.random_uniform([2,1], -1, 1), name="weight2")

Bias1 = tf.Variable(tf.zeros([2]), name="Bias1")
Bias2 = tf.Variable(tf.zeros([1]), name="Bias2")

A2 = tf.sigmoid(tf.matmul(x_, w1) + Bias1)
z = tf.sigmoid(tf.matmul(A2, w2) + Bias2)

cost = tf.reduce_mean(( (y_ * tf.log(z)) + ((1 - y_) * tf.log(1.0 - z)) ) * -1)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
XOR_X = [[0,0],[0,1],[1,0],[1,1]]
XOR_Y = [[0],[1],[1],[0]]

init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for i in range(42000,62000):
        sess.run(train_step, feed_dict={x_: XOR_X, y_: XOR_Y})
        if i % 1000 == 0:
            print('Iteration ', i)
            print(' ', sess.run(z, feed_dict={x_: XOR_X, y_: XOR_Y}))
            sess.run(Theta1)
            sess.run(Bias1)
            sess.run(Theta2)
            sess.run(Bias2)
            print('cost ', sess.run(cost, feed_dict={x_: XOR_X, y_: XOR_Y}))


WARNING:tensorflow:From /Anaconda/anaconda3/lib/python3.6/site-packages/tensorflow/python/util/tf_should_use.py:170: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
Iteration  42000
  [[ 0.47313747]
 [ 0.4211584 ]
 [ 0.48363152]
 [ 0.42992091]]
cost  0.698493
Iteration  43000
  [[ 0.51839972]
 [ 0.46932593]
 [ 0.5302788 ]
 [ 0.47947463]]
cost  0.693592
Iteration  44000
  [[ 0.51818401]
 [ 0.47127503]
 [ 0.53024101]
 [ 0.48151109]]
cost  0.693442
Iteration  45000
  [[ 0.51721269]
 [ 0.47216979]
 [ 0.52942288]
 [ 0.48246926]]
cost  0.693312
Iteration  46000
  [[ 0.51635098]
 [ 0.47292283]
 [ 0.52871877]
 [ 0.48328969]]
cost  0.693198
Iteration  47000
  [[ 0.51559991]
 [ 0.47356012]
 [ 0.52813864]
 [ 0.4840042 ]]
cost  0.693093
Iteration  48000
  [[ 0.51494622]
 [ 0.47408801]
 [ 0.5276767 ]
 [ 0.48462394]]
cost  0.692997
Iteration  49000
  [[ 0.51437885]
 [ 0.4745113 ]
 [ 0.52732885]
 [ 0.48515794]]
cost  0.692906
Iteration  50000
  [[ 0.51388848]
 [ 0.47483334]
 [ 0.52709246]
 [ 0.48561344]]
cost  0.692817
Iteration  51000
  [[ 0.51346749]
 [ 0.47505659]
 [ 0.52696639]
 [ 0.48599669]]
cost  0.692729
Iteration  52000
  [[ 0.51310933]
 [ 0.47518221]
 [ 0.52695066]
 [ 0.48631236]]
cost  0.69264
Iteration  53000
  [[ 0.51280832]
 [ 0.47521058]
 [ 0.52704716]
 [ 0.4865647 ]]
cost  0.692548
Iteration  54000
  [[ 0.51255965]
 [ 0.47514096]
 [ 0.52725869]
 [ 0.48675656]]
cost  0.69245
Iteration  55000
  [[ 0.51235902]
 [ 0.4749718 ]
 [ 0.52759004]
 [ 0.48689038]]
cost  0.692344
Iteration  56000
  [[ 0.51220262]
 [ 0.47470021]
 [ 0.52804762]
 [ 0.48696771]]
cost  0.692228
Iteration  57000
  [[ 0.51208675]
 [ 0.47432241]
 [ 0.52863973]
 [ 0.48698935]]
cost  0.692098
Iteration  58000
  [[ 0.51200801]
 [ 0.47383308]
 [ 0.52937728]
 [ 0.48695537]]
cost  0.691951
Iteration  59000
  [[ 0.51196283]
 [ 0.47322544]
 [ 0.53027391]
 [ 0.48686495]]
cost  0.691781
Iteration  60000
  [[ 0.51194739]
 [ 0.47249126]
 [ 0.53134656]
 [ 0.4867163 ]]
cost  0.691584
Iteration  61000
  [[ 0.51195729]
 [ 0.47161996]
 [ 0.53261638]
 [ 0.48650661]]
cost  0.691352

In [ ]:
import tensorflow as tf
hidden1_neuron = 10

def Network(x, weights, bias):
    layer1 = tf.nn.relu(tf.matmul(x, weights['h1']) + bias['h1'])
    layer_final = tf.matmul(layer1, weights['out']) + bias['out']
    return layer_final

weight = {
    'h1' : tf.Variable(tf.random_normal([2, hidden1_neuron])),
    'out': tf.Variable(tf.random_normal([hidden1_neuron, 2]))
}
bias = {
    'h1' : tf.Variable(tf.random_normal([hidden1_neuron])),
    'out': tf.Variable(tf.random_normal([2]))
}

x = tf.placeholder(tf.float32, [None, 2])
y = tf.placeholder(tf.float32, [None, 2])

net = Network(x, weight, bias)

cross_entropy = tf.nn.softmax_cross_entropy_with_logits(net, y)
loss = tf.reduce_mean(cross_entropy)

train_op = tf.train.AdamOptimizer(0.2).minimize(loss)

init_op = tf.initialize_all_variables()

xTrain = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
yTrain = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])

with tf.Session() as sess:
    sess.run(init_op)
    for i in range(5000):
        train_data = sess.run(train_op, feed_dict={x: xTrain, y: yTrain})
        loss_val = sess.run(loss, feed_dict={x: xTrain, y: yTrain})
        if(not(i%500)):
            print(loss_val)

    result = sess.run(net, feed_dict={x:xTrain})
    print(result)