In [1]:
import tensorflow as tf
import numpy as np

# Placeholders for the two-bit inputs and the single-bit target.
x = tf.placeholder(tf.float32, shape=[None, 2])
y = tf.placeholder(tf.float32, shape=[None, 1])

# A single sigmoid neuron: z = sigmoid(x.W + b).
weights = tf.Variable(tf.random_normal([2, 1]), dtype=tf.float32)
bias = tf.Variable(tf.random_normal([1]), dtype=tf.float32)
logits = tf.add(tf.matmul(x, weights), bias)
z = tf.nn.sigmoid(logits)

# Binary cross-entropy loss, minimized with plain gradient descent.
cost = tf.reduce_mean(-(y * tf.log(z) + (1 - y) * tf.log(1 - z)))
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

# Truth table for AND.
inp = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
op = np.array([[0], [0], [0], [1]])

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for i in range(12001):
        res, _ = sess.run([cost, optimizer], feed_dict={x: inp, y: op})
        if i % 1000 == 0:
            print("iteration= ", i, "cost= ", res)
    print("Validating output for AND gate")
    result = sess.run(z, feed_dict={x: inp})
    print(result)
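The sigmoid outputs above are probabilities, not hard 0/1 values. A minimal follow-up sketch (assuming `result` from the cell above is still in scope) thresholds them at 0.5 to recover the binary truth table:

In [ ]:
# Threshold the sigmoid activations at 0.5 to get hard 0/1 predictions.
# `result` is the (4, 1) array printed by the previous cell.
predictions = (result > 0.5).astype(int)
print(predictions)  # should match the AND targets [[0], [0], [0], [1]]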
In [24]:
import tensorflow as tf
import numpy as np

# Same single-neuron model as above; only the target labels change.
x = tf.placeholder(tf.float32, shape=[None, 2])
y = tf.placeholder(tf.float32, shape=[None, 1])

weights = tf.Variable(tf.random_normal([2, 1]), dtype=tf.float32)
bias = tf.Variable(tf.random_normal([1]), dtype=tf.float32)
logits = tf.add(tf.matmul(x, weights), bias)
z = tf.nn.sigmoid(logits)

cost = tf.reduce_mean(-(y * tf.log(z) + (1 - y) * tf.log(1 - z)))
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

# Truth table for OR.
inp = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
op = np.array([[0], [1], [1], [1]])

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for i in range(12001):
        res, _ = sess.run([cost, optimizer], feed_dict={x: inp, y: op})
        if i % 1000 == 0:
            print("iteration= ", i, "cost= ", res)
    print("Validating output for OR gate")
    result = sess.run(z, feed_dict={x: inp})
    print(result)
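A single sigmoid neuron learns AND and OR because both are linearly separable. XOR is not, which is why the next cell inserts a hidden layer. As a quick sanity check (NumPy only, a coarse grid of weights rather than a proof), no single linear threshold unit reproduces the XOR truth table:

In [ ]:
import numpy as np
# Brute-force a coarse grid of weight/bias settings for one linear
# threshold unit; none of them matches the XOR labels.
pts = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
lbl = np.array([0, 1, 1, 0])
grid = np.linspace(-2, 2, 21)
found = any(
    ((pts @ np.array([a, b]) + c > 0).astype(int) == lbl).all()
    for a in grid for b in grid for c in grid
)
print("XOR linearly separable on this grid:", found)  # False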
In [54]:
import tensorflow as tf

# XOR needs a hidden layer: two sigmoid units between input and output.
x_ = tf.placeholder(tf.float32, shape=[4, 2], name="x-input")
y_ = tf.placeholder(tf.float32, shape=[4, 1], name="y-input")

w1 = tf.Variable(tf.random_uniform([2, 2], -1, 1), name="weight1")
w2 = tf.Variable(tf.random_uniform([2, 1], -1, 1), name="weight2")
Bias1 = tf.Variable(tf.zeros([2]), name="Bias1")
Bias2 = tf.Variable(tf.zeros([1]), name="Bias2")

A2 = tf.sigmoid(tf.matmul(x_, w1) + Bias1)   # hidden-layer activations
z = tf.sigmoid(tf.matmul(A2, w2) + Bias2)    # network output

cost = tf.reduce_mean(-((y_ * tf.log(z)) + ((1 - y_) * tf.log(1.0 - z))))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

XOR_X = [[0, 0], [0, 1], [1, 0], [1, 1]]
XOR_Y = [[0], [1], [1], [0]]

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(20001):
    sess.run(train_step, feed_dict={x_: XOR_X, y_: XOR_Y})
    if i % 1000 == 0:
        print('Iteration ', i)
        print('  ', sess.run(z, feed_dict={x_: XOR_X, y_: XOR_Y}))

# Inspect the learned parameters and the final cost.
print('w1 ', sess.run(w1))
print('Bias1 ', sess.run(Bias1))
print('w2 ', sess.run(w2))
print('Bias2 ', sess.run(Bias2))
print('cost ', sess.run(cost, feed_dict={x_: XOR_X, y_: XOR_Y}))
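Because the session from the cell above is still open, a short sketch can inspect the hidden-layer activations `A2` for each input; the two hidden units map the four points into a representation in which XOR becomes linearly separable:

In [ ]:
# Hidden-layer activations for the four XOR inputs, one row per input.
# Reuses `sess`, `A2`, and `XOR_X` from the previous cell.
print(sess.run(A2, feed_dict={x_: XOR_X}))
sess.close()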
In [ ]:
import tensorflow as tf
import numpy as np

hidden1_neuron = 10

def Network(x, weights, bias):
    # One ReLU hidden layer followed by a linear output layer (raw logits).
    layer1 = tf.nn.relu(tf.matmul(x, weights['h1']) + bias['h1'])
    layer_final = tf.matmul(layer1, weights['out']) + bias['out']
    return layer_final

weight = {
    'h1' : tf.Variable(tf.random_normal([2, hidden1_neuron])),
    'out': tf.Variable(tf.random_normal([hidden1_neuron, 2]))
}
bias = {
    'h1' : tf.Variable(tf.random_normal([hidden1_neuron])),
    'out': tf.Variable(tf.random_normal([2]))
}

x = tf.placeholder(tf.float32, [None, 2])
y = tf.placeholder(tf.float32, [None, 2])

net = Network(x, weight, bias)

# softmax_cross_entropy_with_logits takes keyword arguments in TF 1.x.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=net, labels=y)
loss = tf.reduce_mean(cross_entropy)
train_op = tf.train.AdamOptimizer(0.2).minimize(loss)
init_op = tf.global_variables_initializer()

# XOR truth table with one-hot targets: column 0 = output 0, column 1 = output 1.
xTrain = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
yTrain = np.array([[1, 0], [0, 1], [0, 1], [1, 0]])

with tf.Session() as sess:
    sess.run(init_op)
    for i in range(5000):
        sess.run(train_op, feed_dict={x: xTrain, y: yTrain})
        loss_val = sess.run(loss, feed_dict={x: xTrain, y: yTrain})
        if i % 500 == 0:
            print(loss_val)
    result = sess.run(net, feed_dict={x: xTrain})
    print(result)
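The final `result` holds raw logits, one row per input. A small follow-up sketch (assuming `result` and `yTrain` from the cell above are still in scope) converts them to class predictions with argmax and compares against the targets:

In [ ]:
import numpy as np
# argmax over the two logits picks the predicted class; the one-hot
# targets are decoded the same way for comparison.
pred = np.argmax(result, axis=1)
true = np.argmax(yTrain, axis=1)
print("predicted:", pred, "target:", true, "all correct:", (pred == true).all())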