In [1]:
# AND Gate: a single sigmoid neuron trained with binary cross-entropy
import tensorflow as tf
import numpy as np

x = tf.placeholder(tf.float32, shape=[None, 2])          # two binary inputs
y = tf.placeholder(tf.float32, shape=[None, 1])          # one binary target
weights = tf.Variable(tf.random_normal([2, 1]), dtype=tf.float32)
bias = tf.Variable(tf.random_normal([1]), dtype=tf.float32)
logits = tf.add(tf.matmul(x, weights), bias)             # w.x + b
z = tf.nn.sigmoid(logits)                                # squash into (0, 1)
# binary cross-entropy, averaged over the four truth-table rows
cost = tf.reduce_mean((y * tf.log(z) + (1 - y) * tf.log(1 - z)) * -1)
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
inp = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])         # full truth table
op = np.array([[0], [0], [0], [1]])                      # AND targets
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for i in range(12001):
        res, _ = sess.run([cost, optimizer], feed_dict={x: inp, y: op})
        if i % 1000 == 0:
            print("iteration= ", i, "cost= ", res)
    print("Validating output for AND GATE")
    result = sess.run(z, feed_dict={x: inp})
    print(result)
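The validation step prints raw sigmoid activations, not clean 0/1 values. A minimal post-processing sketch (my addition; it assumes the `result` array produced by the cell above, whose entries should sit near 0 or 1 after 12001 steps):

import numpy as np
binary = (result > 0.5).astype(int)  # threshold the sigmoid outputs at 0.5
print(binary)                        # expected AND truth table: [[0] [0] [0] [1]]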
In [2]:
# OR Gate: identical single-neuron model, only the targets change
import tensorflow as tf
import numpy as np

x = tf.placeholder(tf.float32, shape=[None, 2])
y = tf.placeholder(tf.float32, shape=[None, 1])
weights = tf.Variable(tf.random_normal([2, 1]), dtype=tf.float32)
bias = tf.Variable(tf.random_normal([1]), dtype=tf.float32)
logits = tf.add(tf.matmul(x, weights), bias)
z = tf.nn.sigmoid(logits)
cost = tf.reduce_mean((y * tf.log(z) + (1 - y) * tf.log(1 - z)) * -1)
optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(cost)
inp = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
op = np.array([[0], [1], [1], [1]])                      # OR targets
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for i in range(12001):
        res, _ = sess.run([cost, optimizer], feed_dict={x: inp, y: op})
        if i % 1000 == 0:
            print("iteration= ", i, "cost= ", res)
    print("Validating output for OR GATE")
    result = sess.run(z, feed_dict={x: inp})
    print(result)
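Why does the XOR cell below need a hidden layer when AND and OR did not? A single linear threshold unit w1*x1 + w2*x2 + b > 0 can reproduce AND and OR but provably not XOR, because XOR is not linearly separable. This standalone brute-force check (my addition; the grid ranges and resolution are arbitrary) makes that concrete:

import numpy as np

pts = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

def separable(target):
    grid = np.linspace(-2, 2, 21)        # candidate weights and biases
    for w1 in grid:
        for w2 in grid:
            for b in grid:
                pred = (pts @ np.array([w1, w2]) + b > 0).astype(int)
                if np.array_equal(pred, target):
                    return True
    return False

print("AND separable:", separable(np.array([0, 0, 0, 1])))  # True
print("OR  separable:", separable(np.array([0, 1, 1, 1])))  # True
print("XOR separable:", separable(np.array([0, 1, 1, 0])))  # False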
In [3]:
# XOR Gate
"""
A simple two-layer neural network learning the XOR function
"""
import tensorflow as tf
sess = tf.InteractiveSession()

# Desired input/output mapping of the XOR function:
x_ = [[0, 0], [0, 1], [1, 0], [1, 1]]    # input
# labels = [0, 1, 1, 0], one-hot encoded below: class [1,0]==0, class [0,1]==1
expect = [[1, 0], [0, 1], [0, 1], [1, 0]]
x = tf.placeholder("float", [None, 2])
y_ = tf.placeholder("float", [None, 2])  # two output classes
number_hidden_nodes = 20  # 20 hidden units leave room for negative and positive weights
W = tf.Variable(tf.random_uniform([2, number_hidden_nodes], -.01, .01))
b = tf.Variable(tf.random_uniform([number_hidden_nodes], -.01, .01))
hidden = tf.nn.relu(tf.matmul(x, W) + b)  # first layer
# XOR is the simplest function that needs a two-layer network:
# it is not linearly separable (see the sketch above this cell).
W2 = tf.Variable(tf.random_uniform([number_hidden_nodes, 2], -.1, .1))
hidden2 = tf.matmul(hidden, W2)           # second layer, bias omitted
y = tf.nn.softmax(hidden2)
# Define loss and optimizer (see the clipping note after this cell for a
# numerically safer variant of the log)
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(cross_entropy)
# Train
tf.global_variables_initializer().run()
for step in range(1000):
    feed_dict = {x: x_, y_: expect}  # feed the net with inputs and desired outputs
    e, a = sess.run([cross_entropy, train_step], feed_dict)
    if e < 1:
        break  # early stopping once the loss is small enough
    print("step %d : entropy %s" % (step, e))  # loss should decrease over time
# Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))  # argmax along dim 1
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))   # e.g. [True, False, True, True] -> 0.75
print("accuracy %s" % (accuracy.eval({x: x_, y_: expect})))
learned_output = tf.argmax(y, 1)
print(learned_output.eval({x: x_}))
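One caveat about `cross_entropy = -tf.reduce_sum(y_*tf.log(y))`: if the softmax saturates, some entry of `y` reaches exactly 0 and the loss turns into inf/NaN. A standalone demonstration of the usual `tf.clip_by_value` guard (my addition, not part of the original cell):

import tensorflow as tf
sess = tf.InteractiveSession()
y = tf.constant([[1.0, 0.0]])               # a fully saturated softmax output
print(sess.run(-tf.reduce_sum(tf.log(y))))  # inf, because log(0) = -inf
# clipping keeps the argument of the log strictly positive
safe = -tf.reduce_sum(tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
print(sess.run(safe))                       # ~23.03 instead of inf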
In [4]:
# XNOR Gate: same two-layer architecture as XOR, with the labels inverted
import tensorflow as tf
sess = tf.InteractiveSession()

x_ = [[0, 0], [0, 1], [1, 0], [1, 1]]
# labels = [1, 0, 0, 1], one-hot encoded: class [1,0]==0, class [0,1]==1
expect = [[0, 1], [1, 0], [1, 0], [0, 1]]
x = tf.placeholder("float", [None, 2])
y_ = tf.placeholder("float", [None, 2])
number_hidden_nodes = 20
W = tf.Variable(tf.random_uniform([2, number_hidden_nodes], -.01, .01))
b = tf.Variable(tf.random_uniform([number_hidden_nodes], -.01, .01))
hidden = tf.nn.relu(tf.matmul(x, W) + b)
W2 = tf.Variable(tf.random_uniform([number_hidden_nodes, 2], -.1, .1))
hidden2 = tf.matmul(hidden, W2)  # second layer, bias omitted as in the XOR cell
y = tf.nn.softmax(hidden2)
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(cross_entropy)
tf.global_variables_initializer().run()
for step in range(1000):
    feed_dict = {x: x_, y_: expect}
    e, a = sess.run([cross_entropy, train_step], feed_dict)
    if e < 1:
        break
    print("step %d : entropy %s" % (step, e))
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("accuracy %s" % (accuracy.eval({x: x_, y_: expect})))
learned_output = tf.argmax(y, 1)
print(learned_output.eval({x: x_}))
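All four cells use the TensorFlow 1.x graph API (`tf.placeholder`, `tf.Session`, `tf.train.GradientDescentOptimizer`). If your environment runs TensorFlow 2.x (an assumption, not something the notebook states), the compatibility layer restores those symbols:

import tensorflow.compat.v1 as tf  # TF1-style namespace inside TF2
tf.disable_eager_execution()       # placeholders and sessions need graph mode
# with these two lines replacing `import tensorflow as tf`, the cells above run unchanged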