In [1]:
%pwd
Out[1]:
In [2]:
import tensorflow as tf
In [3]:
# Two float32 length-3 vectors: P is all ones, Q is uniform random in [0, 1).
# BUG FIX: the original passed tf.float32 as the *second positional* argument
# of tf.Variable, which lands in the `trainable` slot, not `dtype`.
# The dtype must be passed by keyword (or omitted: tf.ones is already float32).
P = tf.Variable(tf.ones([3]), dtype=tf.float32)
Q = tf.Variable(tf.random_uniform(shape=[3], minval=0, maxval=1, dtype=tf.float32))
# R is the element-wise average of P and Q.
R = 0.5 * (P + Q)
In [4]:
# Evaluate P, Q and R in a throwaway session.
# FIX: the exported cell lost its indentation; restored so the code parses.
with tf.Session() as sess:
    # Variables must be initialized before they can be read.
    sess.run(tf.global_variables_initializer())
    # Fetch all three tensors in a single run call (one graph execution).
    p, q, r = sess.run([P, Q, R])
    print(p, q, r)
In [5]:
# Download (on first run) and load the MNIST dataset into ./MNIST_data.
# one_hot=True encodes each digit label as a 10-dimensional one-hot vector,
# matching the shape of the `yt` placeholder defined below.
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets('MNIST_data',one_hot=True)
In [6]:
# Input: flattened 28*28 grayscale images; None allows a variable batch size.
x = tf.placeholder(tf.float32, shape=[None, 784])
# Hidden layer parameters (784 -> 200).
# BUG FIX 1: all-zero weights in a hidden layer never break symmetry — every
# hidden unit computes the same value and receives the same gradient, so the
# layer cannot learn distinct features. Initialize with small random values.
# BUG FIX 2: tf.float32 passed positionally to tf.Variable lands in the
# `trainable` slot, not `dtype`; pass it by keyword.
w0 = tf.Variable(tf.truncated_normal([784, 200], stddev=0.1), dtype=tf.float32)
b0 = tf.Variable(tf.zeros([200]), dtype=tf.float32)
# Output layer parameters (200 -> 10 digit classes).
w1 = tf.Variable(tf.truncated_normal([200, 10], stddev=0.1), dtype=tf.float32)
b1 = tf.Variable(tf.zeros([10]), dtype=tf.float32)
# Targets: one-hot encoded digit labels.
yt = tf.placeholder(tf.float32, shape=[None, 10])
In [7]:
# Hidden layer activation. NOTE(review): softmax as a *hidden* activation is
# unusual (sigmoid/relu are conventional); kept to preserve the architecture.
z0 = tf.nn.softmax(tf.matmul(x, w0) + b0)
# Output logits. BUG FIX: the original squashed the output with sigmoid and
# fed it to a categorical cross-entropy — sigmoid outputs do not sum to 1
# across the 10 classes, and -sum(yt * log(yp)) produces NaN as soon as any
# yp reaches exactly 0. A 10-way classifier needs a softmax output, and the
# loss should be computed from the logits for numerical stability.
logits = tf.matmul(z0, w1) + b1
yp = tf.nn.softmax(logits)
# Mean per-example cross-entropy, computed stably from the raw logits.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=yt, logits=logits))
train_step = tf.train.AdadeltaOptimizer(0.5).minimize(cross_entropy)
In [8]:
# Train for 10000 mini-batches of 100 images, then evaluate on the test set.
# FIX 1: the exported cell lost its indentation; restored so the code parses.
# FIX 2: tf.arg_max is the deprecated spelling — use tf.argmax consistently.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(10000):
        # batch[0]: images (100 x 784); batch[1]: one-hot labels (100 x 10).
        batch = mnist.train.next_batch(100)
        _, cost = sess.run(fetches=[train_step, cross_entropy],
                           feed_dict={x: batch[0], yt: batch[1]})
        # Report the current mini-batch loss every 1000 steps.
        if i % 1000 == 0:
            print(cost)
    # Evaluation graph, built once after training: fraction of test images
    # whose predicted class (argmax of yp) matches the true class.
    correct_prediction = tf.equal(tf.argmax(yp, 1), tf.argmax(yt, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    acc = accuracy.eval(feed_dict={x: mnist.test.images, yt: mnist.test.labels}) * 100
    print("Accuracy of the ANN is {}".format(acc))
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]: