In [1]:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist=input_data.read_data_sets("/tmp/data/", one_hot=True) #one component is on, all others are off
#10 classes, 0 through 9
#one-hot outputs: 0=[1,0,0,0,0,0,0,0,0,0], the position of the 1 marks the class
#                 3=[0,0,0,1,0,0,0,0,0,0]
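#quick sanity check (hypothetical snippet, not part of the network; numpy is pulled in by tensorflow):
#  import numpy as np
#  np.eye(10)[3]  #-> array([0., 0., 0., 1., 0., 0., 0., 0., 0., 0.])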
n_nodes_hl1=500 #hl1 = hidden layer 1
n_nodes_hl2=500
n_nodes_hl3=500
n_classes=10 #number of categories
batch_size=100 #divvies up the data into batches of 100 images, rather than loading all samples at once
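#with TF's standard MNIST split of 55,000 training images, that works out to 550 batches per epoch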

x=tf.placeholder('float',[None, 784]) #784 = 28x28 pixels, flattened into one row per image
y=tf.placeholder('float') #one-hot labels

def neural_network_model(data):
    #(inputs*weights)+biases
    hidden_1_layer={'weights':tf.Variable(tf.random_normal([784, n_nodes_hl1])), 
                    'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}
    
    hidden_2_layer={'weights':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])), 
                    'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    
    hidden_3_layer={'weights':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])), 
                    'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    
    output_layer={'weights':tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                  'biases': tf.Variable(tf.random_normal([n_classes]))}
    
    l1=tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1=tf.nn.relu(l1)
    
    l2=tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2=tf.nn.relu(l2)
    
    l3=tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3=tf.nn.relu(l3)
    
    output=tf.matmul(l3, output_layer['weights'])+ output_layer['biases']
    
    return output
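
#shape check (a sketch of the flow through the model for a batch of N images):
#  data [N, 784]  x weights [784, 500] -> l1 [N, 500]
#  l1   [N, 500]  x weights [500, 500] -> l2 [N, 500]
#  l2   [N, 500]  x weights [500, 500] -> l3 [N, 500]
#  l3   [N, 500]  x weights [500, 10]  -> output [N, 10], one raw logit per class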
  
def train_neural_network(x):
    prediction=neural_network_model(x)
    cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=prediction, labels=y)) #the _v2 op replaces the deprecated softmax_cross_entropy_with_logits
    optimizer=tf.train.AdamOptimizer().minimize(cost) #Adam's default learning_rate is 0.001
    
    hm_epochs=10 #hm = how many; one epoch is a full pass over the training data
    
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer()) #replaces the deprecated initialize_all_variables
        
        for epoch in range(hm_epochs):
            epoch_loss=0
            for _ in range(int(mnist.train.num_examples/batch_size)):
                epoch_x,epoch_y=mnist.train.next_batch(batch_size) #epoch_x: [100, 784] images, epoch_y: [100, 10] one-hot labels
                _,c=sess.run([optimizer,cost], feed_dict={x:epoch_x, y:epoch_y})
                epoch_loss+=c
            print('Epoch', epoch, 'completed out of ', hm_epochs, 'loss:', epoch_loss)
            
        correct=tf.equal(tf.argmax(prediction,1), tf.argmax(y,1)) #index of the largest logit vs. index of the 1 in the label
        
        accuracy=tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x:mnist.test.images, y:mnist.test.labels}))
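        #hypothetical extra step while the session is still open: peek at the
        #model's guesses for a few test images, e.g.
        #  print(sess.run(tf.argmax(prediction, 1),
        #                 feed_dict={x: mnist.test.images[:3]}))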
        

        
        
train_neural_network(x)


Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Extracting /tmp/data/train-images-idx3-ubyte.gz
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Extracting /tmp/data/train-labels-idx1-ubyte.gz
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting /tmp/data/t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting /tmp/data/t10k-labels-idx1-ubyte.gz
Epoch 0 completed out of  10 loss: 1664569.60834
Epoch 1 completed out of  10 loss: 414550.968437
Epoch 2 completed out of  10 loss: 229022.354944
Epoch 3 completed out of  10 loss: 136420.393392
Epoch 4 completed out of  10 loss: 88019.3560204
Epoch 5 completed out of  10 loss: 56024.820509
Epoch 6 completed out of  10 loss: 37434.4423951
Epoch 7 completed out of  10 loss: 29640.3100017
Epoch 8 completed out of  10 loss: 24399.9572706
Epoch 9 completed out of  10 loss: 23351.0056713
Accuracy: 0.9534
