In [1]:
import os
import time
import numpy as np
import tensorflow as tf

tf.logging.set_verbosity(tf.logging.INFO)

In [3]:
from tensorflow.examples.tutorials.mnist import input_data

# Download (if needed) and extract MNIST; one_hot=True yields length-10 indicator labels.
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)


Successfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.
Extracting MNIST_data/train-images-idx3-ubyte.gz
Successfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Successfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Successfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz
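
For reference, one_hot=True makes each label a length-10 indicator vector, and read_data_sets splits 5000 training examples off into a validation set by default. A quick shape check (assuming the mnist object loaded above):

print(mnist.train.images.shape)   # (55000, 784): flattened 28x28 images
print(mnist.train.labels.shape)   # (55000, 10): one-hot digit labels
print(mnist.test.images.shape)    # (10000, 784)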

In [9]:
sess = tf.InteractiveSession()

# Placeholders for flattened 28x28 images and their one-hot labels.
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])

# Softmax-regression parameters, zero-initialized.
W = tf.Variable(tf.zeros([784, 10]), name="W")
b = tf.Variable(tf.zeros([10]), name="b")

# Unnormalized logits; the softmax is applied inside the loss op below.
y = tf.matmul(x, W) + b

cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))

train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

# Saver restricted to the two model variables, stored under explicit names.
saver = tf.train.Saver({"W": W, "b": b})

sess.run(tf.global_variables_initializer())

# Train for 10000 steps of 100 examples each.
for _ in range(10000):
    batch = mnist.train.next_batch(100)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})

# Checkpoint the trained weights.
saver.save(sess, "model.ckpt")

# Evaluate accuracy and loss on the test set.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
print(cross_entropy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))

sess.close()


0.9251
0.274636
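
The save() call above writes the checkpoint files (model.ckpt.data-*, model.ckpt.index, model.ckpt.meta) next to the notebook. One way to inspect what a checkpoint holds, sketched with tf.train.NewCheckpointReader (the path assumes the save above succeeded):

reader = tf.train.NewCheckpointReader("model.ckpt")
for name, shape in reader.get_variable_to_shape_map().items():
    print(name, shape)  # expect W [784, 10] and b [10]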

In [14]:
sess = tf.InteractiveSession()

# Rebuild the same graph; this time the placeholders get explicit names.
x = tf.placeholder(tf.float32, shape=[None, 784], name="x")
y_ = tf.placeholder(tf.float32, shape=[None, 10], name="y_")

W = tf.Variable(tf.zeros([784, 10]), name="W")
b = tf.Variable(tf.zeros([10]), name="b")

y = tf.matmul(x, W) + b

cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))

train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

saver = tf.train.Saver({"W": W, "b": b})

sess.run(tf.global_variables_initializer())
# restore() overwrites W and b with the checkpointed values,
# so the initializer call above is redundant here, but harmless.
saver.restore(sess, "model.ckpt")

# No retraining needed: the restored weights reproduce the saved results.
#for _ in range(10000):
#    batch = mnist.train.next_batch(100)
#    train_step.run(feed_dict={x: batch[0], y_: batch[1]})

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
print(cross_entropy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))

sess.close()


INFO:tensorflow:Restoring parameters from model.ckpt
0.9251
0.274636
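
Redefining the graph by hand, as above, is one option; tf.train.import_meta_graph can instead rebuild it from the model.ckpt.meta file that save() also wrote. A sketch (the tensor names are assumptions: the cell that saved the checkpoint left its placeholders unnamed, so they would surface as Placeholder:0 and Placeholder_1:0):

with tf.Graph().as_default(), tf.Session() as sess:
    restorer = tf.train.import_meta_graph("model.ckpt.meta")
    restorer.restore(sess, "model.ckpt")
    g = tf.get_default_graph()
    x = g.get_tensor_by_name("Placeholder:0")     # assumed name of x
    y_ = g.get_tensor_by_name("Placeholder_1:0")  # assumed name of y_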

In [12]:
# Note the execution count: this cell ran out of order, before In [14] above,
# continuing to train the then-live session for another 1000 steps.
for _ in range(1000):
    batch = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch[0], y_: batch[1]})

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels}))


0.9148
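
If the further-trained weights should be kept without clobbering the original checkpoint, Saver accepts a global_step suffix (a sketch; the step value 11000 is illustrative):

saver.save(sess, "model.ckpt", global_step=11000)  # writes model.ckpt-11000.*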

In [39]:
#input_dir_path = r'C:\Users\ericd\Desktop\Projects\playgrounds\python-playground\mnist_tfrecords'
input_dir_path = r'C:\Users\ericd\Desktop\Projects\playgrounds\python-playground\mnist'

# Collect every .tfrecord / .tfrecords file in the input directory.
filenames = [os.path.join(input_dir_path, f)
             for f in os.listdir(input_dir_path)
             if f.lower().endswith(('.tfrecord', '.tfrecords'))]
print("{} file(s) found.".format(len(filenames)))


1 file(s) found.
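
The .tfrecords file is assumed to hold one tf.train.Example per MNIST image, with the raw uint8 pixel bytes under 'image' and an integer class id under 'label', matching the feature spec the next cell parses. A sketch of how such a file could be written (write_mnist_tfrecords is a hypothetical helper, not part of this notebook):

def write_mnist_tfrecords(images, labels, path):
    """Hypothetical writer: images is a uint8 array [N, 784], labels an int array [N]."""
    with tf.python_io.TFRecordWriter(path) as writer:
        for image, label in zip(images, labels):
            example = tf.train.Example(features=tf.train.Features(feature={
                'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image.tobytes()])),
                'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])),
            }))
            writer.write(example.SerializeToString())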

In [55]:
with tf.Graph().as_default():
    # Queue that cycles through the input files for 10 epochs,
    # then raises OutOfRangeError.
    filename_queue = tf.train.string_input_producer(filenames, num_epochs=10)

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
        })

    # Raw uint8 pixels -> float32 in [-0.5, 0.5).
    image = tf.decode_raw(features['image'], tf.uint8)
    image.set_shape([28*28])
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)

    # Shuffle-batch examples with two reader threads.
    batch_size = 100
    images, labels = tf.train.shuffle_batch(
        [image, label], batch_size=batch_size, num_threads=2,
        capacity=1000 + 3 * batch_size,
        min_after_dequeue=1000)

    # Same softmax-regression model as above, fed from the queue
    # instead of placeholders.
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))

    y = tf.matmul(images, W) + b

    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=onehot_labels, logits=y))

    train_op = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

    # string_input_producer's epoch counter is a local variable,
    # so both initializers are required.
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    sess = tf.Session()
    sess.run(init_op)

    # Launch the queue-runner threads that fill the input pipeline.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    try:
        step = 0
        while not coord.should_stop():
            start_time = time.time()

            _, labels_values, loss_value = sess.run([train_op, labels, loss])

            duration = time.time() - start_time

            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
            step += 1
    except tf.errors.OutOfRangeError:
        # Raised by the input queue once the 10 epochs are exhausted.
        print('Done training for %d epochs, %d steps.' % (10, step))
    finally:
        print('Finished.')
        coord.request_stop()

    coord.join(threads)
    sess.close()


Step 0: loss = 2.30 (0.450 sec)
Step 100: loss = 0.00 (0.039 sec)
Step 200: loss = 0.14 (0.038 sec)
Step 300: loss = 21.28 (0.042 sec)
Step 400: loss = 0.00 (0.041 sec)
Step 500: loss = 0.06 (0.038 sec)
Step 600: loss = 0.34 (0.042 sec)
Step 700: loss = 0.00 (0.042 sec)
Step 800: loss = 0.14 (0.043 sec)
Step 900: loss = 0.23 (0.039 sec)
Step 1000: loss = 0.00 (0.039 sec)
Step 1100: loss = 0.00 (0.039 sec)
Step 1200: loss = 0.09 (0.038 sec)
Step 1300: loss = 0.00 (0.039 sec)
Step 1400: loss = 0.01 (0.039 sec)
Step 1500: loss = 0.01 (0.043 sec)
Step 1600: loss = 0.00 (0.040 sec)
Step 1700: loss = 0.02 (0.039 sec)
Step 1800: loss = 0.01 (0.038 sec)
Step 1900: loss = 0.00 (0.038 sec)
Step 2000: loss = 0.14 (0.039 sec)
Step 2100: loss = 0.07 (0.038 sec)
Step 2200: loss = 0.14 (0.042 sec)
Step 2300: loss = 0.00 (0.039 sec)
Step 2400: loss = 0.07 (0.040 sec)
Step 2500: loss = 0.00 (0.039 sec)
Step 2600: loss = 0.04 (0.039 sec)
Step 2700: loss = 0.04 (0.041 sec)
Step 2800: loss = 0.00 (0.039 sec)
Step 2900: loss = 0.02 (0.040 sec)
Step 3000: loss = 0.02 (0.039 sec)
Step 3100: loss = 0.00 (0.045 sec)
Step 3200: loss = 0.12 (0.040 sec)
Step 3300: loss = 0.00 (0.043 sec)
Step 3400: loss = 0.00 (0.045 sec)
Step 3500: loss = 0.00 (0.046 sec)
Step 3600: loss = 0.06 (0.039 sec)
Step 3700: loss = 0.00 (0.038 sec)
Step 3800: loss = 0.01 (0.040 sec)
Step 3900: loss = 0.11 (0.041 sec)
Step 4000: loss = 0.00 (0.038 sec)
Step 4100: loss = 0.01 (0.038 sec)
Step 4200: loss = 0.07 (0.039 sec)
Step 4300: loss = 0.00 (0.039 sec)
Step 4400: loss = 0.07 (0.041 sec)
Step 4500: loss = 0.10 (0.039 sec)
Step 4600: loss = 0.00 (0.038 sec)
Step 4700: loss = 0.00 (0.039 sec)
Step 4800: loss = 0.00 (0.041 sec)
Step 4900: loss = 0.00 (0.041 sec)
Step 5000: loss = 0.17 (0.039 sec)
Step 5100: loss = 0.02 (0.039 sec)
Step 5200: loss = 0.00 (0.046 sec)
Step 5300: loss = 0.01 (0.039 sec)
Step 5400: loss = 0.01 (0.039 sec)
Step 5500: loss = 0.00 (0.042 sec)
Step 5600: loss = 0.09 (0.039 sec)
Step 5700: loss = 0.16 (0.039 sec)
Step 5800: loss = 0.01 (0.039 sec)
Step 5900: loss = 0.01 (0.039 sec)
Done training for 10 epochs, 6000 steps.
Finished.
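
Queue runners like the pipeline above were later superseded by tf.data; an equivalent input side could look like the following sketch (assumes TF 1.4+; the model and training loop would stay the same, with OutOfRangeError still ending the run):

def _parse(serialized):
    features = tf.parse_single_example(serialized, {
        'image': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64),
    })
    image = tf.decode_raw(features['image'], tf.uint8)
    image.set_shape([28 * 28])
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5
    label = tf.cast(features['label'], tf.int32)
    return image, label

dataset = (tf.data.TFRecordDataset(filenames)
           .map(_parse)
           .shuffle(1000)
           .repeat(10)   # same 10 epochs as above
           .batch(100))
images, labels = dataset.make_one_shot_iterator().get_next()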
