In [1]:
from __future__ import absolute_import, division, print_function

# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras

# Helper libraries
import numpy as np
import matplotlib.pyplot as plt

print(tf.__version__)


2.0.0-alpha0

In [2]:
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()


Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz
32768/29515 [=================================] - 0s 6us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz
26427392/26421880 [==============================] - 47s 2us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz
8192/5148 [===============================================] - 0s 0us/step
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz
4423680/4422102 [==============================] - 10s 2us/step
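
A quick sanity check on what load_data() returns: Fashion-MNIST ships 60,000 training and 10,000 test examples, each a 28x28 grayscale image with an integer label. A minimal sketch:

print(train_images.shape, train_labels.shape)  # (60000, 28, 28) (60000,)
print(test_images.shape, test_labels.shape)    # (10000, 28, 28) (10000,)
print(np.unique(train_labels))                 # integer labels 0 through 9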

In [7]:
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
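
The labels come back as plain integers, so class_names acts as the lookup table from label index to human-readable name; for example:

print(class_names[train_labels[0]])  # name of the first training example's class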

In [16]:
# Explore the data: show the first training image with its raw pixel values
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()

[figure: the first training image rendered with a colorbar; raw pixel values span 0 to 255]

In [17]:
train_images = train_images / 255.0
test_images = test_images / 255.0
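
Raw pixels are integers in [0, 255]; dividing by 255.0 rescales them to floats in [0, 1], a range the network trains well on. A quick check of the rescaled range:

print(train_images.min(), train_images.max())  # expect 0.0 and 1.0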

In [18]:
# Display the first 20 training images with their class names
plt.figure(figsize=(10, 10))
for i in range(20):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
plt.show()

[figure: 5x5 grid of the first 20 training images, each labeled with its class name]

In [34]:
# Subclassed alternative to the Sequential API below; same architecture,
# defined here for reference but not used in training.
class MyModel(tf.keras.Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.flatten = keras.layers.Flatten()
        self.dense1 = keras.layers.Dense(128, activation='relu')
        self.dense2 = keras.layers.Dense(10, activation='softmax')

    def call(self, x):
        return self.dense2(self.dense1(self.flatten(x)))


def build_model():
    # Model architecture: flatten 28x28 images, one hidden ReLU layer, softmax over 10 classes
    model = keras.Sequential([
        keras.layers.Flatten(input_shape=(28, 28)),
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dense(10, activation='softmax')
    ])
    # Compile with Adam; sparse_categorical_crossentropy matches the integer labels
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    return model
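
The loss choice matters here: sparse_categorical_crossentropy expects integer labels (0-9), which is exactly what load_data() returns; categorical_crossentropy would be the one-hot counterpart. A minimal sketch of the loss on one made-up prediction (the numbers are illustrative only):

y_true = np.array([9])                     # integer class label
y_pred = np.array([[0.05] * 9 + [0.55]])   # toy probability vector (sums to 1)
print(keras.losses.sparse_categorical_crossentropy(y_true, y_pred).numpy())
# -log(0.55), roughly 0.598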

In [37]:
class PrintDot(keras.callbacks.Callback):
    # Compact progress log: one dot per epoch, newline every 100 epochs
    def on_epoch_end(self, epoch, logs=None):
        if epoch % 100 == 0: print('')
        print('.', end='')

EPOCHS = 10  # matches the run below (Epoch 1/10 ... 10/10)
model = build_model()
history = model.fit(train_images, train_labels,
                    epochs=EPOCHS,
                    validation_data=(test_images, test_labels),
                    callbacks=[PrintDot()])


Train on 60000 samples, validate on 10000 samples
Epoch 1/10
59712/60000 [============================>.] - ETA: 0s - loss: 0.4952 - accuracy: 0.8249
60000/60000 [==============================] - 4s 69us/sample - loss: 0.4949 - accuracy: 0.8251 - val_loss: 0.4202 - val_accuracy: 0.8516
Epoch 2/10
60000/60000 [==============================] - 4s 75us/sample - loss: 0.3729 - accuracy: 0.8663 - val_loss: 0.3816 - val_accuracy: 0.8625
Epoch 3/10
60000/60000 [==============================] - 4s 75us/sample - loss: 0.3351 - accuracy: 0.8790 - val_loss: 0.3656 - val_accuracy: 0.8650
Epoch 4/10
60000/60000 [==============================] - 5s 76us/sample - loss: 0.3120 - accuracy: 0.8852 - val_loss: 0.3571 - val_accuracy: 0.8714
Epoch 5/10
60000/60000 [==============================] - 5s 77us/sample - loss: 0.2921 - accuracy: 0.8915 - val_loss: 0.3548 - val_accuracy: 0.8752
Epoch 6/10
60000/60000 [==============================] - 4s 74us/sample - loss: 0.2786 - accuracy: 0.8964 - val_loss: 0.3564 - val_accuracy: 0.8759
Epoch 7/10
60000/60000 [==============================] - 4s 72us/sample - loss: 0.2666 - accuracy: 0.9004 - val_loss: 0.3414 - val_accuracy: 0.8775
Epoch 8/10
60000/60000 [==============================] - 4s 68us/sample - loss: 0.2535 - accuracy: 0.9049 - val_loss: 0.3303 - val_accuracy: 0.8798
Epoch 9/10
60000/60000 [==============================] - 4s 72us/sample - loss: 0.2442 - accuracy: 0.9087 - val_loss: 0.3322 - val_accuracy: 0.8804
Epoch 10/10
60000/60000 [==============================] - 4s 66us/sample - loss: 0.2359 - accuracy: 0.9110 - val_loss: 0.3336 - val_accuracy: 0.8807

In [29]:



Train on 60000 samples, validate on 10000 samples
Epoch 1/10
60000/60000 [==============================] - 4s 59us/sample - loss: 0.1396 - accuracy: 0.9481 - val_loss: 0.4028 - val_accuracy: 0.8866
Epoch 2/10
60000/60000 [==============================] - 4s 68us/sample - loss: 0.1360 - accuracy: 0.9491 - val_loss: 0.4250 - val_accuracy: 0.8854
Epoch 3/10
60000/60000 [==============================] - 4s 68us/sample - loss: 0.1324 - accuracy: 0.9499 - val_loss: 0.4453 - val_accuracy: 0.8875
Epoch 4/10
60000/60000 [==============================] - 4s 69us/sample - loss: 0.1307 - accuracy: 0.9505 - val_loss: 0.4311 - val_accuracy: 0.8859
Epoch 5/10
60000/60000 [==============================] - 4s 73us/sample - loss: 0.1281 - accuracy: 0.9513 - val_loss: 0.4369 - val_accuracy: 0.8830
Epoch 6/10
60000/60000 [==============================] - 4s 68us/sample - loss: 0.1253 - accuracy: 0.9529 - val_loss: 0.4291 - val_accuracy: 0.8850
Epoch 7/10
60000/60000 [==============================] - 4s 59us/sample - loss: 0.1231 - accuracy: 0.9541 - val_loss: 0.4488 - val_accuracy: 0.8847
Epoch 8/10
60000/60000 [==============================] - 4s 61us/sample - loss: 0.1216 - accuracy: 0.9548 - val_loss: 0.4614 - val_accuracy: 0.8821
Epoch 9/10
60000/60000 [==============================] - 4s 72us/sample - loss: 0.1176 - accuracy: 0.9559 - val_loss: 0.4667 - val_accuracy: 0.8814
Epoch 10/10
60000/60000 [==============================] - 4s 65us/sample - loss: 0.1167 - accuracy: 0.9570 - val_loss: 0.4593 - val_accuracy: 0.8854

In [31]:
# Evaluate on the held-out test set
results = model.evaluate(test_images, test_labels)
print(results)  # [test loss, test accuracy]


10000/10000 [==============================] - 0s 26us/sample - loss: 0.4593 - accuracy: 0.8854
[0.45925962738990783, 0.8854]
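
With evaluation done, model.predict turns each test image into a 10-way probability vector; a minimal sketch of reading off the predicted class via argmax:

predictions = model.predict(test_images)
print(predictions.shape)                       # (10000, 10): one probability per class
print(class_names[np.argmax(predictions[0])])  # predicted class for the first test image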

In [35]:
history_dict = history.history
print(history_dict.keys())


dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])
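
Those four keys are enough to plot the learning curves; a minimal sketch comparing training and validation accuracy per epoch (a widening gap between the two is the usual sign of overfitting):

acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
epochs_range = range(1, len(acc) + 1)

plt.plot(epochs_range, acc, 'bo', label='Training accuracy')
plt.plot(epochs_range, val_acc, 'b', label='Validation accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()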

In [36]:
model.summary()


Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
flatten (Flatten)            (None, 784)               0         
_________________________________________________________________
dense (Dense)                (None, 128)               100480    
_________________________________________________________________
dense_1 (Dense)              (None, 10)                1290      
=================================================================
Total params: 101,770
Trainable params: 101,770
Non-trainable params: 0
_________________________________________________________________
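
The parameter counts follow directly from the layer shapes: Flatten only reshapes the 28x28 input into 784 values (no weights); the hidden layer has 784 x 128 weights plus 128 biases = 100,480; the output layer has 128 x 10 + 10 = 1,290; together they give the 101,770 total shown above.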
