In [1]:
from __future__ import print_function

from keras.layers import Input, Dense, Dropout, BatchNormalization
from keras.models import Model, Sequential, load_model
from keras.datasets import mnist
from keras.callbacks import EarlyStopping, TensorBoard
from keras.optimizers import RMSprop
from keras.utils import plot_model
from keras import regularizers
from IPython.display import SVG, Image
from matplotlib import rc

import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pydot
import graphviz


Using TensorFlow backend.

In [2]:
%matplotlib inline
font = {'family' : 'monospace',
        'weight' : 'bold',
        'size'   : 20}

rc('font', **font)

In [3]:
num_classes = 10
input_dim = 784
batch_size = 256

(x_train, y_train), (x_val, y_val) = mnist.load_data()

x_train = x_train.astype('float32') / 255.
x_val = x_val.astype('float32') / 255.

# x_train = np.concatenate((x_train, x_val))

x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_val = x_val.reshape((len(x_val), np.prod(x_val.shape[1:])))
# print(x_train.shape)

y_train = keras.utils.to_categorical(y_train, num_classes)
y_val = keras.utils.to_categorical(y_val, num_classes)

Simple feed-forward network with two hidden layers


In [4]:
hidden1_dim = 512
hidden2_dim = 512

In [5]:
input_data = Input(shape=(input_dim,), dtype='float32', name='main_input')
x = Dense(hidden1_dim, activation='relu', kernel_initializer='normal')(input_data)
x = Dropout(0.2)(x)
x = Dense(hidden2_dim, activation='relu', kernel_initializer='normal')(x)
x = Dropout(0.2)(x)
output_layer = Dense(num_classes, activation='sigmoid', kernel_initializer='normal')(x)

model = Model(input_data, output_layer)

# Note: with binary_crossentropy and sigmoid outputs, Keras treats each of the
# ten classes as an independent binary label, so the 'accuracy' metric below is
# per-label binary accuracy rather than categorical accuracy.
model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])

In [6]:
# model = keras.models.load_model('models/model.h5')
model.fit(x_train, y_train, 
          batch_size=batch_size,
          epochs=20,
          shuffle=True,
          verbose=0,
         validation_data=(x_val, y_val))
# model.save('models/model.h5')


Out[6]:
<keras.callbacks.History at 0x243d5daf278>

In [7]:
score = model.evaluate(x_val, y_val, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])


Test loss: 0.016705912156
Test accuracy: 0.996639996147
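As noted in the compile step, the accuracy above is per-label binary accuracy, which overstates classification performance. A minimal sketch (an aside, not part of the original run) of computing the usual categorical accuracy from the same model via argmax:

# Hedged sketch: categorical accuracy computed by hand, since the compiled
# 'accuracy' metric is binary accuracy when the loss is binary_crossentropy.
preds = model.predict(x_val)
cat_acc = np.mean(np.argmax(preds, axis=1) == np.argmax(y_val, axis=1))
print('Categorical test accuracy:', cat_acc)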

In [8]:
fig = plt.figure(figsize=(20,10))
plt.plot(model.history.history['val_acc'])
plt.plot(model.history.history['acc'])
plt.axhline(y=score[1], c="red")
plt.text(0, score[1], "test: " + str(round(score[1], 4)), fontdict=font)
plt.title('model accuracy for neural net with 2 hidden layers')
plt.ylabel('accuracy')
plt.xlabel('epochs')
plt.legend(['valid', 'train'], loc='lower right')
plt.show()



In [9]:
plot_model(model, to_file='images/MNIST_FF_standard.png', show_shapes=True, show_layer_names=False, rankdir='TB')

In [10]:
Image("images/MNIST_FF_standard.png")


Out[10]:

Deep Autoencoder


In [11]:
encoding_dim1 = 128
encoding_dim2 = 64
encoding_dim3 = 32
decoding_dim1 = 64
decoding_dim2 = 128
decoding_dim3 = input_dim
epochs = 100
batch_size = 256

In [12]:
input_img = Input(shape=(input_dim,))
encoded = Dense(encoding_dim1, activation='relu')(input_img)
encoded = Dense(encoding_dim2, activation='relu')(encoded)
encoded = Dense(encoding_dim3, activation='relu')(encoded)

decoded = Dense(decoding_dim1, activation='relu')(encoded)
decoded = Dense(decoding_dim2, activation='relu')(decoded)
decoded = Dense(decoding_dim3, activation='sigmoid')(decoded)

In [13]:
deep_autoencoder = Model(input_img, decoded)
deep_autoencoder.compile(optimizer=RMSprop(), loss='binary_crossentropy')

In [14]:
# deep_autoencoder = keras.models.load_model('models/deep_autoencoder.h5')
deep_autoencoder.fit(x_train, x_train,
                epochs=epochs,
                batch_size=batch_size,
                shuffle=True,
                validation_data=(x_val, x_val))
# deep_autoencoder.save('models/deep_autoencoder.h5')


Train on 60000 samples, validate on 10000 samples
Epoch 1/100
60000/60000 [==============================] - 8s 132us/step - loss: 0.2256 - val_loss: 0.1779
Epoch 2/100
60000/60000 [==============================] - 6s 103us/step - loss: 0.1670 - val_loss: 0.1576
Epoch 3/100
60000/60000 [==============================] - 6s 103us/step - loss: 0.1514 - val_loss: 0.1433
Epoch 4/100
60000/60000 [==============================] - 6s 102us/step - loss: 0.1409 - val_loss: 0.1331
Epoch 5/100
60000/60000 [==============================] - 6s 104us/step - loss: 0.1334 - val_loss: 0.1268
Epoch 6/100
60000/60000 [==============================] - 6s 103us/step - loss: 0.1281 - val_loss: 0.1245
Epoch 7/100
60000/60000 [==============================] - 6s 103us/step - loss: 0.1237 - val_loss: 0.1229
Epoch 8/100
60000/60000 [==============================] - 6s 103us/step - loss: 0.1205 - val_loss: 0.1165
Epoch 9/100
60000/60000 [==============================] - 6s 104us/step - loss: 0.1178 - val_loss: 0.1148
Epoch 10/100
60000/60000 [==============================] - 6s 107us/step - loss: 0.1156 - val_loss: 0.1135
Epoch 11/100
60000/60000 [==============================] - 6s 107us/step - loss: 0.1136 - val_loss: 0.1119
Epoch 12/100
60000/60000 [==============================] - 6s 106us/step - loss: 0.1118 - val_loss: 0.1098
Epoch 13/100
60000/60000 [==============================] - 6s 102us/step - loss: 0.1104 - val_loss: 0.1076
Epoch 14/100
60000/60000 [==============================] - 6s 101us/step - loss: 0.1091 - val_loss: 0.1095
Epoch 15/100
60000/60000 [==============================] - 6s 103us/step - loss: 0.1079 - val_loss: 0.1045
Epoch 16/100
60000/60000 [==============================] - 6s 103us/step - loss: 0.1068 - val_loss: 0.1046
Epoch 17/100
60000/60000 [==============================] - 7s 122us/step - loss: 0.1059 - val_loss: 0.1045
Epoch 18/100
60000/60000 [==============================] - 7s 119us/step - loss: 0.1050 - val_loss: 0.1045
Epoch 19/100
60000/60000 [==============================] - 6s 102us/step - loss: 0.1041 - val_loss: 0.1048
Epoch 20/100
60000/60000 [==============================] - 6s 101us/step - loss: 0.1034 - val_loss: 0.1031
Epoch 21/100
60000/60000 [==============================] - 6s 103us/step - loss: 0.1027 - val_loss: 0.1028
Epoch 22/100
60000/60000 [==============================] - 6s 100us/step - loss: 0.1021 - val_loss: 0.1025
Epoch 23/100
60000/60000 [==============================] - 6s 102us/step - loss: 0.1015 - val_loss: 0.1007
Epoch 24/100
60000/60000 [==============================] - 6s 101us/step - loss: 0.1009 - val_loss: 0.1021
Epoch 25/100
60000/60000 [==============================] - 6s 101us/step - loss: 0.1004 - val_loss: 0.0993
Epoch 26/100
60000/60000 [==============================] - 6s 101us/step - loss: 0.0999 - val_loss: 0.1010
Epoch 27/100
60000/60000 [==============================] - 6s 100us/step - loss: 0.0994 - val_loss: 0.1015
Epoch 28/100
60000/60000 [==============================] - 6s 101us/step - loss: 0.0990 - val_loss: 0.0988
Epoch 29/100
60000/60000 [==============================] - 6s 100us/step - loss: 0.0986 - val_loss: 0.0958
Epoch 30/100
60000/60000 [==============================] - 6s 100us/step - loss: 0.0983 - val_loss: 0.0989
Epoch 31/100
60000/60000 [==============================] - 6s 99us/step - loss: 0.0979 - val_loss: 0.0949
Epoch 32/100
60000/60000 [==============================] - 6s 100us/step - loss: 0.0976 - val_loss: 0.0965
Epoch 33/100
60000/60000 [==============================] - 6s 99us/step - loss: 0.0973 - val_loss: 0.0968
Epoch 34/100
60000/60000 [==============================] - 6s 99us/step - loss: 0.0970 - val_loss: 0.0961
Epoch 35/100
60000/60000 [==============================] - 6s 100us/step - loss: 0.0967 - val_loss: 0.0965
Epoch 36/100
60000/60000 [==============================] - 7s 123us/step - loss: 0.0965 - val_loss: 0.0951
Epoch 37/100
60000/60000 [==============================] - 7s 118us/step - loss: 0.0962 - val_loss: 0.0950
Epoch 38/100
60000/60000 [==============================] - 6s 101us/step - loss: 0.0960 - val_loss: 0.0946
Epoch 39/100
60000/60000 [==============================] - 6s 101us/step - loss: 0.0957 - val_loss: 0.0977
Epoch 40/100
60000/60000 [==============================] - 6s 101us/step - loss: 0.0955 - val_loss: 0.0946
Epoch 41/100
60000/60000 [==============================] - 6s 100us/step - loss: 0.0953 - val_loss: 0.0951
Epoch 42/100
60000/60000 [==============================] - 6s 99us/step - loss: 0.0951 - val_loss: 0.0954
Epoch 43/100
60000/60000 [==============================] - 6s 101us/step - loss: 0.0949 - val_loss: 0.0948
Epoch 44/100
60000/60000 [==============================] - 6s 103us/step - loss: 0.0947 - val_loss: 0.0979
Epoch 45/100
60000/60000 [==============================] - 6s 102us/step - loss: 0.0945 - val_loss: 0.0964
Epoch 46/100
60000/60000 [==============================] - 6s 102us/step - loss: 0.0943 - val_loss: 0.0964
Epoch 47/100
60000/60000 [==============================] - 6s 100us/step - loss: 0.0942 - val_loss: 0.0923
Epoch 48/100
60000/60000 [==============================] - 6s 99us/step - loss: 0.0940 - val_loss: 0.0918
Epoch 49/100
60000/60000 [==============================] - 6s 106us/step - loss: 0.0938 - val_loss: 0.0939
Epoch 50/100
60000/60000 [==============================] - 7s 122us/step - loss: 0.0937 - val_loss: 0.0917
Epoch 51/100
60000/60000 [==============================] - 6s 107us/step - loss: 0.0935 - val_loss: 0.0924
Epoch 52/100
60000/60000 [==============================] - 6s 99us/step - loss: 0.0933 - val_loss: 0.0949
Epoch 53/100
60000/60000 [==============================] - 6s 99us/step - loss: 0.0932 - val_loss: 0.0923
Epoch 54/100
60000/60000 [==============================] - 6s 100us/step - loss: 0.0931 - val_loss: 0.0925
Epoch 55/100
60000/60000 [==============================] - 8s 127us/step - loss: 0.0929 - val_loss: 0.0919
Epoch 56/100
60000/60000 [==============================] - 8s 140us/step - loss: 0.0928 - val_loss: 0.0920
Epoch 57/100
60000/60000 [==============================] - 7s 117us/step - loss: 0.0927 - val_loss: 0.0938
Epoch 58/100
60000/60000 [==============================] - 7s 120us/step - loss: 0.0925 - val_loss: 0.0910
Epoch 59/100
60000/60000 [==============================] - 6s 101us/step - loss: 0.0923 - val_loss: 0.0935
Epoch 60/100
60000/60000 [==============================] - 7s 111us/step - loss: 0.0923 - val_loss: 0.0931
Epoch 61/100
60000/60000 [==============================] - 6s 106us/step - loss: 0.0921 - val_loss: 0.0938
Epoch 62/100
60000/60000 [==============================] - 6s 99us/step - loss: 0.0920 - val_loss: 0.0923
Epoch 63/100
60000/60000 [==============================] - 6s 98us/step - loss: 0.0919 - val_loss: 0.0900
Epoch 64/100
60000/60000 [==============================] - 6s 101us/step - loss: 0.0918 - val_loss: 0.0922
Epoch 65/100
60000/60000 [==============================] - 6s 100us/step - loss: 0.0916 - val_loss: 0.0915
Epoch 66/100
60000/60000 [==============================] - 6s 105us/step - loss: 0.0915 - val_loss: 0.0929
Epoch 67/100
60000/60000 [==============================] - 7s 110us/step - loss: 0.0914 - val_loss: 0.0921
Epoch 68/100
60000/60000 [==============================] - 7s 116us/step - loss: 0.0912 - val_loss: 0.0907
Epoch 69/100
60000/60000 [==============================] - 7s 121us/step - loss: 0.0912 - val_loss: 0.0908
Epoch 70/100
60000/60000 [==============================] - 7s 124us/step - loss: 0.0911 - val_loss: 0.0915
Epoch 71/100
60000/60000 [==============================] - 7s 123us/step - loss: 0.0910 - val_loss: 0.0907
Epoch 72/100
60000/60000 [==============================] - 7s 121us/step - loss: 0.0909 - val_loss: 0.0899
Epoch 73/100
60000/60000 [==============================] - 9s 149us/step - loss: 0.0907 - val_loss: 0.0912
Epoch 74/100
60000/60000 [==============================] - 8s 133us/step - loss: 0.0907 - val_loss: 0.0905
Epoch 75/100
60000/60000 [==============================] - 6s 107us/step - loss: 0.0905 - val_loss: 0.0917
Epoch 76/100
60000/60000 [==============================] - 7s 122us/step - loss: 0.0905 - val_loss: 0.0891
Epoch 77/100
60000/60000 [==============================] - 7s 112us/step - loss: 0.0904 - val_loss: 0.0906
Epoch 78/100
60000/60000 [==============================] - 7s 113us/step - loss: 0.0903 - val_loss: 0.0919
Epoch 79/100
60000/60000 [==============================] - 7s 115us/step - loss: 0.0901 - val_loss: 0.0913
Epoch 80/100
60000/60000 [==============================] - 7s 118us/step - loss: 0.0901 - val_loss: 0.0902
Epoch 81/100
60000/60000 [==============================] - 6s 106us/step - loss: 0.0900 - val_loss: 0.0892
Epoch 82/100
60000/60000 [==============================] - 6s 106us/step - loss: 0.0899 - val_loss: 0.0893
Epoch 83/100
60000/60000 [==============================] - 6s 103us/step - loss: 0.0898 - val_loss: 0.0894
Epoch 84/100
60000/60000 [==============================] - 6s 104us/step - loss: 0.0897 - val_loss: 0.0896
Epoch 85/100
60000/60000 [==============================] - 6s 106us/step - loss: 0.0897 - val_loss: 0.0906
Epoch 86/100
60000/60000 [==============================] - 7s 118us/step - loss: 0.0896 - val_loss: 0.0893
Epoch 87/100
60000/60000 [==============================] - 7s 111us/step - loss: 0.0895 - val_loss: 0.0896
Epoch 88/100
60000/60000 [==============================] - 7s 109us/step - loss: 0.0894 - val_loss: 0.0905
Epoch 89/100
60000/60000 [==============================] - 7s 115us/step - loss: 0.0894 - val_loss: 0.0877
Epoch 90/100
60000/60000 [==============================] - 7s 122us/step - loss: 0.0893 - val_loss: 0.0903
Epoch 91/100
60000/60000 [==============================] - 9s 143us/step - loss: 0.0892 - val_loss: 0.0873
Epoch 92/100
60000/60000 [==============================] - 7s 114us/step - loss: 0.0892 - val_loss: 0.0899
Epoch 93/100
60000/60000 [==============================] - 7s 123us/step - loss: 0.0891 - val_loss: 0.0921
Epoch 94/100
60000/60000 [==============================] - 7s 124us/step - loss: 0.0890 - val_loss: 0.0901
Epoch 95/100
60000/60000 [==============================] - 6s 106us/step - loss: 0.0890 - val_loss: 0.0888
Epoch 96/100
60000/60000 [==============================] - 6s 108us/step - loss: 0.0889 - val_loss: 0.0900
Epoch 97/100
60000/60000 [==============================] - 7s 109us/step - loss: 0.0888 - val_loss: 0.0879
Epoch 98/100
60000/60000 [==============================] - 6s 98us/step - loss: 0.0888 - val_loss: 0.0885
Epoch 99/100
60000/60000 [==============================] - 6s 101us/step - loss: 0.0887 - val_loss: 0.0871
Epoch 100/100
60000/60000 [==============================] - 6s 104us/step - loss: 0.0887 - val_loss: 0.0894
Out[14]:
<keras.callbacks.History at 0x243db3619e8>
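The run above always goes the full 100 epochs even though val_loss flattens out well before the end. The EarlyStopping callback imported at the top is never used; a minimal sketch of how it could be wired into the same fit call (the patience value is an assumption, not from the original run):

# Hedged sketch: stop once val_loss stops improving for a few epochs.
early_stop = EarlyStopping(monitor='val_loss', patience=5)
deep_autoencoder.fit(x_train, x_train,
                     epochs=epochs,
                     batch_size=batch_size,
                     shuffle=True,
                     validation_data=(x_val, x_val),
                     callbacks=[early_stop])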

In [15]:
score = deep_autoencoder.evaluate(x_val, x_val, verbose=0)
print(score)


0.0894073861599
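The deep autoencoder is trained end to end, so its 32-dimensional bottleneck codes are never exposed directly. A minimal sketch (illustrative, not part of the original notebook) of wrapping the encoder half in its own Model to inspect those codes, reusing the input_img and encoded tensors from cell [12]:

# Hedged sketch: expose the 32-d bottleneck of the trained deep autoencoder.
deep_encoder = Model(input_img, encoded)
codes = deep_encoder.predict(x_val)
print(codes.shape)  # should be (10000, 32)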

In [16]:
decoded_imgs = deep_autoencoder.predict(x_val)

In [17]:
n = 10  # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_val[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()


Deep Autoencoder Classifier


In [18]:
dc_encoded = Dense(encoding_dim1, activation='relu')(input_img)
dc_encoded = Dense(encoding_dim2, activation='relu')(dc_encoded)
dc_encoded = Dense(encoding_dim3, activation='relu')(dc_encoded)
dc_class_layer = Dense(num_classes, activation='sigmoid')(dc_encoded)

dc = Model(inputs=input_img, outputs=dc_class_layer)

dc.layers[1].set_weights(deep_autoencoder.layers[1].get_weights())
dc.layers[2].set_weights(deep_autoencoder.layers[2].get_weights())
dc.layers[3].set_weights(deep_autoencoder.layers[3].get_weights())
dc.compile(loss='binary_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])

In [19]:
# dc = keras.models.load_model('models/dc.h5')
dc.fit(x_train, y_train
       , epochs=7
       , verbose=True
       , batch_size=batch_size
       , validation_data=(x_val, y_val)
       , shuffle=True)
# dc.save('models/dc.h5')


Train on 60000 samples, validate on 10000 samples
Epoch 1/7
60000/60000 [==============================] - 2s 40us/step - loss: 0.1000 - acc: 0.9660 - val_loss: 0.0411 - val_acc: 0.9861
Epoch 2/7
60000/60000 [==============================] - 2s 35us/step - loss: 0.0294 - acc: 0.9903 - val_loss: 0.0248 - val_acc: 0.9918
Epoch 3/7
60000/60000 [==============================] - 2s 34us/step - loss: 0.0210 - acc: 0.9931 - val_loss: 0.0202 - val_acc: 0.9932
Epoch 4/7
60000/60000 [==============================] - 2s 34us/step - loss: 0.0164 - acc: 0.9945 - val_loss: 0.0177 - val_acc: 0.9941
Epoch 5/7
60000/60000 [==============================] - 2s 34us/step - loss: 0.0132 - acc: 0.9956 - val_loss: 0.0155 - val_acc: 0.9947
Epoch 6/7
60000/60000 [==============================] - 2s 34us/step - loss: 0.0111 - acc: 0.9963 - val_loss: 0.0145 - val_acc: 0.9954
Epoch 7/7
60000/60000 [==============================] - 2s 34us/step - loss: 0.0092 - acc: 0.9970 - val_loss: 0.0160 - val_acc: 0.9949
Out[19]:
<keras.callbacks.History at 0x243dd27b080>

In [20]:
dc_score = dc.evaluate(x_val, y_val)
print('Test loss:', dc_score[0])
print('Test accuracy:', dc_score[1])


10000/10000 [==============================] - 0s 39us/step
Test loss: 0.0159673747035
Test accuracy: 0.994879998016
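In the classifier above, the copied encoder weights are fine-tuned together with the new output layer. If the goal were to measure the pretrained features on their own, the copied layers could be frozen before compiling; a minimal sketch of that alternative (not what the original run does):

# Hedged sketch: freeze the three copied encoder layers so that only the
# final classification layer is trained.
for layer in dc.layers[1:4]:
    layer.trainable = False
dc.compile(loss='binary_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])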

Stacked Autoencoder

First layer


In [21]:
encoding_dim1 = 128

input_img = Input(shape=(input_dim,))
encoded1 = Dense(encoding_dim1, activation='relu')(input_img)
decoded1 = Dense(input_dim, activation='relu')(encoded1)
class1 = Dense(num_classes, activation='softmax')(decoded1)

autoencoder1 = Model(input_img, class1)
autoencoder1.compile(optimizer=RMSprop(), loss='binary_crossentropy', metrics=['accuracy'])
encoder1 = Model(input_img, encoded1)
encoder1.compile(optimizer=RMSprop(), loss='binary_crossentropy')

In [22]:
plot_model(autoencoder1, to_file='images/sae_first_layer.png', show_shapes=True, show_layer_names=False, rankdir='TB')

In [23]:
Image('images/sae_first_layer.png')


Out[23]:

In [24]:
# autoencoder1 = keras.models.load_model('models/autoencoder1.h5')
# encoder1 = keras.models.load_model('models/encoder1.h5')
autoencoder1.fit(x_train
                 , y_train
                 , epochs=8
                 , batch_size=batch_size
                 , shuffle=True
                 , verbose=False
                 )
# autoencoder1.save('models/autoencoder1.h5')
# encoder1.save('models/encoder1.h5')


Out[24]:
<keras.callbacks.History at 0x243e3e8bdd8>

In [25]:
score1 = autoencoder1.evaluate(x_val, y_val, verbose=0)
print('Test loss:', score1[0])
print('Test accuracy:', score1[1])


Test loss: 0.0135303341903
Test accuracy: 0.995899995232

Second layer


In [26]:
first_layer_code = encoder1.predict(x_train)

encoding_dim2 = 64
epoch2 = 5

encoded_2_input = Input(shape=(encoding_dim1,))
encoded2 = Dense(encoding_dim2, activation='relu')(encoded_2_input)
decoded2 = Dense(encoding_dim1, activation='relu')(encoded2)
class2 = Dense(num_classes, activation='softmax')(decoded2)

autoencoder2 = Model(encoded_2_input, class2)
autoencoder2.compile(optimizer=RMSprop(), loss='binary_crossentropy', metrics=['accuracy'])
encoder2 = Model(encoded_2_input, encoded2)
encoder2.compile(optimizer=RMSprop(), loss='binary_crossentropy')

In [27]:
plot_model(autoencoder2, to_file='images/sae_second_layer.png', show_shapes=True, show_layer_names=False, rankdir='TB')

In [28]:
Image('images/sae_second_layer.png')


Out[28]:

In [29]:
# autoencoder2 = keras.models.load_model('models/autoencoder2.h5')
# encoder2 = keras.models.load_model('models/encoder2.h5')
autoencoder2.fit(first_layer_code
                 , y_train
                 , epochs=epoch2
                 , batch_size=batch_size
                 , shuffle=True
                 , verbose=False
                 )
# autoencoder2.save('models/autoencoder2.h5')
# encoder2.save('models/encoder2.h5')


Out[29]:
<keras.callbacks.History at 0x243e4389470>

In [30]:
first_layer_code_val = encoder1.predict(x_val)

score2 = autoencoder2.evaluate(first_layer_code_val, y_val, verbose=0)
print('Test loss:', score2[0])
print('Test accuracy:', score2[1])


Test loss: 0.0150333253332
Test accuracy: 0.995829997444

Third layer


In [31]:
second_layer_code = encoder2.predict(encoder1.predict(x_train))

encoding_dim3 = 32
epoch3 = 5

encoded_3_input = Input(shape=(encoding_dim2,))
encoded3 = Dense(encoding_dim3, activation='relu')(encoded_3_input)
decoded3 = Dense(encoding_dim2, activation='relu')(encoded3)
class3 = Dense(num_classes, activation='softmax')(decoded3)

autoencoder3 = Model(encoded_3_input, class3)
autoencoder3.compile(optimizer=RMSprop(), loss='binary_crossentropy', metrics=['accuracy'])
encoder3 = Model(encoded_3_input, encoded3)
encoder3.compile(optimizer=RMSprop(), loss='binary_crossentropy')

In [32]:
# autoencoder3 = keras.models.load_model('models/autoencoder3.h5')
# encoder3 = keras.models.load_model('models/encoder3.h5')
autoencoder3.fit(second_layer_code
                 , y_train
                 , epochs=epoch3
                 , batch_size=batch_size
                 , shuffle=True
                 , verbose=False
                 )
# autoencoder3.save('models/autoencoder3.h5')
# encoder3.save('models/encoder3.h5')


Out[32]:
<keras.callbacks.History at 0x243e4347cf8>

In [33]:
second_layer_code_val = encoder2.predict(encoder1.predict(x_val))

score3 = autoencoder3.evaluate(second_layer_code_val, y_val, verbose=0)
print('Test loss:', score3[0])
print('Test accuracy:', score3[1])


Test loss: 0.0151434558393
Test accuracy: 0.996019996071
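Producing the third-layer code above requires nesting three predict calls (encoder3.predict(encoder2.predict(encoder1.predict(x)))). Since Keras Models can be called as layers, the three trained encoders can also be composed into a single model; a minimal sketch (names are illustrative):

# Hedged sketch: chain the three trained per-layer encoders into one model.
code_input = Input(shape=(input_dim,))
stacked_encoder = Model(code_input, encoder3(encoder2(encoder1(code_input))))
# stacked_encoder.predict(x_train) should match the nested predict calls above.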

Stacked image reconstruction


In [34]:
sae_encoded1 = Dense(encoding_dim1, activation='relu')(input_img)
sae_encoded2 = Dense(encoding_dim2, activation='relu')(sae_encoded1)
sae_encoded3 = Dense(encoding_dim3, activation='relu')(sae_encoded2)
sae_decoded1 = Dense(encoding_dim2, activation='relu')(sae_encoded3)
sae_decoded2 = Dense(encoding_dim1, activation='relu')(sae_decoded1)
sae_decoded3 = Dense(input_dim, activation='sigmoid')(sae_decoded2)

sae = Model(input_img, sae_decoded3)

sae.layers[1].set_weights(autoencoder1.layers[1].get_weights())
sae.layers[2].set_weights(autoencoder2.layers[1].get_weights())
sae.layers[3].set_weights(autoencoder3.layers[1].get_weights())
# sae.layers[4].set_weights(autoencoder3.layers[2].get_weights())
# sae.layers[5].set_weights(autoencoder2.layers[2].get_weights())
# sae.layers[6].set_weights(autoencoder1.layers[2].get_weights())

sae.compile(loss='binary_crossentropy', optimizer=RMSprop())

In [35]:
sae.fit(x_train
        , x_train
        , epochs=10
        , batch_size=batch_size
        , shuffle=True
        , verbose=False
        )


Out[35]:
<keras.callbacks.History at 0x243f4313c50>

In [36]:
score4 = sae.evaluate(x_val, x_val, verbose=0)
print('Test loss:', score4)


Test loss: 0.108200454664

In [37]:
decoded_imgs = sae.predict(x_val)
n = 10  # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_val[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()


Denoising with Stacked Autoencoder


In [38]:
noise_factor = 0.5
x_train_noisy = x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape) 
x_val_noisy = x_val + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_val.shape) 

# re-normalize by clipping to the interval [0, 1]
x_train_noisy = np.clip(x_train_noisy, 0., 1.)
x_val_noisy = np.clip(x_val_noisy, 0., 1.)

n = 10
plt.figure(figsize=(20, 2))
for i in range(n):
    ax = plt.subplot(1, n, i + 1)
    plt.imshow(x_val_noisy[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()



In [39]:
# This reuses the same layer graph as `sae`, so the denoiser starts from (and
# further updates) the stacked autoencoder's weights.
denoising_autoencoder = Model(input_img, sae_decoded3)
denoising_autoencoder.compile(optimizer=RMSprop(), loss='binary_crossentropy')

In [40]:
denoising_autoencoder.fit(x_train_noisy, x_train,
                          epochs=100,
                          batch_size=batch_size,
                          shuffle=True,
                          verbose=0)


Out[40]:
<keras.callbacks.History at 0x243dd6b8b70>

In [41]:
decoded_imgs = denoising_autoencoder.predict(x_val_noisy)

In [42]:
n = 10  # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_val[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()


Classification of noisy data


In [43]:
score = model.evaluate(x_val_noisy, y_val, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])


Test loss: 0.845080524254
Test accuracy: 0.917590032101

In [44]:
score = model.evaluate(decoded_imgs, y_val, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])


Test loss: 0.0639302133186
Test accuracy: 0.989190002823
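The two-step evaluation above (denoise with the autoencoder, then classify the reconstructions) can equally be expressed as one composed model, again treating the trained Models as layers. A minimal sketch (illustrative only):

# Hedged sketch: a single model that denoises and then classifies.
noisy_input = Input(shape=(input_dim,))
denoise_then_classify = Model(noisy_input, model(denoising_autoencoder(noisy_input)))
# denoise_then_classify.predict(x_val_noisy) should match the two-step result above.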

Classification with stacked autoencoder


In [45]:
input_img = Input(shape=(input_dim,))
sae_classifier_encoded1 = Dense(encoding_dim1, activation='relu')(input_img)
sae_classifier_encoded2 = Dense(encoding_dim2, activation='relu')(sae_classifier_encoded1)
sae_classifier_encoded3 = Dense(encoding_dim3, activation='relu')(sae_classifier_encoded2)
class_layer = Dense(num_classes, activation='softmax')(sae_classifier_encoded3)

sae_classifier = Model(inputs=input_img, outputs=class_layer)

sae_classifier.layers[1].set_weights(autoencoder1.layers[1].get_weights())
sae_classifier.layers[2].set_weights(autoencoder2.layers[1].get_weights())
sae_classifier.layers[3].set_weights(autoencoder3.layers[1].get_weights())
sae_classifier.compile(loss='binary_crossentropy', optimizer=RMSprop(), metrics=['accuracy'])

In [46]:
plot_model(sae_classifier, to_file='images/sae_classifier.png', show_shapes=True, show_layer_names=False, rankdir='TB')

In [47]:
Image('images/sae_classifier.png')


Out[47]:

In [48]:
# sae_classifier = keras.models.load_model('models/sae_classifier.h5')
sae_classifier.fit(x_train, y_train
                   , epochs=7
                   , verbose=False
                   , batch_size=batch_size
                   , shuffle=True)
# sae_classifier.save('models/sae_classifier.h5')


Out[48]:
<keras.callbacks.History at 0x24384238630>

In [49]:
score5 = sae_classifier.evaluate(x_val, y_val)
print('Test loss:', score5[0])
print('Test accuracy:', score5[1])


10000/10000 [==============================] - 1s 58us/step
Test loss: 0.0156491303024
Test accuracy: 0.995669996262

Plot a two-dimensional representation of the data

Fourth layer


In [50]:
third_layer_code = encoder3.predict(encoder2.predict(encoder1.predict(x_train)))

encoding_dim4 = 2

encoded_4_input = Input(shape=(encoding_dim3,))
encoded4 = Dense(encoding_dim4, activation='sigmoid')(encoded_4_input)
decoded4 = Dense(encoding_dim3, activation='sigmoid')(encoded4)
class4 = Dense(num_classes, activation='softmax')(decoded4)

autoencoder4 = Model(encoded_4_input, class4)
autoencoder4.compile(optimizer=RMSprop(), loss='binary_crossentropy', metrics=['accuracy'])
encoder4 = Model(encoded_4_input, encoded4)
encoder4.compile(optimizer=RMSprop(), loss='binary_crossentropy')

In [51]:
autoencoder4.fit(third_layer_code
                 , y_train
                 , epochs=50
                 , batch_size=batch_size
                 , shuffle=True
                 , verbose=False
                 )


Out[51]:
<keras.callbacks.History at 0x243dd8a3898>

In [53]:
third_layer_code_val = encoder3.predict(encoder2.predict(encoder1.predict(x_val)))

score4 = autoencoder4.evaluate(third_layer_code_val, y_val, verbose=0)
print('Test loss:', score4[0])
print('Test accuracy:', score4[1])


Test loss: 0.0901743632257
Test accuracy: 0.969620018482

In [160]:
fourth_layer_code = encoder4.predict(encoder3.predict(encoder2.predict(encoder1.predict(x_train))))

In [161]:
value1 = [x[0] for x in fourth_layer_code]
value2 = [x[1] for x in fourth_layer_code]
# reload the integer training labels, since y_train was one-hot encoded above
y_classes = mnist.load_data()[0][1]

In [162]:
data = {'value1': value1, 'value2': value2, 'class' : y_classes}
data = pd.DataFrame.from_dict(data)

In [163]:
data.head()


Out[163]:
class value1 value2
0 5 0.032408 0.662961
1 0 0.898993 0.121290
2 4 0.361072 0.688325
3 1 0.826617 0.417664
4 9 0.615677 0.719501

In [172]:
groups = data.groupby('class')

# Plot
fig, ax = plt.subplots(figsize=(20,10))
# plt.figure(figsize=(20,10))
ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
for name, group in groups:
    ax.plot(group.value1, group.value2, marker='o', linestyle='', ms=3, label=name, alpha=0.7)
ax.legend()

plt.show()


Two-dimensional encoding directly from the raw pixels (for comparison)


In [173]:
# Use the raw pixel input directly instead of the stacked encoder's codes;
# the variable name is kept so the following cells mirror the previous section.
third_layer_code = x_train

encoding_dim4 = 2

encoded_4_input = Input(shape=(input_dim,))
encoded4 = Dense(encoding_dim4, activation='sigmoid')(encoded_4_input)
decoded4 = Dense(input_dim, activation='sigmoid')(encoded4)
class4 = Dense(num_classes, activation='softmax')(decoded4)

autoencoder4 = Model(encoded_4_input, class4)
autoencoder4.compile(optimizer=RMSprop(), loss='binary_crossentropy', metrics=['accuracy'])
encoder4 = Model(encoded_4_input, encoded4)
encoder4.compile(optimizer=RMSprop(), loss='binary_crossentropy')

In [174]:
autoencoder4.fit(third_layer_code
                 , y_train
                 , epochs=50
                 , batch_size=batch_size
                 , shuffle=True
                 , verbose=False
                 )


Out[174]:
<keras.callbacks.History at 0x212e4ecf390>

In [175]:
third_layer_code_val = x_val

score4 = autoencoder4.evaluate(third_layer_code_val, y_val, verbose=0)
print('Test loss:', score4[0])
print('Test accuracy:', score4[1])


Test loss: 0.0906211482525
Test accuracy: 0.968730016327

In [176]:
fourth_layer_code = encoder4.predict(x_train)

In [177]:
value1 = [x[0] for x in fourth_layer_code]
value2 = [x[1] for x in fourth_layer_code]
y_classes = mnist.load_data()[0][1]

In [178]:
data = {'value1': value1, 'value2': value2, 'class' : y_classes}
data = pd.DataFrame.from_dict(data)


In [179]:
groups = data.groupby('class')

# Plot
fig, ax = plt.subplots(figsize=(20,10))
# plt.figure(figsize=(20,10))
ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
for name, group in groups:
    ax.plot(group.value1, group.value2, marker='o', linestyle='', ms=3, label=name, alpha=0.7)
ax.legend()

plt.show()