In [1]:
from keras.layers import Input, Dense
from keras.models import Model

# this is the size of our encoded representations
encoding_dim = 32  # 32 floats -> a compression factor of 24.5, assuming the input is 784 floats

# this is our input placeholder
input_img = Input(shape=(784,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input_img)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(784, activation='sigmoid')(encoded)

# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)


Using TensorFlow backend.
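
A quick sanity check (added here, not part of the original run) is to print the model summary; with a 784 -> 32 -> 784 architecture it should report 25,120 parameters in the encoding layer (784*32 + 32) and 25,872 in the decoding layer (32*784 + 784), 50,992 in total.

In [ ]:
# added check: inspect layer shapes and parameter counts
autoencoder.summary()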

In [2]:
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)

In [3]:
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
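
Note that encoder and decoder are built from the same layer objects as autoencoder, so all three models share weights. A quick consistency check (an added sketch, not in the original run): chaining the standalone models should reproduce the autoencoder's end-to-end output.

In [ ]:
# added check: the standalone models share weights with the autoencoder,
# so encoding then decoding should match its output exactly
import numpy as np
x = np.random.rand(1, 784).astype('float32')
assert np.allclose(autoencoder.predict(x),
                   decoder.predict(encoder.predict(x)))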

In [4]:
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
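
Binary crossentropy is applied per pixel, which makes sense here because the inputs will be scaled to [0, 1] and the decoder ends in a sigmoid. Mean squared error is a common alternative; a commented-out variant (a sketch only, not what this notebook runs) is shown below.

In [ ]:
# alternative compile settings (a sketch; this notebook uses adadelta + binary crossentropy)
# autoencoder.compile(optimizer='adam', loss='mse')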

In [5]:
from keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()


Downloading data from https://s3.amazonaws.com/img-datasets/mnist.npz
11493376/11490434 [==============================] - 31s 3us/step
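
The labels are discarded (the underscore placeholders) since an autoencoder needs only the images. As loaded, the pixels are uint8 values in [0, 255] with shape (60000, 28, 28); a quick inspection (added here) confirms this before preprocessing.

In [ ]:
# added check: raw MNIST arrives as 28x28 uint8 images
print(x_train.shape, x_train.dtype)  # (60000, 28, 28) uint8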

In [8]:
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape)
print(x_test.shape)


(60000, 784)
(10000, 784)

In [9]:
autoencoder.fit(x_train, x_train,
                epochs=50,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test, x_test))


Train on 60000 samples, validate on 10000 samples
Epoch 1/50
60000/60000 [==============================] - 5s 83us/step - loss: 0.3491 - val_loss: 0.2694
Epoch 2/50
60000/60000 [==============================] - 4s 70us/step - loss: 0.2605 - val_loss: 0.2478
Epoch 3/50
60000/60000 [==============================] - 4s 70us/step - loss: 0.2370 - val_loss: 0.2240
Epoch 4/50
60000/60000 [==============================] - 4s 62us/step - loss: 0.2166 - val_loss: 0.2069
Epoch 5/50
60000/60000 [==============================] - 5s 77us/step - loss: 0.2023 - val_loss: 0.1951
Epoch 6/50
60000/60000 [==============================] - 4s 67us/step - loss: 0.1920 - val_loss: 0.1861
Epoch 7/50
60000/60000 [==============================] - 4s 67us/step - loss: 0.1840 - val_loss: 0.1791
Epoch 8/50
60000/60000 [==============================] - 4s 60us/step - loss: 0.1774 - val_loss: 0.1729
Epoch 9/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1719 - val_loss: 0.1679
Epoch 10/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1670 - val_loss: 0.1632
Epoch 11/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1626 - val_loss: 0.1590
Epoch 12/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1586 - val_loss: 0.1551
Epoch 13/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1548 - val_loss: 0.1515
Epoch 14/50
60000/60000 [==============================] - 4s 63us/step - loss: 0.1514 - val_loss: 0.1482
Epoch 15/50
60000/60000 [==============================] - 4s 60us/step - loss: 0.1481 - val_loss: 0.1450
Epoch 16/50
60000/60000 [==============================] - 4s 61us/step - loss: 0.1451 - val_loss: 0.1421
Epoch 17/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1424 - val_loss: 0.1395
Epoch 18/50
60000/60000 [==============================] - 4s 62us/step - loss: 0.1398 - val_loss: 0.1371
Epoch 19/50
60000/60000 [==============================] - 4s 60us/step - loss: 0.1375 - val_loss: 0.1348
Epoch 20/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1352 - val_loss: 0.1326
Epoch 21/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1332 - val_loss: 0.1305
Epoch 22/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1312 - val_loss: 0.1286
Epoch 23/50
60000/60000 [==============================] - 4s 61us/step - loss: 0.1293 - val_loss: 0.1269
Epoch 24/50
60000/60000 [==============================] - 5s 84us/step - loss: 0.1275 - val_loss: 0.1250
Epoch 25/50
60000/60000 [==============================] - 4s 70us/step - loss: 0.1258 - val_loss: 0.1233
Epoch 26/50
60000/60000 [==============================] - 4s 62us/step - loss: 0.1242 - val_loss: 0.1217
Epoch 27/50
60000/60000 [==============================] - 4s 62us/step - loss: 0.1226 - val_loss: 0.1202
Epoch 28/50
60000/60000 [==============================] - 4s 61us/step - loss: 0.1211 - val_loss: 0.1187
Epoch 29/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1197 - val_loss: 0.1173
Epoch 30/50
60000/60000 [==============================] - 4s 61us/step - loss: 0.1183 - val_loss: 0.1160
Epoch 31/50
60000/60000 [==============================] - 4s 68us/step - loss: 0.1170 - val_loss: 0.1147
Epoch 32/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1158 - val_loss: 0.1135
Epoch 33/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1147 - val_loss: 0.1124
Epoch 34/50
60000/60000 [==============================] - 4s 60us/step - loss: 0.1136 - val_loss: 0.1113
Epoch 35/50
60000/60000 [==============================] - 4s 60us/step - loss: 0.1126 - val_loss: 0.1103
Epoch 36/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1116 - val_loss: 0.1095
Epoch 37/50
60000/60000 [==============================] - 3s 57us/step - loss: 0.1107 - val_loss: 0.1086
Epoch 38/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1099 - val_loss: 0.1078
Epoch 39/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1091 - val_loss: 0.1070
Epoch 40/50
60000/60000 [==============================] - 4s 62us/step - loss: 0.1084 - val_loss: 0.1063
Epoch 41/50
60000/60000 [==============================] - 4s 60us/step - loss: 0.1077 - val_loss: 0.1057
Epoch 42/50
60000/60000 [==============================] - 4s 64us/step - loss: 0.1071 - val_loss: 0.1051
Epoch 43/50
60000/60000 [==============================] - 4s 60us/step - loss: 0.1065 - val_loss: 0.1045
Epoch 44/50
60000/60000 [==============================] - 4s 65us/step - loss: 0.1060 - val_loss: 0.1040
Epoch 45/50
60000/60000 [==============================] - 4s 59us/step - loss: 0.1055 - val_loss: 0.1035
Epoch 46/50
60000/60000 [==============================] - 4s 63us/step - loss: 0.1050 - val_loss: 0.1031
Epoch 47/50
60000/60000 [==============================] - 4s 61us/step - loss: 0.1046 - val_loss: 0.1027
Epoch 48/50
60000/60000 [==============================] - 4s 60us/step - loss: 0.1042 - val_loss: 0.1023
Epoch 49/50
60000/60000 [==============================] - 3s 58us/step - loss: 0.1038 - val_loss: 0.1020
Epoch 50/50
60000/60000 [==============================] - 4s 61us/step - loss: 0.1035 - val_loss: 0.1016
Out[9]:
<keras.callbacks.History at 0x131213978>
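
After 50 epochs the training and validation losses settle around 0.10. To monitor a run like this, one option (a sketch, not executed here) is Keras's TensorBoard callback.

In [ ]:
# optional variant (not run here): log losses for TensorBoard
# from keras.callbacks import TensorBoard
# autoencoder.fit(x_train, x_train,
#                 epochs=50, batch_size=256, shuffle=True,
#                 validation_data=(x_test, x_test),
#                 callbacks=[TensorBoard(log_dir='/tmp/autoencoder')])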

In [10]:
# encode and decode some digits
# note that we take them from the *test* set
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
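
Each test image is compressed to a 32-float code (non-negative, because of the relu activation) and then decoded back to 784 floats. A quick shape check (added here):

In [ ]:
# added check: one 32-float code per test image
print(encoded_imgs.shape)  # (10000, 32)
print(decoded_imgs.shape)  # (10000, 784)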

In [12]:
# use Matplotlib (don't ask)
import matplotlib.pyplot as plt
%matplotlib inline
n = 10  # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
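
The top row shows original test digits and the bottom row their reconstructions; as expected for a 32-dimensional bottleneck, the digits are recognizable but the reconstruction is lossy. To reuse the trained models without retraining, they can be persisted with Keras's standard save/load API (an added sketch, commented out).

In [ ]:
# added sketch: save the trained model to HDF5 and reload it later
# autoencoder.save('autoencoder.h5')
# from keras.models import load_model
# autoencoder = load_model('autoencoder.h5')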


