In [1]:
"""
This code is from https://blog.keras.io/building-autoencoders-in-keras.html
"""
# A demo autoencoder model
from keras.layers import Input, Dense
from keras.models import Model


Using TensorFlow backend.

In [2]:
encoding_dim = 32  # size of the encoded representation: 784 -> 32 is a 24.5x compression

input_img = Input(shape=(784,))

# "encoded" is the compressed representation of the input
encoded = Dense(encoding_dim, activation='relu')(input_img)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(784, activation='sigmoid')(encoded)

# this model maps an input to its reconstruction
autoencoder = Model(inputs=input_img, outputs=decoded)



In [3]:
# Create a separate encoder model
encoder = Model(inputs=input_img, outputs=encoded)

# Build a standalone decoder by applying the autoencoder's last layer
# to a fresh 32-dimensional input
encoded_input = Input(shape=(encoding_dim,))
decoder_layer = autoencoder.layers[-1]
decoder = Model(inputs=encoded_input, outputs=decoder_layer(encoded_input))
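
A quick way to check this wiring (a sketch, not part of the original notebook): chaining the standalone encoder and decoder should reproduce the full autoencoder's output.

In [ ]:
# Sanity-check sketch: encoder followed by decoder should match the
# end-to-end autoencoder on the same batch (any (n, 784) float array works)
import numpy as np
x_batch = np.random.rand(4, 784).astype('float32')
roundtrip = decoder.predict(encoder.predict(x_batch))
direct = autoencoder.predict(x_batch)
print(np.allclose(roundtrip, direct, atol=1e-5))  # expected: True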

In [4]:
# Compile for training on MNIST digits: per-pixel binary crossentropy,
# which suits inputs scaled to [0, 1] with a sigmoid output layer
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
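
For intuition, binary crossentropy here is applied per pixel, treating each intensity in [0, 1] as a soft binary target and averaging over the 784 outputs. A small numpy rendering of that loss (a sketch for intuition only, not Keras internals):

In [ ]:
# Sketch: per-pixel binary crossentropy, averaged over pixels
import numpy as np
def pixel_bce(y_true, y_pred, eps=1e-7):
    y_pred = np.clip(y_pred, eps, 1 - eps)  # avoid log(0)
    return -np.mean(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))
print(pixel_bce(np.array([0.0, 0.5, 1.0]), np.array([0.1, 0.5, 0.9])))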

In [2]:
from keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()  # labels are unused by an autoencoder

In [6]:
x_train.shape


Out[6]:
(60000, 28, 28)

In [7]:
# Scale pixels to [0, 1] and flatten each 28x28 image into a 784-vector
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0

x_train = x_train.reshape(len(x_train), np.prod(x_train.shape[1:]))
x_test = x_test.reshape(len(x_test), np.prod(x_test.shape[1:]))
print(x_train.shape)


(60000, 784)

In [8]:
# Train the model (reconstruction target = input)
autoencoder.fit(x_train, x_train, epochs=10,
                batch_size=256, shuffle=True,
                validation_data=(x_test, x_test))


Train on 60000 samples, validate on 10000 samples
Epoch 1/10
60000/60000 [==============================] - 7s - loss: 0.2737 - val_loss: 0.1916
Epoch 2/10
60000/60000 [==============================] - 7s - loss: 0.1726 - val_loss: 0.1552
Epoch 3/10
60000/60000 [==============================] - 6s - loss: 0.1455 - val_loss: 0.1342
Epoch 4/10
60000/60000 [==============================] - 6s - loss: 0.1290 - val_loss: 0.1214
Epoch 5/10
60000/60000 [==============================] - 7s - loss: 0.1187 - val_loss: 0.1133
Epoch 6/10
60000/60000 [==============================] - 7s - loss: 0.1118 - val_loss: 0.1074
Epoch 7/10
60000/60000 [==============================] - 8s - loss: 0.1066 - val_loss: 0.1031
Epoch 8/10
60000/60000 [==============================] - 8s - loss: 0.1027 - val_loss: 0.0998
Epoch 9/10
60000/60000 [==============================] - 7s - loss: 0.0997 - val_loss: 0.0973
Epoch 10/10
60000/60000 [==============================] - 7s - loss: 0.0977 - val_loss: 0.0957
Out[8]:
<keras.callbacks.History at 0x7f7b4c5904a8>
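
The fit() call returns a History object whose history dict holds the per-epoch losses; assigning the call above to a variable (e.g. history = autoencoder.fit(...)) allows plotting the curves (a sketch under that assumption):

In [ ]:
# Sketch: plot training vs. validation loss from the History object
# (assumes the fit() call above was assigned to `history`)
import matplotlib.pyplot as plt
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel('binary crossentropy')
plt.legend()
plt.show()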

In [9]:
# Encode the dataset
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
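
Since the encoder maps each 784-dimensional digit to 32 floats, the arrays should come out as (10000, 32) and (10000, 784); a quick check (not from the original notebook):

In [ ]:
# Sketch: confirm the bottleneck and reconstruction shapes
print(encoded_imgs.shape)  # expected: (10000, 32)
print(decoded_imgs.shape)  # expected: (10000, 784)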

In [10]:
import matplotlib.pyplot as plt

n = 10  # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()


[Output figure: ten original test digits (top row) above their reconstructions (bottom row)]

In [4]:
# Convolutional autoencoder
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Model

input_img = Input(shape=(28, 28, 1))  # channels-last, matching the TensorFlow backend

x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)

# at this point the representation is (4, 4, 8), i.e. 128-dimensional

x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
# no padding here: 16x16 -> 14x14, so the final upsampling restores 28x28
x = Conv2D(16, (3, 3), activation='relu')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
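
Printing the model summary is a quick way to confirm the shapes traced in the comments above, in particular the (4, 4, 8) bottleneck and the (28, 28, 1) output:

In [ ]:
# Inspect the encoder/decoder shapes layer by layer
autoencoder.summary()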

In [5]:
(x_train, _), (x_test, _) = mnist.load_data()

x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
# add a trailing channel axis to match the model's (28, 28, 1) input
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))

In [ ]:
from keras.callbacks import TensorBoard

autoencoder.fit(x_train, x_train,
                epochs=20,
                batch_size=128,
                shuffle=True,
                validation_data=(x_test, x_test),
                callbacks=[TensorBoard(log_dir='/tmp/autoencoder')])
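
With the TensorBoard callback logging to /tmp/autoencoder, training can be monitored live in a browser; the server is started from a separate shell:

In [ ]:
# Start TensorBoard from a separate shell, then open http://localhost:6006
#   tensorboard --logdir=/tmp/autoencoder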

In [11]:
# Sequence-to-sequence autoencoder
timesteps = 10
input_dim = 200
latent_dim = 100
from keras.layers import Input, LSTM, RepeatVector
from keras.models import Model

inputs = Input(shape=(timesteps, input_dim))
# the encoder LSTM compresses the entire input sequence into one latent vector
encoded = LSTM(latent_dim)(inputs)

# repeat the latent vector once per timestep, then decode back into a sequence
decoded = RepeatVector(timesteps)(encoded)
decoded = LSTM(input_dim, return_sequences=True)(decoded)

sequence_autoencoder = Model(inputs, decoded)
encoder = Model(inputs, encoded)
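
A minimal smoke test of the shapes (a sketch with random data, not from the original notebook): the autoencoder should map (batch, 10, 200) sequences back to the same shape, while the encoder emits one (batch, 100) latent vector per sequence.

In [ ]:
# Sketch: shape check with random sequences
import numpy as np
seqs = np.random.rand(8, timesteps, input_dim).astype('float32')
print(sequence_autoencoder.predict(seqs).shape)  # expected: (8, 10, 200)
print(encoder.predict(seqs).shape)               # expected: (8, 100)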
