In [1]:
import numpy as np
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Input, Convolution2D
import matplotlib.pyplot as plt
# Seed for reproducibility
np.random.seed(1338)


Using Theano backend.

In [2]:
%%time
# Load the training and test data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
img_rows, img_cols = 28, 28
# Reshape to (samples, channels, rows, cols): channels-first, as the Theano backend expects
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
# Scale pixel values to [0, 1]
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255


CPU times: user 1.38 s, sys: 108 ms, total: 1.48 s
Wall time: 1.48 s
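
The reshape in the previous cell assumes channels-first ordering, which matches the Theano backend reported above. As a minimal sketch (assuming this Keras version exposes image_dim_ordering() in keras.backend), the per-sample shape can be chosen in a backend-agnostic way:

In [ ]:
from keras import backend as K

# 'th' = (channels, rows, cols); 'tf' = (rows, cols, channels)
if K.image_dim_ordering() == 'th':
    sample_shape = (1, img_rows, img_cols)
else:
    sample_shape = (img_rows, img_cols, 1)
print(sample_shape)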

In [3]:
%%time
# Select 6000 random examples from the test data (indices are drawn with replacement)
test_rows = np.random.randint(0, X_test.shape[0], 6000)
X_test = X_test[test_rows]

# Select the 5918 training examples labelled 6
X_six = X_train[y_train == 6]
# Select the training examples not labelled 6
X_not_six = X_train[y_train != 6]

# Select 6000 random examples from the not-6 data
random_rows = np.random.randint(0, X_not_six.shape[0], 6000)
X_not_six = X_not_six[random_rows]


CPU times: user 84 ms, sys: 8 ms, total: 92 ms
Wall time: 89 ms
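
np.random.randint draws indices with replacement, so a handful of examples can appear more than once in each subset. A sketch of the same selection without repeats, using np.random.choice with replace=False (my own variation, not what the run above used):

In [ ]:
test_rows = np.random.choice(X_test.shape[0], 6000, replace=False)
random_rows = np.random.choice(X_not_six.shape[0], 6000, replace=False)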

In [4]:
%%time
# Append the examples labelled 6 to the examples not labelled 6
X_train = np.append(X_six, X_not_six)
# np.append flattens its result, so reshape back to (samples, 1, rows, cols)
X_train = X_train.reshape(X_six.shape[0] + X_not_six.shape[0], 1, img_rows, img_cols)


CPU times: user 12 ms, sys: 4 ms, total: 16 ms
Wall time: 14.6 ms
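
Because np.append flattens both arrays, the explicit reshape above is required. A sketch of an equivalent construction with np.concatenate, which joins along the sample axis and keeps the 4-D shape intact:

In [ ]:
X_train_alt = np.concatenate([X_six, X_not_six], axis=0)
assert X_train_alt.shape == (X_six.shape[0] + X_not_six.shape[0], 1, img_rows, img_cols)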

In [5]:
%%time
# Single-channel 28x28 input (channels-first for the Theano backend)
input_img = Input(shape=(1, 28, 28))

# Encoder: stacked 3x3 convolutions that shrink the channel depth from 32 down to 8
x = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(input_img)
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(x)
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(x)
encoded = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)

# at this point the representation is (8, 28, 28): with border_mode='same' and no
# pooling, the spatial size is unchanged and only the channel depth is reduced

# Decoder: mirror of the encoder, ending in a single-channel sigmoid reconstruction
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(encoded)
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(x)
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(x)
x = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(x)
decoded = Convolution2D(1, 3, 3, activation='sigmoid', border_mode='same')(x)


CPU times: user 484 ms, sys: 36 ms, total: 520 ms
Wall time: 587 ms
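
Since every convolution uses border_mode='same' with stride 1, the encoded representation keeps the full 28x28 spatial extent. A quick sketch to confirm that shape by wrapping the encoder half in its own Model:

In [ ]:
encoder = Model(input_img, encoded)
print(encoder.output_shape)  # expected: (None, 8, 28, 28)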

In [6]:
%%time
# Tie the input to its reconstruction and compile with per-pixel binary cross-entropy
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')


CPU times: user 32 ms, sys: 0 ns, total: 32 ms
Wall time: 35.9 ms
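
Before committing to a long fit it can be worth sanity-checking layer shapes and parameter counts, which Keras prints via summary():

In [ ]:
autoencoder.summary()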

In [7]:
%%time
# Train the autoencoder to reconstruct its own input
autoencoder.fit(X_train, X_train,
                nb_epoch=15,
                batch_size=128,
                shuffle=True,
                validation_data=(X_test, X_test))


Train on 11918 samples, validate on 6000 samples
Epoch 1/15
11918/11918 [==============================] - 165s - loss: 0.1582 - val_loss: 0.0944
Epoch 2/15
11918/11918 [==============================] - 169s - loss: 0.0844 - val_loss: 0.0790
Epoch 3/15
11918/11918 [==============================] - 173s - loss: 0.0778 - val_loss: 0.0781
Epoch 4/15
11918/11918 [==============================] - 171s - loss: 0.0734 - val_loss: 0.0688
Epoch 5/15
11918/11918 [==============================] - 167s - loss: 0.0705 - val_loss: 0.0663
Epoch 6/15
11918/11918 [==============================] - 168s - loss: 0.0687 - val_loss: 0.0661
Epoch 7/15
11918/11918 [==============================] - 167s - loss: 0.0676 - val_loss: 0.0642
Epoch 8/15
11918/11918 [==============================] - 172s - loss: 0.0666 - val_loss: 0.0637
Epoch 9/15
11918/11918 [==============================] - 173s - loss: 0.0657 - val_loss: 0.0650
Epoch 10/15
11918/11918 [==============================] - 166s - loss: 0.0652 - val_loss: 0.0627
Epoch 11/15
11918/11918 [==============================] - 166s - loss: 0.0646 - val_loss: 0.0630
Epoch 12/15
11918/11918 [==============================] - 167s - loss: 0.0644 - val_loss: 0.0621
Epoch 13/15
11918/11918 [==============================] - 167s - loss: 0.0639 - val_loss: 0.0622
Epoch 14/15
11918/11918 [==============================] - 168s - loss: 0.0636 - val_loss: 0.0629
Epoch 15/15
11918/11918 [==============================] - 167s - loss: 0.0634 - val_loss: 0.0619
CPU times: user 1h 22min 38s, sys: 1min 4s, total: 1h 23min 42s
Wall time: 42min 19s
Out[7]:
<keras.callbacks.History at 0x7f3972e05ac8>
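
At roughly 42 minutes of wall time, the fit is expensive enough to be worth persisting. A minimal sketch using save_weights (the file name is a placeholder of mine, and h5py must be installed):

In [ ]:
autoencoder.save_weights('conv_autoencoder_mnist.h5')
# later, after rebuilding the same architecture:
# autoencoder.load_weights('conv_autoencoder_mnist.h5')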

In [ ]:
%%time
# Reconstruct the test digits with the trained autoencoder
decoded_imgs = autoencoder.predict(X_test)


CPU times: user 40.3 s, sys: 544 ms, total: 40.9 s
Wall time: 20.8 s
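
Because sixes make up about half of the training set (versus roughly a tenth of the test set), the per-image reconstruction error is a useful diagnostic; a sketch of mine computing the mean squared error for each test digit:

In [ ]:
errors = np.mean((X_test - decoded_imgs) ** 2, axis=(1, 2, 3))
print(errors.mean(), errors.std())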

In [ ]:
%%time
# Plot original digits (top row) against their reconstructions (bottom row)

n = 10  # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(X_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
