In [0]:
import requests
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Input, Dense, Conv2D, UpSampling2D, MaxPool2D, Activation

In [0]:
def normalize_reshape(x):
  # add a channel dimension and scale pixel values to [0, 1]
  x = x.reshape(-1, 28, 28, 1)
  x = x.astype('float32') / 255
  # also return a flattened (num_samples, 784) view for the MLP autoencoder
  x_flatten = x.reshape((len(x), np.prod(x.shape[1:])))

  return x, x_flatten

In [0]:
def get_mlp_ae(encoding_dim):
  input_img = Input(shape=(784,))

  # "encoded" is the encoded representation of the input
  encoded = Dense(encoding_dim, activation='relu')(input_img)

  # "decoded" is the lossy reconstruction of the input
  decoded = Dense(784, activation='sigmoid')(encoded)

  # this model maps an input to its reconstruction
  autoencoder = Model(input_img, decoded)

  encoder = Model(input_img, encoded)

  # create a placeholder for an encoded (encoding_dim-dimensional) input
  encoded_input = Input(shape=(encoding_dim,))
  # retrieve the last layer of the autoencoder model
  decoder_layer = autoencoder.layers[-1]
  # create the decoder model
  decoder = Model(encoded_input, decoder_layer(encoded_input))
  
  return encoder, decoder, autoencoder

In [0]:
def get_cnn_ae():
  # convolutional autoencoder -- left unimplemented here; a possible sketch follows below
  pass
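
The convolutional autoencoder is left as a stub above. Below is one possible way to fill it in, using only the Conv2D/MaxPool2D/UpSampling2D layers already imported. The helper name, the filter counts and the 4x4x8 bottleneck are assumptions rather than part of the original notebook; the function mirrors get_mlp_ae's (encoder, decoder, autoencoder) return signature.

In [0]:
def get_cnn_ae_sketch():
  # hypothetical sketch, not the original implementation
  input_img = Input(shape=(28, 28, 1))

  # encoder: 28x28x1 -> 4x4x8 bottleneck
  x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
  x = MaxPool2D((2, 2), padding='same')(x)                 # 14x14x16
  x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
  x = MaxPool2D((2, 2), padding='same')(x)                 # 7x7x8
  x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
  encoded = MaxPool2D((2, 2), padding='same')(x)           # 4x4x8

  # decoder: 4x4x8 -> 28x28x1 reconstruction
  x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
  x = UpSampling2D((2, 2))(x)                              # 8x8x8
  x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
  x = UpSampling2D((2, 2))(x)                              # 16x16x8
  x = Conv2D(16, (3, 3), activation='relu')(x)             # 14x14x16 (valid padding trims the border)
  x = UpSampling2D((2, 2))(x)                              # 28x28x16
  decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

  autoencoder = Model(input_img, decoded)
  encoder = Model(input_img, encoded)

  # standalone decoder: replay the 7 decoder layers on a new 4x4x8 input
  encoded_input = Input(shape=(4, 4, 8))
  deco = encoded_input
  for layer in autoencoder.layers[-7:]:
    deco = layer(deco)
  decoder = Model(encoded_input, deco)

  return encoder, decoder, autoencoder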

In [0]:
def plot_sample(imgs, encoded_imgs, encoded_imgs_size, decoded_imgs):
  # for 10 random test images, show the original, its encoded representation,
  # and its reconstruction side by side
  ids = np.random.randint(len(imgs), size=10)
  for idx in ids:
    plt.figure(figsize=(10, 12))
    plt.subplot(1, 3, 1)
    plt.imshow(imgs[idx].reshape([-1, 28]))
    plt.subplot(1, 3, 2)
    plt.imshow(encoded_imgs[idx].reshape([-1, encoded_imgs_size]))
    plt.subplot(1, 3, 3)
    plt.imshow(decoded_imgs[idx].reshape([-1, 28]))
    plt.show()

In [0]:
(x_train, _), (x_test, _) = tf.keras.datasets.fashion_mnist.load_data()
(x_train_mnist, _), (x_test_mnist, _) = tf.keras.datasets.mnist.load_data()

x_train, x_train_flatten = normalize_reshape(x_train)
x_test, x_test_flatten = normalize_reshape(x_test)

x_train_mnist, x_train_mnist_flatten = normalize_reshape(x_train_mnist)
x_test_mnist, x_test_mnist_flatten = normalize_reshape(x_test_mnist)

e, d, ae = get_mlp_ae(64)
ae.compile(optimizer='adadelta', loss='mse')
ae.fit(x_train_flatten, x_train_flatten,
       epochs=50,
       batch_size=256,
       shuffle=True,
       validation_data=(x_test_flatten, x_test_flatten))

# encode and decode some images
# note that we take them from the *test* set
encoded_imgs = e.predict(x_test_flatten)
decoded_imgs = d.predict(encoded_imgs)


Train on 60000 samples, validate on 10000 samples
Epoch 1/50
60000/60000 [==============================] - 2s 37us/step - loss: 0.1163 - val_loss: 0.0881
Epoch 2/50
60000/60000 [==============================] - 2s 33us/step - loss: 0.0799 - val_loss: 0.0716
Epoch 3/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0666 - val_loss: 0.0615
Epoch 4/50
60000/60000 [==============================] - 2s 33us/step - loss: 0.0588 - val_loss: 0.0560
Epoch 5/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0546 - val_loss: 0.0527
Epoch 6/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0514 - val_loss: 0.0497
Epoch 7/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.0485 - val_loss: 0.0470
Epoch 8/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.0459 - val_loss: 0.0445
Epoch 9/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.0435 - val_loss: 0.0424
Epoch 10/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0415 - val_loss: 0.0405
Epoch 11/50
60000/60000 [==============================] - 2s 40us/step - loss: 0.0397 - val_loss: 0.0388
Epoch 12/50
60000/60000 [==============================] - 2s 40us/step - loss: 0.0380 - val_loss: 0.0372
Epoch 13/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0365 - val_loss: 0.0358
Epoch 14/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0351 - val_loss: 0.0344
Epoch 15/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0338 - val_loss: 0.0332
Epoch 16/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.0326 - val_loss: 0.0321
Epoch 17/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.0316 - val_loss: 0.0311
Epoch 18/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.0306 - val_loss: 0.0301
Epoch 19/50
60000/60000 [==============================] - 2s 37us/step - loss: 0.0297 - val_loss: 0.0293
Epoch 20/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.0289 - val_loss: 0.0286
Epoch 21/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0282 - val_loss: 0.0279
Epoch 22/50
60000/60000 [==============================] - 2s 40us/step - loss: 0.0275 - val_loss: 0.0273
Epoch 23/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0269 - val_loss: 0.0267
Epoch 24/50
60000/60000 [==============================] - 2s 40us/step - loss: 0.0264 - val_loss: 0.0262
Epoch 25/50
60000/60000 [==============================] - 2s 40us/step - loss: 0.0259 - val_loss: 0.0257
Epoch 26/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0254 - val_loss: 0.0253
Epoch 27/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0250 - val_loss: 0.0248
Epoch 28/50
60000/60000 [==============================] - 2s 37us/step - loss: 0.0246 - val_loss: 0.0245
Epoch 29/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0242 - val_loss: 0.0241
Epoch 30/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.0239 - val_loss: 0.0238
Epoch 31/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0236 - val_loss: 0.0235
Epoch 32/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0233 - val_loss: 0.0232
Epoch 33/50
60000/60000 [==============================] - 2s 37us/step - loss: 0.0230 - val_loss: 0.0229
Epoch 34/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0228 - val_loss: 0.0227
Epoch 35/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.0225 - val_loss: 0.0225
Epoch 36/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0223 - val_loss: 0.0222
Epoch 37/50
60000/60000 [==============================] - 2s 40us/step - loss: 0.0221 - val_loss: 0.0220
Epoch 38/50
60000/60000 [==============================] - 2s 40us/step - loss: 0.0219 - val_loss: 0.0218
Epoch 39/50
60000/60000 [==============================] - 2s 36us/step - loss: 0.0217 - val_loss: 0.0216
Epoch 40/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.0215 - val_loss: 0.0215
Epoch 41/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0213 - val_loss: 0.0213
Epoch 42/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0212 - val_loss: 0.0211
Epoch 43/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0210 - val_loss: 0.0210
Epoch 44/50
60000/60000 [==============================] - 2s 37us/step - loss: 0.0208 - val_loss: 0.0208
Epoch 45/50
60000/60000 [==============================] - 2s 36us/step - loss: 0.0207 - val_loss: 0.0207
Epoch 46/50
60000/60000 [==============================] - 2s 39us/step - loss: 0.0206 - val_loss: 0.0205
Epoch 47/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.0204 - val_loss: 0.0204
Epoch 48/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.0203 - val_loss: 0.0203
Epoch 49/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.0201 - val_loss: 0.0201
Epoch 50/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.0200 - val_loss: 0.0200

In [0]:
plot_sample(x_test, encoded_imgs, 8, decoded_imgs)



In [0]:
e_mnist, d_mnist, ae_mnist = get_mlp_ae(64)
ae_mnist.compile(optimizer='adadelta', loss='mse')
ae_mnist.fit(x_train_mnist_flatten, x_train_mnist_flatten,
             epochs=50,
             batch_size=256,
             shuffle=True,
             validation_data=(x_test_mnist_flatten, x_test_mnist_flatten))

# encode and decode some digits
# note that we take them from the *test* set
encoded_imgs_mnist = e_mnist.predict(x_test_mnist_flatten)
decoded_imgs_mnist = d_mnist.predict(encoded_imgs_mnist)


Train on 60000 samples, validate on 10000 samples
Epoch 1/50
60000/60000 [==============================] - 2s 38us/step - loss: 0.1180 - val_loss: 0.0717
Epoch 2/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0699 - val_loss: 0.0685
Epoch 3/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0672 - val_loss: 0.0655
Epoch 4/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0638 - val_loss: 0.0615
Epoch 5/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0595 - val_loss: 0.0571
Epoch 6/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0554 - val_loss: 0.0531
Epoch 7/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0518 - val_loss: 0.0498
Epoch 8/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0489 - val_loss: 0.0471
Epoch 9/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0464 - val_loss: 0.0448
Epoch 10/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0442 - val_loss: 0.0429
Epoch 11/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0424 - val_loss: 0.0412
Epoch 12/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0409 - val_loss: 0.0398
Epoch 13/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0395 - val_loss: 0.0384
Epoch 14/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0383 - val_loss: 0.0373
Epoch 15/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0372 - val_loss: 0.0362
Epoch 16/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0361 - val_loss: 0.0352
Epoch 17/50
60000/60000 [==============================] - 2s 33us/step - loss: 0.0352 - val_loss: 0.0343
Epoch 18/50
60000/60000 [==============================] - 2s 33us/step - loss: 0.0343 - val_loss: 0.0334
Epoch 19/50
60000/60000 [==============================] - 2s 33us/step - loss: 0.0334 - val_loss: 0.0327
Epoch 20/50
60000/60000 [==============================] - 2s 33us/step - loss: 0.0327 - val_loss: 0.0319
Epoch 21/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0319 - val_loss: 0.0312
Epoch 22/50
60000/60000 [==============================] - 2s 33us/step - loss: 0.0312 - val_loss: 0.0305
Epoch 23/50
60000/60000 [==============================] - 2s 33us/step - loss: 0.0306 - val_loss: 0.0298
Epoch 24/50
60000/60000 [==============================] - 2s 33us/step - loss: 0.0299 - val_loss: 0.0292
Epoch 25/50
60000/60000 [==============================] - 2s 33us/step - loss: 0.0293 - val_loss: 0.0287
Epoch 26/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0288 - val_loss: 0.0281
Epoch 27/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0282 - val_loss: 0.0276
Epoch 28/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0277 - val_loss: 0.0270
Epoch 29/50
60000/60000 [==============================] - 2s 36us/step - loss: 0.0272 - val_loss: 0.0265
Epoch 30/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0267 - val_loss: 0.0261
Epoch 31/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0262 - val_loss: 0.0256
Epoch 32/50
60000/60000 [==============================] - 2s 36us/step - loss: 0.0258 - val_loss: 0.0252
Epoch 33/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0253 - val_loss: 0.0247
Epoch 34/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0249 - val_loss: 0.0243
Epoch 35/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0245 - val_loss: 0.0239
Epoch 36/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0241 - val_loss: 0.0235
Epoch 37/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0237 - val_loss: 0.0231
Epoch 38/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0233 - val_loss: 0.0227
Epoch 39/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0229 - val_loss: 0.0224
Epoch 40/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0226 - val_loss: 0.0220
Epoch 41/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0222 - val_loss: 0.0217
Epoch 42/50
60000/60000 [==============================] - 2s 33us/step - loss: 0.0219 - val_loss: 0.0213
Epoch 43/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0216 - val_loss: 0.0210
Epoch 44/50
60000/60000 [==============================] - 2s 36us/step - loss: 0.0212 - val_loss: 0.0207
Epoch 45/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0209 - val_loss: 0.0204
Epoch 46/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0206 - val_loss: 0.0201
Epoch 47/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0203 - val_loss: 0.0198
Epoch 48/50
60000/60000 [==============================] - 2s 35us/step - loss: 0.0200 - val_loss: 0.0195
Epoch 49/50
60000/60000 [==============================] - 2s 36us/step - loss: 0.0197 - val_loss: 0.0192
Epoch 50/50
60000/60000 [==============================] - 2s 34us/step - loss: 0.0195 - val_loss: 0.0189

In [0]:
plot_sample(x_test_mnist_flatten, encoded_imgs_mnist, 8, decoded_imgs_mnist)


Encoding and decoding Fashion-MNIST images with an autoencoder trained on the MNIST dataset


In [0]:
encoded_imgs_1 = e_mnist.predict(x_test_flatten)
decoded_imgs_1 = d_mnist.predict(encoded_imgs_1)

In [0]:
plot_sample(x_test_flatten, encoded_imgs_1, 8, decoded_imgs_1)



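As a rough follow-up (not part of the original run), the cross-dataset degradation visible above can be quantified by comparing the MNIST-trained autoencoder's reconstruction MSE on its own test set against the Fashion-MNIST test set; a minimal sketch using only the variables defined earlier:

In [0]:
# reconstruction error of the MNIST-trained autoencoder on both test sets
mse_mnist = np.mean((ae_mnist.predict(x_test_mnist_flatten) - x_test_mnist_flatten) ** 2)
mse_fashion = np.mean((ae_mnist.predict(x_test_flatten) - x_test_flatten) ** 2)
print('Reconstruction MSE on MNIST test set:         %.4f' % mse_mnist)
print('Reconstruction MSE on Fashion-MNIST test set: %.4f' % mse_fashion)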