In [1]:
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

In [2]:
import os
import glob
import tensorflow as tf
import numpy as np
from keras import layers, models, optimizers, losses, metrics, regularizers
from keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard
from keras.preprocessing.sequence import pad_sequences
import keras.backend as K 
from keras.engine.topology import Container


/home/duke/.conda/envs/heads/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
Using TensorFlow backend.

In [3]:
import matplotlib.pyplot as plt
plt.style.use('ggplot')
%matplotlib inline
plt.rcParams['figure.figsize'] = (15, 12) # set default size of plots

In [4]:
from tensorflow.python import debug as tf_debug

sess = K.get_session()
sess.as_default()


Out[4]:
<contextlib._GeneratorContextManager at 0x7f557683dd30>

In [5]:
from keras_contrib.layers.normalization import InstanceNormalization

In [6]:
from keras.datasets import mnist

In [7]:
(x_train, y_train), (x_test, y_test) = mnist.load_data()

In [8]:
y_train = np.squeeze(y_train)
y_test = np.squeeze(y_test)

In [9]:
classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
num_classes = len(classes)
samples_per_class = 10
for y, cls in enumerate(classes):
    idxs = np.flatnonzero(y_train == y)
    idxs = np.random.choice(idxs, samples_per_class, replace=False)
    for i, idx in enumerate(idxs):
        plt_idx = i * num_classes + y + 1
        plt.subplot(samples_per_class, num_classes, plt_idx)
        plt.imshow(x_train[idx].astype('uint8'), cmap='gray')
        plt.axis('off')
        if i == 0:
            plt.title(cls)
plt.show()



In [10]:
def build_encoder():
    img_input = layers.Input(shape=(28, 28, 1))

    output = None
    return models.Model(img_input, output, 
                        name='encoder')

In [11]:
def build_decoder():
    emb_input = layers.Input(shape=(128,))
    
    output = None
    return models.Model(emb_input, output, 
                        name='decoder')

In [12]:
def build_autoencoder():
    img_input = layers.Input(shape=(28, 28, 1))
    encoder = build_encoder()
    decoder = build_decoder()
    
    embedding = encoder(img_input)
    reconstructed_input = decoder(embedding)
    
    return models.Model(img_input, reconstructed_input, 
                        name='autoencoder')

In [13]:
K.clear_session()
model = build_autoencoder()

In [14]:
model.summary()


_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         (None, 28, 28, 1)         0         
_________________________________________________________________
encoder (Model)              (None, 128)               394560    
_________________________________________________________________
decoder (Model)              (None, 28, 28, 1)         138417    
=================================================================
Total params: 532,977
Trainable params: 532,977
Non-trainable params: 0
_________________________________________________________________

Тренировочный цикл


In [15]:
model.compile('adam', 'mae')

In [16]:
x_train = np.expand_dims(x_train, -1)

In [36]:
model.fit(x=x_train, callbacks= [ReduceLROnPlateau(patience=4, monitor='loss', epsilon=1)],
          y=x_train, 
          batch_size=128, 
          epochs=30)


Epoch 1/30
60000/60000 [==============================] - 10s 166us/step - loss: 5.0045
Epoch 2/30
60000/60000 [==============================] - 10s 167us/step - loss: 4.9930
Epoch 3/30
60000/60000 [==============================] - 10s 167us/step - loss: 4.9489
Epoch 4/30
60000/60000 [==============================] - 10s 171us/step - loss: 4.9109
Epoch 5/30
60000/60000 [==============================] - 12s 200us/step - loss: 4.8913
Epoch 6/30
60000/60000 [==============================] - 11s 189us/step - loss: 4.8453
Epoch 7/30
60000/60000 [==============================] - 10s 164us/step - loss: 4.5290
Epoch 8/30
60000/60000 [==============================] - 10s 165us/step - loss: 4.5050
Epoch 9/30
60000/60000 [==============================] - 11s 177us/step - loss: 4.4961
Epoch 10/30
60000/60000 [==============================] - 10s 171us/step - loss: 4.4908
Epoch 11/30
60000/60000 [==============================] - 11s 176us/step - loss: 4.4523
Epoch 12/30
60000/60000 [==============================] - 11s 180us/step - loss: 4.4486
Epoch 13/30
60000/60000 [==============================] - 12s 194us/step - loss: 4.4475
Epoch 14/30
60000/60000 [==============================] - 11s 183us/step - loss: 4.4462
Epoch 15/30
60000/60000 [==============================] - 11s 177us/step - loss: 4.4408
Epoch 16/30
60000/60000 [==============================] - 10s 163us/step - loss: 4.4402
Epoch 17/30
60000/60000 [==============================] - 10s 165us/step - loss: 4.4400
Epoch 18/30
60000/60000 [==============================] - 11s 176us/step - loss: 4.4398
Epoch 19/30
 8192/60000 [===>..........................] - ETA: 8s - loss: 4.4297
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-36-7735634de308> in <module>()
      2           y=x_train,
      3           batch_size=128,
----> 4           epochs=30)

~/.conda/envs/heads/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
   1667                               initial_epoch=initial_epoch,
   1668                               steps_per_epoch=steps_per_epoch,
-> 1669                               validation_steps=validation_steps)
   1670 
   1671     def evaluate(self, x=None, y=None,

~/.conda/envs/heads/lib/python3.6/site-packages/keras/engine/training.py in _fit_loop(self, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)
   1204                         ins_batch[i] = ins_batch[i].toarray()
   1205 
-> 1206                     outs = f(ins_batch)
   1207                     if not isinstance(outs, list):
   1208                         outs = [outs]

~/.conda/envs/heads/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
   2473         session = get_session()
   2474         updated = session.run(fetches=fetches, feed_dict=feed_dict,
-> 2475                               **self.session_kwargs)
   2476         return updated[:len(self.outputs)]
   2477 

~/.conda/envs/heads/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    887     try:
    888       result = self._run(None, fetches, feed_dict, options_ptr,
--> 889                          run_metadata_ptr)
    890       if run_metadata:
    891         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

~/.conda/envs/heads/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
   1118     if final_fetches or final_targets or (handle and feed_dict_tensor):
   1119       results = self._do_run(handle, final_targets, final_fetches,
-> 1120                              feed_dict_tensor, options, run_metadata)
   1121     else:
   1122       results = []

~/.conda/envs/heads/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1315     if handle is None:
   1316       return self._do_call(_run_fn, self._session, feeds, fetches, targets,
-> 1317                            options, run_metadata)
   1318     else:
   1319       return self._do_call(_prun_fn, self._session, handle, feeds, fetches)

~/.conda/envs/heads/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
   1321   def _do_call(self, fn, *args):
   1322     try:
-> 1323       return fn(*args)
   1324     except errors.OpError as e:
   1325       message = compat.as_text(e.message)

~/.conda/envs/heads/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1300           return tf_session.TF_Run(session, options,
   1301                                    feed_dict, fetch_list, target_list,
-> 1302                                    status, run_metadata)
   1303 
   1304     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 

In [37]:
output = model.predict(np.expand_dims(x_test, -1), batch_size=128)

In [39]:
classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
num_classes = len(classes)
samples_per_class = 10
for y, cls in enumerate(classes):
    idxs = np.flatnonzero(y_test == y)
    idxs = np.random.choice(idxs, samples_per_class, replace=False)
    for i, idx in enumerate(idxs):
        plt_idx = i * num_classes + y + 1
        plt.subplot(samples_per_class, num_classes, plt_idx)
        plt.imshow(np.squeeze(output[idx]).astype('uint8'), cmap='gray')
        plt.axis('off')
        if i == 0:
            plt.title(cls)
plt.show()



In [ ]: