In [2]:
from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten, UpSampling2D
from keras import backend as K

import random
import glob
import wandb
from wandb.keras import WandbCallback
import subprocess
import os
from PIL import Image
import numpy as np
from matplotlib.pyplot import imshow, figure

In [3]:
# Initialize wandb and download the dataset

hyperparams = {"num_epochs": 10,
               "batch_size": 32,
               "height": 96,
               "width": 96}

wandb.init(config=hyperparams)
config = wandb.config

val_dir = 'catz/test'
train_dir = 'catz/train'

# automatically get the data if it doesn't exist
if not os.path.exists("catz"):
    print("Downloading catz dataset...")
    subprocess.check_output(
        "curl https://storage.googleapis.com/wandb/catz.tar.gz | tar xz", shell=True)


W&B Run: https://app.wandb.ai/l2k2/ml-class-examples_keras-video-predict/runs/cje1n44s
Call `%%wandb` in the cell containing your training loop to display live results.

In [4]:
# Generator that loops over the training and test images

def my_generator(batch_size, img_dir):
    """Yield batches of five stacked input frames and the next-frame target."""
    cat_dirs = glob.glob(img_dir + "/*")
    counter = 0
    while True:
        input_images = np.zeros(
            (batch_size, config.height, config.width, 3 * 5))
        output_images = np.zeros((batch_size, config.height, config.width, 3))
        random.shuffle(cat_dirs)
        if (counter+batch_size >= len(cat_dirs)):
            counter = 0
        for i in range(batch_size):
            input_imgs = glob.glob(cat_dirs[counter + i] + "/cat_[0-5]*")
            # sorted() keeps the five input frames in temporal order
            imgs = [Image.open(img) for img in sorted(input_imgs)]
            input_images[i] = np.concatenate(imgs, axis=2)
            output_images[i] = np.array(Image.open(
                cat_dirs[counter + i] + "/cat_result.jpg"))
            # scale pixel values into [0, 1]
            input_images[i] /= 255.
            output_images[i] /= 255.
        yield (input_images, output_images)
        counter += batch_size
        
steps_per_epoch = len(glob.glob(train_dir + "/*")) // config.batch_size
validation_steps = len(glob.glob(val_dir + "/*")) // config.batch_size
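
The plain Python generator above is not thread-safe, so it only works because fit_generator is left at its default single worker. If you ever pass workers > 1 or use_multiprocessing=True, a keras.utils.Sequence is the safe equivalent. A minimal sketch (the class name CatzSequence is introduced here for illustration; it assumes the same catz directory layout):

In [ ]:
# Sketch: the same pipeline as a thread-safe keras.utils.Sequence
from keras.utils import Sequence

class CatzSequence(Sequence):
    def __init__(self, batch_size, img_dir):
        self.batch_size = batch_size
        self.cat_dirs = glob.glob(img_dir + "/*")

    def __len__(self):
        return len(self.cat_dirs) // self.batch_size

    def __getitem__(self, idx):
        dirs = self.cat_dirs[idx * self.batch_size:(idx + 1) * self.batch_size]
        X = np.zeros((len(dirs), config.height, config.width, 3 * 5))
        y = np.zeros((len(dirs), config.height, config.width, 3))
        for i, d in enumerate(dirs):
            frames = [Image.open(f) for f in sorted(glob.glob(d + "/cat_[0-5]*"))]
            X[i] = np.concatenate(frames, axis=2) / 255.
            y[i] = np.array(Image.open(d + "/cat_result.jpg")) / 255.
        return X, y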

In [5]:
# Callback that logs sample inputs and predictions to W&B after each epoch

class ImageCallback(Callback):
    def on_epoch_end(self, epoch, logs=None):
        validation_X, validation_y = next(
            my_generator(15, val_dir))
        output = self.model.predict(validation_X)
        wandb.log({
            "input": [wandb.Image(np.concatenate(np.split(c, 5, axis=2), axis=1)) for c in validation_X],
            "output": [wandb.Image(np.concatenate([validation_y[i], o], axis=1)) for i, o in enumerate(output)]
        }, commit=False)

In [83]:
# Test the generator
gen = my_generator(2, train_dir)
videos, next_frame = next(gen)
videos[0].shape      # (96, 96, 15): five RGB frames stacked along the channel axis
next_frame[0].shape  # only this last expression is echoed below


Out[83]:
(96, 96, 3)

In [64]:
# Visualize the five input frames and the target frame
figure()
imshow(videos[0][:, :, 0:3])
figure()
imshow(videos[0][:, :, 3:6])
figure()
imshow(videos[0][:, :, 6:9])
figure()
imshow(videos[0][:, :, 9:12])
figure()
imshow(videos[0][:, :, 12:15])

figure()
imshow(next_frame[0][:, :, 0:3])


Out[64]:
<matplotlib.image.AxesImage at 0x7fb04f5b7a90>

In [7]:
# Metric measuring how perceptually similar two images are, using the
# low-cost "redmean" weighted RGB distance
def perceptual_distance(y_true, y_pred):
    y_true *= 255.
    y_pred *= 255.
    rmean = (y_true[:, :, :, 0] + y_pred[:, :, :, 0]) / 2
    r = y_true[:, :, :, 0] - y_pred[:, :, :, 0]
    g = y_true[:, :, :, 1] - y_pred[:, :, :, 1]
    b = y_true[:, :, :, 2] - y_pred[:, :, :, 2]

    return K.mean(K.sqrt((((512 + rmean) * r * r) / 256) + 4 * g * g + (((767 - rmean) * b * b) / 256)))
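
For debugging it can help to evaluate the same distance outside the TensorFlow graph. A NumPy mirror is sketched below (perceptual_distance_np is a helper introduced here, not part of the training code); unlike the Keras version it scales copies instead of modifying its arguments in place:

In [ ]:
# NumPy mirror of perceptual_distance, for spot checks on generator batches
def perceptual_distance_np(y_true, y_pred):
    y_true = y_true * 255.
    y_pred = y_pred * 255.
    rmean = (y_true[..., 0] + y_pred[..., 0]) / 2
    r = y_true[..., 0] - y_pred[..., 0]
    g = y_true[..., 1] - y_pred[..., 1]
    b = y_true[..., 2] - y_pred[..., 2]
    return np.mean(np.sqrt(((512 + rmean) * r * r) / 256 + 4 * g * g +
                           ((767 - rmean) * b * b) / 256))

# identical images should give a distance of exactly 0.0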

In [84]:
wandb.init(config=hyperparams)
config = wandb.config

# Simplest possible model: a single 3-filter convolution over the 15 stacked channels
model = Sequential()
model.add(Conv2D(3, (3, 3), activation='relu', padding='same',
                 input_shape=(config.height, config.width, 5 * 3)))

model.compile(optimizer='adam', loss='mse', metrics=[perceptual_distance])

model.fit_generator(my_generator(config.batch_size, train_dir),
                    steps_per_epoch=steps_per_epoch // 4,
                    epochs=config.num_epochs,
                    callbacks=[ImageCallback(), WandbCallback()],
                    validation_steps=validation_steps // 4,
                    validation_data=my_generator(config.batch_size, val_dir))


W&B Run: https://app.wandb.ai/l2k2/ml-class-examples_keras-video-predict/runs/ne93ynbj
Call `%%wandb` in the cell containing your training loop to display live results.
Epoch 1/10
52/52 [==============================] - 10s 186ms/step - loss: 0.0491 - perceptual_distance: 138.4108 - val_loss: 0.0190 - val_perceptual_distance: 76.8124
Epoch 2/10
52/52 [==============================] - 9s 170ms/step - loss: 0.0157 - perceptual_distance: 66.7613 - val_loss: 0.0138 - val_perceptual_distance: 60.2717
Epoch 3/10
52/52 [==============================] - 10s 186ms/step - loss: 0.0138 - perceptual_distance: 58.4320 - val_loss: 0.0125 - val_perceptual_distance: 56.8256
Epoch 4/10
52/52 [==============================] - 10s 189ms/step - loss: 0.0120 - perceptual_distance: 53.2015 - val_loss: 0.0103 - val_perceptual_distance: 48.9738
Epoch 5/10
52/52 [==============================] - 10s 192ms/step - loss: 0.0099 - perceptual_distance: 47.9817 - val_loss: 0.0086 - val_perceptual_distance: 44.2460
Epoch 6/10
52/52 [==============================] - 10s 188ms/step - loss: 0.0086 - perceptual_distance: 43.3613 - val_loss: 0.0091 - val_perceptual_distance: 42.4493
Epoch 7/10
52/52 [==============================] - 10s 187ms/step - loss: 0.0100 - perceptual_distance: 44.8281 - val_loss: 0.0084 - val_perceptual_distance: 40.9105
Epoch 8/10
52/52 [==============================] - 10s 186ms/step - loss: 0.0084 - perceptual_distance: 41.2744 - val_loss: 0.0082 - val_perceptual_distance: 39.9885
Epoch 9/10
52/52 [==============================] - 10s 186ms/step - loss: 0.0076 - perceptual_distance: 38.7134 - val_loss: 0.0068 - val_perceptual_distance: 37.2396
Epoch 10/10
52/52 [==============================] - 10s 189ms/step - loss: 0.0074 - perceptual_distance: 37.5170 - val_loss: 0.0063 - val_perceptual_distance: 35.2135
Out[84]:
<keras.callbacks.History at 0x7fb04f5e3e10>
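
Before moving on, it is worth seeing how small this first model is. A quick check of the parameter count (the arithmetic assumes the 3x3 convolution over 15 input channels defined above):

In [ ]:
# 3 filters * (3*3 kernel * 15 input channels) weights + 3 biases = 408 parameters
print(3 * (3 * 3 * 15) + 3)   # 408
print(model.count_params())   # should agree with the hand count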

In [85]:
# Baseline model: just return the last input frame

from keras.layers import Lambda, Reshape, Permute

def take_last_frame(x):
    # x has shape (batch, height, width, channel, frame); keep the newest frame
    return x[:, :, :, :, -1]

wandb.init(config=hyperparams)
config = wandb.config

model = Sequential()
model.add(Reshape((96, 96, 5, 3), input_shape=(config.height, config.width, 5 * 3)))
model.add(Permute((1, 2, 4, 3)))
model.add(Lambda(take_last_frame, output_shape=(96, 96, 3)))

model.compile(optimizer='adam', loss='mse', metrics=[perceptual_distance])

model.fit_generator(my_generator(config.batch_size, train_dir),
                    steps_per_epoch=steps_per_epoch // 4,
                    epochs=config.num_epochs,
                    callbacks=[ImageCallback(), WandbCallback()],
                    validation_steps=validation_steps // 4,
                    validation_data=my_generator(config.batch_size, val_dir))


W&B Run: https://app.wandb.ai/l2k2/ml-class-examples_keras-video-predict/runs/2s4jh01s
Call `%%wandb` in the cell containing your training loop to display live results.
Epoch 1/10
52/52 [==============================] - 11s 205ms/step - loss: 0.0056 - perceptual_distance: 20.8150 - val_loss: 0.0040 - val_perceptual_distance: 17.8912
Epoch 2/10
52/52 [==============================] - 9s 169ms/step - loss: 0.0051 - perceptual_distance: 20.0967 - val_loss: 0.0058 - val_perceptual_distance: 21.1593
Epoch 3/10
52/52 [==============================] - 10s 195ms/step - loss: 0.0055 - perceptual_distance: 20.2913 - val_loss: 0.0044 - val_perceptual_distance: 18.0047
Epoch 4/10
52/52 [==============================] - 10s 191ms/step - loss: 0.0062 - perceptual_distance: 21.6678 - val_loss: 0.0056 - val_perceptual_distance: 20.6382
Epoch 5/10
52/52 [==============================] - 10s 197ms/step - loss: 0.0053 - perceptual_distance: 19.8640 - val_loss: 0.0046 - val_perceptual_distance: 18.3739
Epoch 6/10
52/52 [==============================] - 10s 188ms/step - loss: 0.0059 - perceptual_distance: 21.2223 - val_loss: 0.0043 - val_perceptual_distance: 18.0448
Epoch 7/10
52/52 [==============================] - 10s 192ms/step - loss: 0.0060 - perceptual_distance: 21.6186 - val_loss: 0.0051 - val_perceptual_distance: 19.6750
Epoch 8/10
52/52 [==============================] - 10s 194ms/step - loss: 0.0063 - perceptual_distance: 20.8991 - val_loss: 0.0057 - val_perceptual_distance: 21.1382
Epoch 9/10
52/52 [==============================] - 10s 195ms/step - loss: 0.0051 - perceptual_distance: 19.7828 - val_loss: 0.0049 - val_perceptual_distance: 19.3690
Epoch 10/10
16/52 [========>.....................] - ETA: 8s - loss: 0.0053 - perceptual_distance: 20.6880
KeyboardInterrupt: training interrupted manually

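The Reshape/Permute/Lambda chain above depends on the channel layout the generator produces with np.concatenate. A quick NumPy check (illustrative only, batch dimension dropped) that the chain really recovers the fifth frame:

In [ ]:
# Verify that reshape -> transpose -> [..., -1] picks out the last frame
frames = [np.full((96, 96, 3), k, dtype=np.float32) for k in range(5)]
stacked = np.concatenate(frames, axis=2)              # (96, 96, 15), like the generator
regrouped = stacked.reshape(96, 96, 5, 3)             # (96, 96, frame, channel)
last = regrouped.transpose(0, 1, 3, 2)[:, :, :, -1]   # Permute((1,2,4,3)) + take_last_frame
assert np.array_equal(last, frames[-1])
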
In [86]:
# Just return the last frame, functional style

from keras.layers import Lambda, Reshape, Permute, Input
from keras.models import Model

def take_last_frame(x):
    return x[:, :, :, :, -1]

wandb.init(config=hyperparams)
config = wandb.config

inp = Input((config.height, config.width, 5 * 3))
reshaped = Reshape((96, 96, 5, 3))(inp)
permuted = Permute((1, 2, 4, 3))(reshaped)
last_frame = Lambda(take_last_frame, output_shape=(96, 96, 3))(permuted)
model = Model(inputs=[inp], outputs=[last_frame])

model.compile(optimizer='adam', loss='mse', metrics=[perceptual_distance])

model.fit_generator(my_generator(config.batch_size, train_dir),
                    steps_per_epoch=steps_per_epoch // 4,
                    epochs=config.num_epochs,
                    callbacks=[ImageCallback(), WandbCallback()],
                    validation_steps=validation_steps // 4,
                    validation_data=my_generator(config.batch_size, val_dir))


W&B Run: https://app.wandb.ai/l2k2/ml-class-examples_keras-video-predict/runs/sgrxxlo6
Call `%%wandb` in the cell containing your training loop to display live results.
Epoch 1/10
52/52 [==============================] - 10s 198ms/step - loss: 0.0055 - perceptual_distance: 20.1021 - val_loss: 0.0049 - val_perceptual_distance: 19.9001
Epoch 2/10
35/52 [===================>..........] - ETA: 2s - loss: 0.0057 - perceptual_distance: 21.3775
KeyboardInterrupt: training interrupted manually

In [90]:
# Conv3D: predict the last frame plus a learned 3-D convolutional correction

from keras.layers import Lambda, Reshape, Permute, Input, add, Conv3D
from keras.models import Model

def take_last_frame(x):
    return x[:, :, :, :, -1]

hyperparams["num_epochs"] = 100
wandb.init(config=hyperparams)
config = wandb.config

inp = Input((config.height, config.width, 5 * 3))
reshaped = Reshape((96, 96, 5, 3))(inp)
permuted = Permute((1, 2, 4, 3))(reshaped)
last_frame = Lambda(take_last_frame, output_shape=(96, 96, 3))(permuted)
conv_output = Conv3D(1, (3, 3, 3), padding="same")(permuted)
conv_output_reshape = Reshape((96, 96, 3))(conv_output)
combined = add([last_frame, conv_output_reshape])

model = Model(inputs=[inp], outputs=[combined])

model.compile(optimizer='adam', loss='mse', metrics=[perceptual_distance])

model.fit_generator(my_generator(config.batch_size, train_dir),
                    steps_per_epoch=steps_per_epoch // 4,
                    epochs=config.num_epochs,
                    callbacks=[ImageCallback(), WandbCallback()],
                    validation_steps=validation_steps // 4,
                    validation_data=my_generator(config.batch_size, val_dir))


W&B Run: https://app.wandb.ai/l2k2/ml-class-examples_keras-video-predict/runs/izmh58q9
Call `%%wandb` in the cell containing your training loop to display live results.
Epoch 1/100
52/52 [==============================] - 11s 221ms/step - loss: 0.0308 - perceptual_distance: 98.7534 - val_loss: 0.0142 - val_perceptual_distance: 57.7521
Epoch 2/100
52/52 [==============================] - 10s 187ms/step - loss: 0.0113 - perceptual_distance: 49.3992 - val_loss: 0.0107 - val_perceptual_distance: 46.4511
Epoch 3/100
52/52 [==============================] - 11s 207ms/step - loss: 0.0097 - perceptual_distance: 44.2018 - val_loss: 0.0082 - val_perceptual_distance: 41.2620
Epoch 4/100
52/52 [==============================] - 11s 210ms/step - loss: 0.0090 - perceptual_distance: 41.0019 - val_loss: 0.0066 - val_perceptual_distance: 35.8011
Epoch 5/100
52/52 [==============================] - 11s 208ms/step - loss: 0.0080 - perceptual_distance: 38.0702 - val_loss: 0.0077 - val_perceptual_distance: 36.3716
Epoch 6/100
52/52 [==============================] - 11s 209ms/step - loss: 0.0068 - perceptual_distance: 35.1312 - val_loss: 0.0054 - val_perceptual_distance: 31.3413
Epoch 7/100
52/52 [==============================] - 11s 206ms/step - loss: 0.0063 - perceptual_distance: 33.3940 - val_loss: 0.0079 - val_perceptual_distance: 35.5718
Epoch 8/100
52/52 [==============================] - 11s 208ms/step - loss: 0.0076 - perceptual_distance: 34.0418 - val_loss: 0.0062 - val_perceptual_distance: 35.1260
Epoch 9/100
52/52 [==============================] - 11s 211ms/step - loss: 0.0070 - perceptual_distance: 33.6120 - val_loss: 0.0056 - val_perceptual_distance: 30.3152
Epoch 10/100
52/52 [==============================] - 11s 208ms/step - loss: 0.0067 - perceptual_distance: 32.4787 - val_loss: 0.0049 - val_perceptual_distance: 27.8291
Epoch 11/100
52/52 [==============================] - 11s 209ms/step - loss: 0.0066 - perceptual_distance: 31.8789 - val_loss: 0.0053 - val_perceptual_distance: 31.7373
Epoch 12/100
52/52 [==============================] - 11s 209ms/step - loss: 0.0056 - perceptual_distance: 30.2554 - val_loss: 0.0052 - val_perceptual_distance: 27.0247
Epoch 13/100
52/52 [==============================] - 11s 212ms/step - loss: 0.0057 - perceptual_distance: 29.2055 - val_loss: 0.0064 - val_perceptual_distance: 31.2573
Epoch 14/100
52/52 [==============================] - 11s 208ms/step - loss: 0.0064 - perceptual_distance: 30.0778 - val_loss: 0.0082 - val_perceptual_distance: 30.1121
Epoch 15/100
52/52 [==============================] - 11s 213ms/step - loss: 0.0066 - perceptual_distance: 30.7880 - val_loss: 0.0052 - val_perceptual_distance: 27.1594
Epoch 16/100
52/52 [==============================] - 11s 209ms/step - loss: 0.0058 - perceptual_distance: 30.5703 - val_loss: 0.0054 - val_perceptual_distance: 27.0735
Epoch 17/100
52/52 [==============================] - 11s 208ms/step - loss: 0.0058 - perceptual_distance: 28.7904 - val_loss: 0.0046 - val_perceptual_distance: 26.3379
Epoch 18/100
52/52 [==============================] - 11s 212ms/step - loss: 0.0059 - perceptual_distance: 29.1845 - val_loss: 0.0059 - val_perceptual_distance: 31.8081
Epoch 19/100
52/52 [==============================] - 11s 213ms/step - loss: 0.0058 - perceptual_distance: 29.8545 - val_loss: 0.0051 - val_perceptual_distance: 26.1118
Epoch 20/100
52/52 [==============================] - 11s 212ms/step - loss: 0.0058 - perceptual_distance: 27.8370 - val_loss: 0.0043 - val_perceptual_distance: 24.8734
Epoch 21/100
52/52 [==============================] - 11s 207ms/step - loss: 0.0060 - perceptual_distance: 28.7893 - val_loss: 0.0059 - val_perceptual_distance: 27.6885
Epoch 22/100
52/52 [==============================] - 11s 213ms/step - loss: 0.0051 - perceptual_distance: 26.5802 - val_loss: 0.0049 - val_perceptual_distance: 25.6146
Epoch 23/100
52/52 [==============================] - 11s 206ms/step - loss: 0.0060 - perceptual_distance: 28.0869 - val_loss: 0.0043 - val_perceptual_distance: 27.7650
Epoch 24/100
52/52 [==============================] - 11s 212ms/step - loss: 0.0051 - perceptual_distance: 27.4193 - val_loss: 0.0045 - val_perceptual_distance: 24.2038
Epoch 25/100
52/52 [==============================] - 11s 208ms/step - loss: 0.0062 - perceptual_distance: 27.7622 - val_loss: 0.0044 - val_perceptual_distance: 24.8252
Epoch 26/100
52/52 [==============================] - 11s 206ms/step - loss: 0.0059 - perceptual_distance: 29.4451 - val_loss: 0.0079 - val_perceptual_distance: 29.5265
Epoch 27/100
52/52 [==============================] - 11s 208ms/step - loss: 0.0056 - perceptual_distance: 27.9719 - val_loss: 0.0050 - val_perceptual_distance: 28.9442
Epoch 28/100
52/52 [==============================] - 11s 209ms/step - loss: 0.0052 - perceptual_distance: 27.6330 - val_loss: 0.0040 - val_perceptual_distance: 22.7758
Epoch 29/100
52/52 [==============================] - 11s 211ms/step - loss: 0.0056 - perceptual_distance: 29.0150 - val_loss: 0.0055 - val_perceptual_distance: 26.1886
Epoch 30/100
52/52 [==============================] - 11s 208ms/step - loss: 0.0055 - perceptual_distance: 26.3779 - val_loss: 0.0048 - val_perceptual_distance: 30.4333
Epoch 31/100
52/52 [==============================] - 11s 207ms/step - loss: 0.0063 - perceptual_distance: 28.6932 - val_loss: 0.0049 - val_perceptual_distance: 26.6216
Epoch 32/100
11/52 [=====>........................] - ETA: 10s - loss: 0.0058 - perceptual_distance: 28.8208
KeyboardInterrupt: training interrupted manually

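Because this model predicts the last frame plus a Conv3D correction, the learned correction can be inspected directly. A small sketch (assumes the model from the cell above is still in scope; channels 12:15 of the input are the most recent frame's RGB):

In [ ]:
# Inspect the learned Conv3D correction: prediction minus the last input frame
X, y = next(my_generator(4, val_dir))
residual = model.predict(X) - X[:, :, :, 12:15]
print(residual.mean(), residual.std())
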
In [ ]:
# Conv3D with Gaussian noise on the inputs. Note that GaussianNoise is only
# active at training time, which is why the training metrics in the log below
# look much worse than the validation metrics.

from keras.layers import Lambda, Reshape, Permute, Input, add, Conv3D, GaussianNoise
from keras.models import Model

def take_last_frame(x):
    return x[:, :, :, :, -1]

wandb.init(config=hyperparams)
config = wandb.config

inp = Input((config.height, config.width, 5 * 3))
reshaped = Reshape((96, 96, 5, 3))(inp)
permuted = Permute((1, 2, 4, 3))(reshaped)
noise = GaussianNoise(0.1)(permuted)
last_frame = Lambda(take_last_frame, output_shape=(96, 96, 3))(noise)
conv_output = Conv3D(1, (3, 3, 3), padding="same")(noise)
conv_output_reshape = Reshape((96, 96, 3))(conv_output)
combined = add([last_frame, conv_output_reshape])

model = Model(inputs=[inp], outputs=[combined])

model.compile(optimizer='adam', loss='mse', metrics=[perceptual_distance])

model.fit_generator(my_generator(config.batch_size, train_dir),
                    steps_per_epoch=steps_per_epoch // 4,
                    epochs=config.num_epochs,
                    callbacks=[ImageCallback(), WandbCallback()],
                    validation_steps=validation_steps // 4,
                    validation_data=my_generator(config.batch_size, val_dir))


W&B Run: https://app.wandb.ai/l2k2/ml-class-examples_keras-video-predict/runs/b69epc6v
Call `%%wandb` in the cell containing your training loop to display live results.
Epoch 1/100
52/52 [==============================] - 12s 221ms/step - loss: 0.0320 - perceptual_distance: 118.7155 - val_loss: 0.0097 - val_perceptual_distance: 42.7599
Epoch 2/100
52/52 [==============================] - 9s 180ms/step - loss: 0.0251 - perceptual_distance: 106.1317 - val_loss: 0.0064 - val_perceptual_distance: 35.6701
Epoch 3/100
52/52 [==============================] - 11s 212ms/step - loss: 0.0207 - perceptual_distance: 97.0222 - val_loss: 0.0071 - val_perceptual_distance: 33.8467
Epoch 4/100
52/52 [==============================] - 11s 213ms/step - loss: 0.0191 - perceptual_distance: 91.4583 - val_loss: 0.0058 - val_perceptual_distance: 31.4700
Epoch 5/100
52/52 [==============================] - 11s 220ms/step - loss: 0.0174 - perceptual_distance: 85.7698 - val_loss: 0.0078 - val_perceptual_distance: 33.1324
Epoch 6/100
52/52 [==============================] - 11s 217ms/step - loss: 0.0142 - perceptual_distance: 78.2606 - val_loss: 0.0062 - val_perceptual_distance: 30.2159
Epoch 7/100
52/52 [==============================] - 11s 210ms/step - loss: 0.0131 - perceptual_distance: 73.5458 - val_loss: 0.0068 - val_perceptual_distance: 34.0281
Epoch 8/100
52/52 [==============================] - 11s 207ms/step - loss: 0.0123 - perceptual_distance: 69.6759 - val_loss: 0.0049 - val_perceptual_distance: 27.8792
Epoch 9/100
52/52 [==============================] - 11s 206ms/step - loss: 0.0116 - perceptual_distance: 65.7430 - val_loss: 0.0049 - val_perceptual_distance: 27.5991
Epoch 10/100
52/52 [==============================] - 11s 206ms/step - loss: 0.0099 - perceptual_distance: 60.9869 - val_loss: 0.0042 - val_perceptual_distance: 26.8102
Epoch 11/100
52/52 [==============================] - 11s 209ms/step - loss: 0.0101 - perceptual_distance: 59.4733 - val_loss: 0.0046 - val_perceptual_distance: 28.4542
Epoch 12/100
52/52 [==============================] - 11s 209ms/step - loss: 0.0088 - perceptual_distance: 55.6481 - val_loss: 0.0042 - val_perceptual_distance: 25.8669
Epoch 13/100
 5/52 [=>............................] - ETA: 9s - loss: 0.0105 - perceptual_distance: 57.6925
KeyboardInterrupt: training interrupted manually

In [10]:
# ConvLSTM2D with Gaussian noise

from keras.layers import Lambda, Reshape, Permute, Input, add, GaussianNoise, ConvLSTM2D
from keras.models import Model

def take_last_frame(x):
    return x[:, :, :, :, -1]

wandb.init(config=hyperparams)
config = wandb.config

inp = Input((config.height, config.width, 5 * 3))
reshaped = Reshape((96, 96, 5, 3))(inp)
permuted = Permute((1, 2, 4, 3))(reshaped)
noise = GaussianNoise(0.1)(permuted)
last_frame = Lambda(take_last_frame, output_shape=(96, 96, 3))(noise)
# ConvLSTM2D expects time-major input: (batch, time, height, width, channels)
permuted_2 = Permute((4, 1, 2, 3))(noise)

conv_lstm_output_1 = ConvLSTM2D(6, (3, 3), padding='same')(permuted_2)
conv_output = Conv2D(3, (3, 3), padding="same")(conv_lstm_output_1)
combined = add([last_frame, conv_output])

model = Model(inputs=[inp], outputs=[combined])

model.compile(optimizer='adam', loss='mse', metrics=[perceptual_distance])

model.fit_generator(my_generator(config.batch_size, train_dir),
                    steps_per_epoch=steps_per_epoch // 4,
                    epochs=config.num_epochs,
                    callbacks=[ImageCallback(), WandbCallback()],
                    validation_steps=validation_steps // 4,
                    validation_data=my_generator(config.batch_size, val_dir))


Epoch 1/40
52/52 [==============================] - 14s 265ms/step - loss: 0.0090 - perceptual_distance: 54.1361 - val_loss: 0.0038 - val_perceptual_distance: 25.8277
Epoch 2/40
52/52 [==============================] - 12s 239ms/step - loss: 0.0088 - perceptual_distance: 52.7034 - val_loss: 0.0049 - val_perceptual_distance: 27.9076
Epoch 3/40
52/52 [==============================] - 13s 253ms/step - loss: 0.0087 - perceptual_distance: 51.7484 - val_loss: 0.0053 - val_perceptual_distance: 28.5357
Epoch 4/40
52/52 [==============================] - 13s 251ms/step - loss: 0.0078 - perceptual_distance: 49.3450 - val_loss: 0.0044 - val_perceptual_distance: 25.3791
Epoch 5/40
52/52 [==============================] - 13s 252ms/step - loss: 0.0084 - perceptual_distance: 49.8420 - val_loss: 0.0048 - val_perceptual_distance: 26.8417
Epoch 6/40
52/52 [==============================] - 14s 265ms/step - loss: 0.0079 - perceptual_distance: 48.2071 - val_loss: 0.0041 - val_perceptual_distance: 27.0815
Epoch 7/40
52/52 [==============================] - 13s 252ms/step - loss: 0.0077 - perceptual_distance: 47.1554 - val_loss: 0.0051 - val_perceptual_distance: 28.9918
Epoch 8/40
12/52 [=====>........................] - ETA: 10s - loss: 0.0070 - perceptual_distance: 46.3427
KeyboardInterrupt: training interrupted manually

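In the cell above, the extra Permute((4,1,2,3)) is what makes the data time-major, because ConvLSTM2D consumes 5-D input shaped (batch, time, height, width, channels). The NumPy equivalent, for reference:

In [ ]:
# Shape walk-through for the ConvLSTM2D input (illustrative)
x = np.zeros((1, 96, 96, 3, 5))     # after Permute((1,2,4,3)): (batch, H, W, C, T)
t = x.transpose(0, 4, 1, 2, 3)      # Permute((4,1,2,3)): (batch, T, H, W, C)
print(t.shape)                      # (1, 5, 96, 96, 3)
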
In [11]:
# ConvLSTM2D encoder-decoder with Gaussian noise and skip connections

from keras.layers import Lambda, Reshape, Permute, Input, GaussianNoise, concatenate
from keras.layers import ConvLSTM2D, BatchNormalization, TimeDistributed, Add
from keras.models import Model

def take_last_frame(x):
    return x[:, :, :, :, -1]

wandb.init(config=hyperparams)
config = wandb.config

c = 4  # base number of ConvLSTM filters

inp = Input((config.height, config.width, 5 * 3))
reshaped = Reshape((96, 96, 5, 3))(inp)
permuted = Permute((1, 2, 4, 3))(reshaped)
noise = GaussianNoise(0.1)(permuted)
last_frame = Lambda(take_last_frame, output_shape=(96, 96, 3))(noise)
x = Permute((4, 1, 2, 3))(noise)

# Encoder: two ConvLSTM blocks with batch norm, dropout and pooling
x = ConvLSTM2D(filters=c, kernel_size=(3, 3), padding='same', name='conv_lstm1', return_sequences=True)(x)
c1 = BatchNormalization()(x)
x = Dropout(0.2)(c1)
x = TimeDistributed(MaxPooling2D(pool_size=(2, 2)))(x)

x = ConvLSTM2D(filters=2 * c, kernel_size=(3, 3), padding='same', name='conv_lstm3', return_sequences=True)(x)
c2 = BatchNormalization()(x)
x = Dropout(0.2)(c2)
x = TimeDistributed(MaxPooling2D(pool_size=(2, 2)))(x)

# Bottleneck
x = ConvLSTM2D(filters=4 * c, kernel_size=(3, 3), padding='same', name='conv_lstm4', return_sequences=True)(x)

# Decoder: upsample back, with a skip connection from c2
x = TimeDistributed(UpSampling2D(size=(2, 2)))(x)
x = ConvLSTM2D(filters=4 * c, kernel_size=(3, 3), padding='same', name='conv_lstm5', return_sequences=True)(x)
x = BatchNormalization()(x)

x = ConvLSTM2D(filters=2 * c, kernel_size=(3, 3), padding='same', name='conv_lstm6', return_sequences=True)(x)
x = BatchNormalization()(x)
x = Add()([c2, x])
x = Dropout(0.2)(x)

x = TimeDistributed(UpSampling2D(size=(2, 2)))(x)
x = ConvLSTM2D(filters=c, kernel_size=(3, 3), padding='same', name='conv_lstm7', return_sequences=False)(x)
x = BatchNormalization()(x)
combined = concatenate([last_frame, x])
combined = Conv2D(3, (1, 1))(combined)
model = Model(inputs=[inp], outputs=[combined])

model.compile(optimizer='adam', loss='mse', metrics=[perceptual_distance])

model.fit_generator(my_generator(config.batch_size, train_dir),
                    steps_per_epoch=steps_per_epoch // 4,
                    epochs=config.num_epochs,
                    callbacks=[ImageCallback(), WandbCallback()],
                    validation_steps=validation_steps // 4,
                    validation_data=my_generator(config.batch_size, val_dir))


W&B Run: https://app.wandb.ai/l2k2/ml-class-examples_keras-video-predict/runs/pa5thspf
Call `%%wandb` in the cell containing your training loop to display live results.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.
Instructions for updating:
Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.
Epoch 1/10
 3/52 [>.............................] - ETA: 2:56 - loss: 0.6657 - perceptual_distance: 519.0305
KeyboardInterrupt: training interrupted manually

In [ ]: