Lesson 9: Neural Super-Resolution

01 SEP 2017

Wayne H Nixalo


This notebook is a code-along of neural-sr.ipynb, to make sure I can get super-resolution, as done in class, working properly.

Imagenet image generation

This notebook contains an implementation of a super-resolution network trained on ImageNet.


In [6]:
%matplotlib inline
# import importlib
# import utils2; importlib.reload(utils2)

import os, sys
sys.path.insert(1, os.path.join('../utils'))

from utils2 import *
from scipy.optimize import fmin_l_bfgs_b
from scipy.misc import imsave
from keras import metrics
from vgg16_avg import VGG16_Avg


Using TensorFlow backend.
/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/sklearn/cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
  "This module will be removed in 0.20.", DeprecationWarning)

In [7]:
from bcolz_array_iterator import BcolzArrayIterator

In [8]:
limit_mem()

In [9]:
path = '../data/'

All code is identical to the implementation shown in the neural-style notebook, with the exception of the BcolzArrayIterator and the training implementation.


In [10]:
# ImageNet channel means; preproc subtracts them and flips RGB -> BGR for VGG,
# deproc undoes both steps and clips back to valid pixel values
rn_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32)
preproc = lambda x: (x - rn_mean)[:, :, :, ::-1]
deproc  = lambda x,s: np.clip(x.reshape(s)[:, :, :, ::-1] + rn_mean, 0, 255)
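
A quick sanity check (my addition, not in the original notebook): preproc and deproc should invert each other for any image whose values already sit in [0, 255].

In [ ]:
img = np.random.uniform(0, 255, (1, 72, 72, 3)).astype(np.float32)
np.allclose(img, deproc(preproc(img), img.shape), atol=1e-3)  # True: the clip is a no-op here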

We can't load all of ImageNet into memory, so we open the bcolz arrays on disk and pass them to the BcolzArrayIterator generator.


In [11]:
arr_lr = bcolz.open(path + 'trn_resized_72.bc')
arr_hr = bcolz.open(path + 'trn_resized_288.bc')
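
With the arrays open, we can check their shapes without reading them in (a quick sketch; the 72px and 288px sizes come from how the arrays were saved):

In [ ]:
arr_lr.shape, arr_hr.shape  # e.g. (19439, 72, 72, 3), (19439, 288, 288, 3)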

In [12]:
# fit-style kwargs with a TQDM progress bar; unused by the manual loop below
pars = {'verbose':0, 'callbacks': [TQDMNotebookCallback(leave_inner=True)]}

In [27]:
def conv_block(x, filters, size, stride=(2,2), mode='same', act=True):
    x = Convolution2D(filters, size, size, subsample=stride, border_mode=mode)(x)
    x = BatchNormalization(mode=2)(x)
    return Activation('relu')(x) if act else x

def res_block(ip, nf=64):
    # two stride-1 conv blocks plus an identity shortcut
    x = conv_block(ip, nf, 3, (1,1))
    x = conv_block(x, nf, 3, (1,1), act=False)
    return merge([x, ip], mode='sum')

def up_block(x, filters, size):
    # each up_block doubles spatial resolution
    x = keras.layers.UpSampling2D()(x)
    x = Convolution2D(filters, size, size, border_mode='same')(x)
    x = BatchNormalization(mode=2)(x)
    return Activation('relu')(x)

def get_model(arr):
    inp = Input(arr.shape[1:])
    x = conv_block(inp, 64, 9, (1,1))
    for i in range(4): x = res_block(x)
    # two 2x upsamplings take the 72px input to the 288px target size
    x = up_block(x, 64, 3)
    x = up_block(x, 64, 3)
    x = Convolution2D(3, 9, 9, activation='tanh', border_mode='same')(x)
    # map tanh's [-1, 1] range back to pixel values in [0, 255]
    outp = Lambda(lambda x: (x+1) * 127.5)(x)
    return inp, outp

def get_outp(m, λn): return m.get_layer(f'block{λn}_conv2').output

def mean_sqr_b(diff):
    # per-sample root-mean-square over all non-batch dimensions
    dims = list(range(1, K.ndim(diff)))
    return K.expand_dims(K.sqrt(K.mean(diff**2, dims)), 0)

def content_fn(x):
    # x is the concatenated list vgg1 + vgg2: the first n tensors are the
    # target activations, the last n the generated-output activations
    res = 0; n=len(w)
    for i in range(n): res += mean_sqr_b(x[i] - x[i+n]) * w[i]
    return res

In [28]:
inp, outp = get_model(arr_lr)
shp = arr_hr.shape[1:]

vgg_inp = Input(shp)
vgg = VGG16(include_top=False, input_tensor=Lambda(preproc)(vgg_inp))
# VGG is only a fixed feature extractor for the perceptual loss, so freeze it
for λ in vgg.layers: λ.trainable=False

In [29]:
vgg_content = Model(vgg_inp, [get_outp(vgg, o) for o in [1,2,3]])
vgg1 = vgg_content(vgg_inp)
vgg2 = vgg_content(outp)

In [30]:
w = [0.1, 0.8, 0.1]  # relative weights for the block1/2/3 conv2 activations

In [31]:
# the model's output *is* the weighted perceptual loss, so training regresses
# it toward dummy targets of zero with plain MSE
m_sr = Model([inp, vgg_inp], Lambda(content_fn)(vgg1 + vgg2))
m_sr.compile('adam', 'mse')

Our training implementation has been altered to accommodate the BcolzArrayIterator.

We're unable to use model.fit_generator(), because that function expects the generator to return a tuple of inputs and targets.

Our generator, however, yields two inputs. We can work around this by separately pulling our inputs out of the generator and then calling model.train_on_batch() with those inputs and our dummy targets. model.train_on_batch() simply performs one gradient update on the batch of data.

This technique of writing your own training loop is useful when you're working with iterators or complicated inputs that don't conform to Keras's standard fitting methods.


In [64]:
def train(bs, niter=10):
    targ = np.zeros((bs, 1))  # dummy targets: the loss output is driven to 0
    bc = BcolzArrayIterator(arr_hr, arr_lr, batch_size=bs)
    for i in range(niter):
        hr, lr = next(bc)
        m_sr.train_on_batch([lr[:bs], hr[:bs]], targ)

In [73]:
bc = BcolzArrayIterator(arr_hr, arr_lr)


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-73-c4451ae4f494> in <module>()
----> 1 bc = BcolzArrayIterator(arr_hr, arr_lr)

/home/wnixalo/Kaukasos/FAI02/utils/bcolz_array_iterator.py in __init__(self, X, y, w, batch_size, shuffle, seed)
     40                              'Found: X.shape = %s, w.shape = %s' % (X.shape, w.shape))
     41         if batch_size % X.chunklen != 0:
---> 42             raise ValueError('batch_size needs to be a multiple of X.chunklen')
     43 
     44         self.chunks_per_batch = batch_size // X.chunklen

ValueError: batch_size needs to be a multiple of X.chunklen

In [72]:
len(arr_hr)


Out[72]:
19439

In [65]:
its = len(arr_hr)//8; its


Out[65]:
2429

NOTE: Batch size must be a multiple of the chunk length.

So the chunk length isn't the number I get by dividing len(arr_hr); it's a property of the carray itself (the iterator checks X.chunklen in the traceback above), fixed when the array was saved to disk.
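
One way to check directly (a sketch; the value depends on how the arrays were written):

In [ ]:
arr_hr.chunklen  # batch_size must be a multiple of this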


In [70]:
temp = BcolzArrayIterator(arr_hr, arr_lr, batch_size = 160)


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-70-347412d25861> in <module>()
----> 1 temp = BcolzArrayIterator(arr_hr, arr_lr, batch_size = 160)

/home/wnixalo/Kaukasos/FAI02/utils/bcolz_array_iterator.py in __init__(self, X, y, w, batch_size, shuffle, seed)
     40                              'Found: X.shape = %s, w.shape = %s' % (X.shape, w.shape))
     41         if batch_size % X.chunklen != 0:
---> 42             raise ValueError('batch_size needs to be a multiple of X.chunklen')
     43 
     44         self.chunks_per_batch = batch_size // X.chunklen

ValueError: batch_size needs to be a multiple of X.chunklen

In [67]:
%time train (1, 200)


---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-67-a60977aa0477> in <module>()
----> 1 get_ipython().magic('time train (1, 200)')

/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/IPython/core/interactiveshell.py in magic(self, arg_s)
   2158         magic_name, _, magic_arg_s = arg_s.partition(' ')
   2159         magic_name = magic_name.lstrip(prefilter.ESC_MAGIC)
-> 2160         return self.run_line_magic(magic_name, magic_arg_s)
   2161 
   2162     #-------------------------------------------------------------------------

/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/IPython/core/interactiveshell.py in run_line_magic(self, magic_name, line)
   2079                 kwargs['local_ns'] = sys._getframe(stack_depth).f_locals
   2080             with self.builtin_trap:
-> 2081                 result = fn(*args,**kwargs)
   2082             return result
   2083 

<decorator-gen-60> in time(self, line, cell, local_ns)

/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/IPython/core/magic.py in <lambda>(f, *a, **k)
    186     # but it's overkill for just that one bit of state.
    187     def magic_deco(arg):
--> 188         call = lambda f, *a, **k: f(*a, **k)
    189 
    190         if callable(arg):

/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/IPython/core/magics/execution.py in time(self, line, cell, local_ns)
   1179         if mode=='eval':
   1180             st = clock2()
-> 1181             out = eval(code, glob, local_ns)
   1182             end = clock2()
   1183         else:

<timed eval> in <module>()

<ipython-input-64-75786c373335> in train(bs, niter)
      1 def train(bs, niter=10):
      2     targ = np.zeros((bs, 1))
----> 3     bc = BcolzArrayIterator(arr_hr, arr_lr, batch_size=bs)
      4     for i in range(niter):
      5         hr, lr = next(bc)

/home/wnixalo/Kaukasos/FAI02/utils/bcolz_array_iterator.py in __init__(self, X, y, w, batch_size, shuffle, seed)
     40                              'Found: X.shape = %s, w.shape = %s' % (X.shape, w.shape))
     41         if batch_size % X.chunklen != 0:
---> 42             raise ValueError('batch_size needs to be a multiple of X.chunklen')
     43 
     44         self.chunks_per_batch = batch_size // X.chunklen

ValueError: batch_size needs to be a multiple of X.chunklen
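
Left unresolved in this run, but the fix follows from the error message: read the chunk length off the carray and pass a multiple of it as the batch size (a sketch, untested here).

In [ ]:
bs = arr_hr.chunklen       # or any multiple of it
niter = len(arr_hr) // bs  # roughly one pass over the data
%time train(bs, niter)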

