01 SEP 2017

WNixalo


In [1]:
%matplotlib inline
import os, sys; sys.path.insert(1, os.path.join('../utils'))

import importlib
import utils2; importlib.reload(utils2)
from utils2 import *

from scipy.optimize import fmin_l_bfgs_b
from scipy.misc import imsave
from keras import metrics

from vgg16_avg import VGG16_Avg


Using TensorFlow backend.
/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/sklearn/cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
  "This module will be removed in 0.20.", DeprecationWarning)

In [2]:
# Tell Tensorflow to use no more GPU RAM than necessary
limit_mem()
path = '../data/nst/'
dpath = '../data/'
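
Note: limit_mem() is a helper from the course's utils2 module. A minimal sketch of what it likely does (assuming TF 1.x and the Keras TensorFlow backend):

    import tensorflow as tf
    from keras import backend as K

    def limit_mem():
        # Allocate GPU memory on demand rather than grabbing the whole card up front.
        cfg = tf.ConfigProto()
        cfg.gpu_options.allow_growth = True
        K.set_session(tf.Session(config=cfg))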

In [3]:
fnames = glob.glob(path+'*.JPG')
n = len(fnames); n


Out[3]:
1

In [4]:
fn = fnames[0]; fn


Out[4]:
'../data/nst/IMG_2934.JPG'

In [5]:
img = Image.open(fnames[0]); img


Out[5]:
(inline image output: the source photo IMG_2934.JPG)
In [6]:
rn_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32)
preproc = lambda x: (x - rn_mean)[:, :, :, ::-1]
deproc = lambda x,s: np.clip(x.reshape(s)[:, :, :, ::-1] + rn_mean, 0, 255)
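
These mirror the standard VGG preprocessing: subtract the ImageNet per-channel means and flip RGB to BGR; deproc undoes both and clips back to valid pixel values. A quick round-trip sanity check (a sketch, assuming x is a 4-D float image batch):

    x = np.expand_dims(np.array(img, dtype=np.float32), 0)
    assert np.allclose(deproc(preproc(x), x.shape), np.clip(x, 0, 255))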

In [7]:
img_arr = preproc(np.expand_dims(np.array(img), 0))
shp = img_arr.shape

In [8]:
arr_lr = bcolz.open(dpath+'trn_resized_72.bc')[:]
arr_hr = bcolz.open(dpath+'trn_resized_288.bc')[:]

In [9]:
parms = {'verbose': 0, 'callbacks': [TQDMNotebookCallback(leave_inner=True)]}

In [10]:
# Building blocks for the upsampling network (Keras 1.x API).
def conv_block(x, filters, size, stride=(2,2), mode='same', act=True):
    # conv -> batchnorm -> (optional) ReLU
    x = Convolution2D(filters, size, size, subsample=stride, border_mode=mode)(x)
    x = BatchNormalization(mode=2)(x)
    return Activation('relu')(x) if act else x
def res_block(ip, nf=64):
    # two 3x3 conv blocks with an identity skip connection
    x = conv_block(ip, nf, 3, (1,1))
    x = conv_block(x, nf, 3, (1,1), act=False)
    return merge([x, ip], mode='sum')
def deconv_block(x, filters, size, shape, stride=(2,2)):
    # transposed-conv upsampling (defined for reference; up_block is used below)
    x = Deconvolution2D(filters, size, size, subsample=stride, 
        border_mode='same', output_shape=(None,)+shape)(x)
    x = BatchNormalization(mode=2)(x)
    return Activation('relu')(x)
def up_block(x, filters, size):
    # 2x upsampling followed by a conv (alternative to deconvolution)
    x = keras.layers.UpSampling2D()(x)
    x = Convolution2D(filters, size, size, border_mode='same')(x)
    x = BatchNormalization(mode=2)(x)
    return Activation('relu')(x)

In [11]:
# Upsampling ("super-resolution") network: 9x9 conv, 4 residual blocks,
# two 2x up-blocks (72x72 -> 288x288), then a 9x9 conv down to 3 channels.
# The tanh output is rescaled from [-1,1] to [0,255] pixel values.
inp=Input(arr_lr.shape[1:])
x=conv_block(inp, 64, 9, (1,1))
for i in range(4): x=res_block(x)
x=up_block(x, 64, 3)
x=up_block(x, 64, 3)
x=Convolution2D(3, 9, 9, activation='tanh', border_mode='same')(x)
outp=Lambda(lambda x: (x+1)*127.5)(x)

In [12]:
inp_shape = arr_lr.shape[1:]
out_shape = arr_hr.shape[1:]

In [13]:
# VGG16 feature extractor; preproc is applied inside the graph so the rest
# of the pipeline can stay in raw 0-255 pixel space.
vgg_inp=Input(out_shape)
vgg = VGG16(include_top=False, input_tensor=Lambda(preproc)(vgg_inp))

In [14]:
# Freeze VGG: it is only used to compute the perceptual loss.
for l in vgg.layers: l.trainable=False

In [15]:
def get_outp(m, ln): return m.get_layer(f'block{ln}_conv1').output
vgg_content = Model(vgg_inp, [get_outp(vgg, o) for o in [1,2,3]])
vgg1 = vgg_content(vgg_inp)   # features of the true high-res image
vgg2 = vgg_content(outp)      # features of the upsampling network's output

In [16]:
def mean_sqr_b(diff): 
    # root-mean-square of diff over all non-batch axes, one value per sample
    dims = list(range(1,K.ndim(diff)))
    return K.expand_dims(K.sqrt(K.mean(diff**2, dims)), 0)

In [17]:
w=[0.1, 0.8, 0.1]
def content_fn(x): 
    # x is [target_feats_1..3, output_feats_1..3]; return the weighted
    # perceptual (content) loss over the three VGG layers
    res = 0; n=len(w)
    for i in range(n): res += mean_sqr_b(x[i]-x[i+n]) * w[i]
    return res
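
Written out, content_fn returns the weighted perceptual loss, with $\phi_i$ the block{i}_conv1 activations of VGG16:

    loss(y, \hat{y}) = \sum_{i=1}^{3} w_i \, \sqrt{\mathrm{mean}\big((\phi_i(y) - \phi_i(\hat{y}))^2\big)}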

In [18]:
# The model's output *is* the loss, so it is fit against a dummy target of
# zeros with an MSE objective.
m_sr = Model([inp, vgg_inp], Lambda(content_fn)(vgg1+vgg2))
targ = np.zeros((arr_hr.shape[0], 1))

In [31]:
m_sr.compile('adam', 'mse')
m_sr.fit([arr_lr, arr_hr], targ, 8, 2, **parms)


---------------------------------------------------------------------------
ResourceExhaustedError                    Traceback (most recent call last)
/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
   1138     try:
-> 1139       return fn(*args)
   1140     except errors.OpError as e:

/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1120                                  feed_dict, fetch_list, target_list,
-> 1121                                  status, run_metadata)
   1122 

/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/contextlib.py in __exit__(self, type, value, traceback)
     88             try:
---> 89                 next(self.gen)
     90             except StopIteration:

/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in raise_exception_on_not_ok_status()
    465           compat.as_text(pywrap_tensorflow.TF_Message(status)),
--> 466           pywrap_tensorflow.TF_GetCode(status))
    467   finally:

ResourceExhaustedError: OOM when allocating tensor with shape[8,64,144,144]
	 [[Node: gradients/MaxPool_7_grad/MaxPoolGrad = MaxPoolGrad[T=DT_FLOAT, _class=["loc:@MaxPool_7"], data_format="NHWC", ksize=[1, 2, 2, 1], padding="VALID", strides=[1, 2, 2, 1], _device="/job:localhost/replica:0/task:0/gpu:0"](Relu_26, MaxPool_7, gradients/Conv2D_32_grad/Conv2DBackpropInput)]]

During handling of the above exception, another exception occurred:

ResourceExhaustedError                    Traceback (most recent call last)
<ipython-input-31-c797f3e38711> in <module>()
      1 m_sr.compile('adam', 'mse')
----> 2 m_sr.fit([arr_lr, arr_hr], targ, 8, 2, **parms)

/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, nb_epoch, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch)
   1194                               val_f=val_f, val_ins=val_ins, shuffle=shuffle,
   1195                               callback_metrics=callback_metrics,
-> 1196                               initial_epoch=initial_epoch)
   1197 
   1198     def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):

/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/engine/training.py in _fit_loop(self, f, ins, out_labels, batch_size, nb_epoch, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch)
    889                 batch_logs['size'] = len(batch_ids)
    890                 callbacks.on_batch_begin(batch_index, batch_logs)
--> 891                 outs = f(ins_batch)
    892                 if not isinstance(outs, list):
    893                     outs = [outs]

/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
   1941         session = get_session()
   1942         updated = session.run(self.outputs + [self.updates_op],
-> 1943                               feed_dict=feed_dict)
   1944         return updated[:len(self.outputs)]
   1945 

/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
    787     try:
    788       result = self._run(None, fetches, feed_dict, options_ptr,
--> 789                          run_metadata_ptr)
    790       if run_metadata:
    791         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    995     if final_fetches or final_targets:
    996       results = self._do_run(handle, final_targets, final_fetches,
--> 997                              feed_dict_string, options, run_metadata)
    998     else:
    999       results = []

/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1130     if handle is None:
   1131       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1132                            target_list, options, run_metadata)
   1133     else:
   1134       return self._do_call(_prun_fn, self._session, handle, feed_dict,

/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _do_call(self, fn, *args)
   1150         except KeyError:
   1151           pass
-> 1152       raise type(e)(node_def, op, message)
   1153 
   1154   def _extend_graph(self):

ResourceExhaustedError: OOM when allocating tensor with shape[8,64,144,144]
	 [[Node: gradients/MaxPool_7_grad/MaxPoolGrad = MaxPoolGrad[T=DT_FLOAT, _class=["loc:@MaxPool_7"], data_format="NHWC", ksize=[1, 2, 2, 1], padding="VALID", strides=[1, 2, 2, 1], _device="/job:localhost/replica:0/task:0/gpu:0"](Relu_26, MaxPool_7, gradients/Conv2D_32_grad/Conv2DBackpropInput)]]

Caused by op 'gradients/MaxPool_7_grad/MaxPoolGrad', defined at:
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/runpy.py", line 193, in _run_module_as_main
    "__main__", mod_spec)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/runpy.py", line 85, in _run_code
    exec(code, run_globals)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/ipykernel_launcher.py", line 16, in <module>
    app.launch_new_instance()
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/traitlets/config/application.py", line 658, in launch_instance
    app.start()
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/ipykernel/kernelapp.py", line 477, in start
    ioloop.IOLoop.instance().start()
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/zmq/eventloop/ioloop.py", line 177, in start
    super(ZMQIOLoop, self).start()
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tornado/ioloop.py", line 888, in start
    handler_func(fd_obj, events)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tornado/stack_context.py", line 277, in null_wrapper
    return fn(*args, **kwargs)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 440, in _handle_events
    self._handle_recv()
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 472, in _handle_recv
    self._run_callback(callback, msg)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/zmq/eventloop/zmqstream.py", line 414, in _run_callback
    callback(*args, **kwargs)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tornado/stack_context.py", line 277, in null_wrapper
    return fn(*args, **kwargs)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 283, in dispatcher
    return self.dispatch_shell(stream, msg)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 235, in dispatch_shell
    handler(stream, idents, msg)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/ipykernel/kernelbase.py", line 399, in execute_request
    user_expressions, allow_stdin)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/ipykernel/ipkernel.py", line 196, in do_execute
    res = shell.run_cell(code, store_history=store_history, silent=silent)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/ipykernel/zmqshell.py", line 533, in run_cell
    return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
    interactivity=interactivity, compiler=compiler, result=result)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2828, in run_ast_nodes
    if self.run_code(code, result):
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2882, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "<ipython-input-31-c797f3e38711>", line 2, in <module>
    m_sr.fit([arr_lr, arr_hr], targ, 8, 2, **parms)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/engine/training.py", line 1168, in fit
    self._make_train_function()
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/engine/training.py", line 760, in _make_train_function
    self.total_loss)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/optimizers.py", line 416, in get_updates
    grads = self.get_gradients(loss, params)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/optimizers.py", line 82, in get_gradients
    grads = K.gradients(loss, params)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 1968, in gradients
    return tf.gradients(loss, variables, colocate_gradients_with_ops=True)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py", line 540, in gradients
    grad_scope, op, func_call, lambda: grad_fn(op, *out_grads))
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py", line 346, in _MaybeCompile
    return grad_fn()  # Exit early
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py", line 540, in <lambda>
    grad_scope, op, func_call, lambda: grad_fn(op, *out_grads))
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/ops/nn_grad.py", line 525, in _MaxPoolGrad
    data_format=op.get_attr("data_format"))
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/ops/gen_nn_ops.py", line 1752, in _max_pool_grad
    data_format=data_format, name=name)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 767, in apply_op
    op_def=op_def)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 2506, in create_op
    original_op=self._default_original_op, op_def=op_def)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1269, in __init__
    self._traceback = _extract_stack()

...which was originally created as op 'MaxPool_7', defined at:
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/runpy.py", line 193, in _run_module_as_main
    "__main__", mod_spec)
[elided 16 identical lines from previous traceback]
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
    interactivity=interactivity, compiler=compiler, result=result)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2822, in run_ast_nodes
    if self.run_code(code, result):
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/IPython/core/interactiveshell.py", line 2882, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "<ipython-input-27-6c923b4f7e8e>", line 4, in <module>
    vgg2 = vgg_content(outp)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/engine/topology.py", line 572, in __call__
    self.add_inbound_node(inbound_layers, node_indices, tensor_indices)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/engine/topology.py", line 635, in add_inbound_node
    Node.create_node(self, inbound_layers, node_indices, tensor_indices)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/engine/topology.py", line 166, in create_node
    output_tensors = to_list(outbound_layer.call(input_tensors[0], mask=input_masks[0]))
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/engine/topology.py", line 2247, in call
    output_tensors, output_masks, output_shapes = self.run_internal_graph(inputs, masks)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/engine/topology.py", line 2390, in run_internal_graph
    computed_mask))
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/layers/pooling.py", line 160, in call
    dim_ordering=self.dim_ordering)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/layers/pooling.py", line 210, in _pooling_function
    pool_mode='max')
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2866, in pool2d
    x = tf.nn.max_pool(x, pool_size, strides, padding=padding)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/ops/nn_ops.py", line 1769, in max_pool
    name=name)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/ops/gen_nn_ops.py", line 1605, in _max_pool
    data_format=data_format, name=name)
  File "/home/wnixalo/miniconda3/envs/FAI3/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 767, in apply_op
    op_def=op_def)

ResourceExhaustedError (see above for traceback): OOM when allocating tensor with shape[8,64,144,144]
	 [[Node: gradients/MaxPool_7_grad/MaxPoolGrad = MaxPoolGrad[T=DT_FLOAT, _class=["loc:@MaxPool_7"], data_format="NHWC", ksize=[1, 2, 2, 1], padding="VALID", strides=[1, 2, 2, 1], _device="/job:localhost/replica:0/task:0/gpu:0"](Relu_26, MaxPool_7, gradients/Conv2D_32_grad/Conv2DBackpropInput)]]
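
The fit above runs out of GPU memory at batch size 8 with 288x288 targets. The usual workaround is a smaller batch size; a sketch, assuming memory is the only constraint here:

    m_sr.compile('adam', 'mse')
    m_sr.fit([arr_lr, arr_hr], targ, 4, 2, **parms)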

In [19]:
m_sr.save_weights(dpath + 'sr-imgs/m_sr_1ep.h5')

In [ ]:
m_sr.compile('adam', 'mse')
m_sr.load_weights(dpath + 'sr-imgs/m_sr_1ep.h5')

In [ ]:
K.set_value(m_sr.optimizer.lr, 1e-4)
m_sr.fit([arr_lr, arr_hr], targ, 16, 1, **parms)

In [ ]:
# The trained upsampling network on its own, without the loss head.
top_model = Model(inp, outp)

In [ ]:
p = top_model.predict(arr_lr[10:11])
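
To eyeball the result, something like the following should work (a sketch; assumes the bcolz arrays hold 0-255 pixel values):

    import matplotlib.pyplot as plt  # already available via utils2; shown for completeness
    fig, axes = plt.subplots(1, 2, figsize=(9, 5))
    axes[0].imshow(arr_lr[10].astype('uint8'))              # 72x72 input
    axes[1].imshow(np.clip(p[0], 0, 255).astype('uint8'))   # 288x288 prediction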
