In [1]:
from __future__ import print_function, division
import theano
from theano import tensor as T
from theano.tensor.nnet import conv

import numpy as np
import pylab
from PIL import Image

In [2]:
# code from http://deeplearning.net/tutorial/lenet.html
rng = np.random.RandomState(23455)

# instantiate 4D tensor for input
input = T.tensor4(name='input')

# initialize shared variable for weights.
# The shape of the tensor is: 
#    0: number of feature maps at layer m   (i.e. the output)
#    1: number of feature maps at layer m-1 (i.e. the input)
#    2: filter height
#    3: filter width
# The input consists of 3 feature maps (an RGB color image).
# Here we use a single convolutional filter with a 3x3 receptive field.
w_shape = (1, 3, 3, 3)
w_bound = np.sqrt(1 * 3 * 3)  # tutorial-style bound, unused below
W = theano.shared(
    np.asarray(
        rng.uniform(
            low=0.0,   # the tutorial uses -1.0 / w_bound
            high=1.0,  # the tutorial uses  1.0 / w_bound
            size=w_shape),
        dtype=input.dtype),
    name='W')

# build symbolic expression that computes the convolution of input with the filters in W
conv_out = conv.conv2d(input, filters=W, border_mode='valid')

# ignoring biases for now
output = conv_out

# create theano function to compute filtered images
f = theano.function([input], output)
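
In [ ]:
# Illustrative sketch, not part of the original run: sanity-check the
# compiled function on random placeholder data. With border_mode='valid'
# each spatial dimension shrinks by filter_size - 1 (here 3 - 1 = 2).
dummy = np.random.rand(1, 3, 120, 160).astype(theano.config.floatX)
print(f(dummy).shape)   # expected: (1, 1, 118, 158)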

In [312]:
# open the 3wolfmoon example image, dimensions 639x516 (height x width)
img = Image.open(open('3wolfmoon.jpg'))
# dimensions are (height, width, channel)
img = np.asarray(img, dtype='float64') / 256.

# make B&W
# img = img.mean(axis=2, keepdims=True)

# put image in 4D tensor of shape (1, 3, height, width)
img_ = img.transpose(2, 0, 1).reshape(1, 3, 639, 516)
filtered_img = f(img_)

# plot original image and first and second components of output
pylab.subplot(1, 3, 1); pylab.axis('off'); pylab.imshow(img[:,:,:])
pylab.gray();
# recall that the convOp output (filtered image) is actually a "minibatch",
# of size 1 here, so we take index 0 in the first dimension:
pylab.subplot(1, 3, 2); pylab.axis('off'); pylab.imshow(filtered_img[0, 0, :, :])
#pylab.subplot(1, 3, 3); pylab.axis('off'); pylab.imshow(filtered_img[0, 1, :, :])
pylab.show()

In [22]:
theano.tensor.sharedvar.TensorSharedVariable(W.T.name, W.T.type, W.T.eval(), True)


Out[22]:
W.T

In [25]:
theano.shared(W.T, borrow=True)


---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-25-7004d3c7b1f7> in <module>()
----> 1 theano.shared(W.T, borrow=True)

/usr/local/lib/python2.7/dist-packages/theano/compile/sharedvalue.pyc in shared(value, name, strict, allow_downcast, **kwargs)
    201     try:
    202         if isinstance(value, Variable):
--> 203             raise TypeError(" Shared variable constructor needs numeric values and not symbolic variables.")
    204 
    205         for ctor in reversed(shared.constructors):

TypeError:  Shared variable constructor needs numeric values and not symbolic variables.
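
In [ ]:
# Illustrative sketch, not part of the original run: theano.shared needs a
# concrete ndarray rather than a symbolic expression like W.T, so evaluate
# the expression first and wrap the resulting numeric value.
W_T_shared = theano.shared(W.T.eval(), name='W_T')
W_T_shared.get_value().shape   # all axes reversed: (3, 3, 3, 1)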

In [313]:
filtered_img.shape


Out[313]:
(1, 1, 637, 514)

In [297]:
img.shape


Out[297]:
(639, 516, 3)

In [298]:
# build the transposed-convolution ("deconvolution") filters:
# swap the output/input feature-map axes and flip each kernel spatially
deconv_W = W.transpose([1, 0, 2, 3])
deconv_W = deconv_W[:, :, ::-1, ::-1]
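
In [ ]:
# Illustrative check, not part of the original run: deconv_W is still a
# symbolic expression, so evaluate it to confirm the shape:
# 3 output maps (RGB), 1 input map, 3x3 kernels.
deconv_W.eval().shape   # expected: (3, 1, 3, 3)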

In [299]:
# instantiate 4D tensor for input
deconv_input = T.tensor4(name='deconv_input')
deconv_out = conv.conv2d(deconv_input, filters=deconv_W, border_mode='full')

In [300]:
deconv_f = theano.function([deconv_input], deconv_out)

In [301]:
deconv_img = deconv_f(filtered_img)

In [302]:
deconv_img.shape


Out[302]:
(1, 3, 639, 516)
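
In [ ]:
# Illustrative check, not part of the original run: with border_mode='full'
# each spatial dimension grows by filter_size - 1, so the 637x514 feature
# map comes back out at the original 639x516.
print(637 + 3 - 1, 514 + 3 - 1)   # 639 516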

In [310]:
pylab.subplot(1, 2, 1); pylab.imshow(img[:, :, :])
pylab.subplot(1, 2, 2); pylab.imshow(deconv_img[0].transpose((1, 2, 0)) / deconv_img.max())


Out[310]:
<matplotlib.image.AxesImage at 0x7f8375602d90>

In [272]:
minimum = deconv_img[0].min()
deconv_img_rescaled = deconv_img[0].transpose(1, 2, 0) - minimum
maximum = deconv_img_rescaled.max()
deconv_img_rescaled = deconv_img_rescaled / maximum
pylab.imshow(deconv_img_rescaled[:,:,0])


Out[272]:
<matplotlib.image.AxesImage at 0x7f8377a2bb10>
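
In [ ]:
# Illustrative sketch, not part of the original run: apply the same min/max
# rescaling to all three channels at once and view the reconstruction in RGB
# rather than a single channel.
rgb = deconv_img[0].transpose(1, 2, 0)
rgb = (rgb - rgb.min()) / (rgb.max() - rgb.min())   # map into [0, 1] for imshow
pylab.imshow(rgb)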

In [274]:
minimum


Out[274]:
0.30811445678618993

In [246]:
filtered_img.shape


Out[246]:
(1, 1, 637, 514)

In [247]:
img


Out[247]:
array([[[ 0.08203125],
        [ 0.10677083],
        [ 0.09505208],
        ..., 
        [ 0.0703125 ],
        [ 0.06640625],
        [ 0.08463542]],

       [[ 0.0859375 ],
        [ 0.10026042],
        [ 0.08854167],
        ..., 
        [ 0.04947917],
        [ 0.0625    ],
        [ 0.07552083]],

       [[ 0.09635417],
        [ 0.10026042],
        [ 0.0859375 ],
        ..., 
        [ 0.05729167],
        [ 0.08854167],
        [ 0.07161458]],

       ..., 
       [[ 0.05729167],
        [ 0.078125  ],
        [ 0.05729167],
        ..., 
        [ 0.07161458],
        [ 0.08333333],
        [ 0.09114583]],

       [[ 0.0390625 ],
        [ 0.05338542],
        [ 0.046875  ],
        ..., 
        [ 0.06380208],
        [ 0.07161458],
        [ 0.07552083]],

       [[ 0.04296875],
        [ 0.04557292],
        [ 0.03776042],
        ..., 
        [ 0.07161458],
        [ 0.06770833],
        [ 0.05208333]]])

In [308]:
(deconv_img / 175).min()


Out[308]:
0.0025961977401517946

In [ ]: