In [1]:
from __future__ import division
import time
import numpy as np
from scipy import misc
import scipy.linalg as lin
import pylab as pl
from locore.algorithms import admm
from pyprox.operators import dual_prox,soft_thresholding
Load the image, downsample it to 256×256 and convert it to float in [0, 1]
In [2]:
# NOTE(review): scipy.misc.lena / scipy.misc.imresize and np.float were removed
# in modern scipy/numpy releases -- this cell assumes a legacy scipy stack.
im = misc.lena()
im = misc.imresize(im, (256, 256)).astype(np.float) / 255.
n = im.shape[0]
# Fix: `imshow` was called bare, but only `import pylab as pl` is in scope,
# so the original raised NameError; also pass the colormap by keyword.
pl.imshow(im, cmap='gray')
Out[2]:
Noisy observations
In [3]:
sigma = 0.06  # standard deviation of the additive Gaussian noise
# Seed the RNG so the noisy observation is reproducible under Restart & Run All.
np.random.seed(0)
y = im + sigma * np.random.randn(n, n)
# Fix: `imshow` was called bare, but only `import pylab as pl` is in scope.
pl.imshow(y, cmap='gray')
Out[3]:
Regularization parameter
In [4]:
alpha = 0.1  # TV regularization weight: trades data fidelity against smoothness
Gradient and divergence with periodic boundaries
In [5]:
def gradient(x):
    """Forward-difference gradient of a 2-D array with periodic boundaries.

    Returns an array of shape (rows, cols, 2): channel 0 holds the
    vertical (axis-0) differences, channel 1 the horizontal (axis-1) ones.
    """
    rows, cols = x.shape
    out = np.zeros((rows, cols, 2))
    out[:, :, 0] = np.roll(x, -1, axis=0) - x
    out[:, :, 1] = np.roll(x, -1, axis=1) - x
    return out
def divergence(p):
    """Adjoint of `gradient` with periodic boundaries.

    Takes a vector field of shape (rows, cols, 2) and returns the
    negated sum of backward differences, so that
    <gradient(x), p> == <x, divergence(p)>.
    """
    back_x = p[:, :, 0] - np.roll(p[:, :, 0], 1, axis=0)
    back_y = p[:, :, 1] - np.roll(p[:, :, 1], 1, axis=1)
    return -(back_x + back_y)
Operators
In [6]:
# Minimization of F(K*x) + G(x), where K is the discrete gradient.
K = gradient
K.T = divergence  # attach the adjoint as an attribute so the solver can use K.T

def amp(u):
    """Pointwise Euclidean norm of a 2-vector field of shape (n, n, 2)."""
    return np.sqrt(np.sum(u ** 2, axis=2))

def F(u):
    """TV penalty: alpha times the summed magnitude of the gradient field."""
    return alpha * np.sum(amp(u))

def G(x):
    """Data-fidelity term: half the squared Frobenius distance to y."""
    return 1 / 2 * lin.norm(y - x, 'fro') ** 2

# Proximity operators
def normalize(u):
    """Rescale each 2-vector of u to unit length (floor at 1e-10 avoids /0)."""
    safe_mag = np.maximum(amp(u), 1e-10)
    return u / np.tile(safe_mag[:, :, np.newaxis], (1, 1, 2))

def prox_f(u, tau):
    """Prox of tau*F: soft-threshold the magnitudes, keep the directions."""
    shrunk = soft_thresholding(amp(u), alpha * tau)
    return np.tile(shrunk[:, :, np.newaxis], (1, 1, 2)) * normalize(u)

prox_fs = dual_prox(prox_f)

def prox_g(x, tau):
    """Prox of tau*G in closed form for the quadratic data term."""
    return (x + tau * y) / (1 + tau)

def callback(x):
    """Objective value G(x) + F(Kx), tracked at each iteration."""
    return G(x) + F(K(x))
Run
In [7]:
t1 = time.time()
# ADMM: prox of the dual of F (composed with K), prox of G, linear operator K.
x_rec, cx = admm(prox_fs, prox_g, K, y,
                 maxiter=300, full_output=1, callback=callback)
t2 = time.time()
# Fix: the Python-2-only `print` statement breaks under Python 3; the
# parenthesized single-argument form behaves identically on both.
print("Performed 300 iterations in " + str(t2 - t1) + " seconds.")
Show
In [8]:
# 2x2 figure: the three images share identical styling, so draw them in a loop.
panels = [(im, 'Original'), (y, 'Noisy'), (x_rec, 'TV Regularization')]
for idx, (img, label) in enumerate(panels):
    pl.subplot(2, 2, idx + 1)
    pl.imshow(img, cmap='gray')
    pl.title(label)
    pl.axis('off')
# Fourth panel: convergence of the objective recorded by the callback.
pl.subplot(224)
fplot = pl.plot(cx)
pl.title('Objective versus iterations')
pl.show()