This notebook contains a Python implementation of the examples that ship with the original SPGL1 MATLAB solver.
In [1]:
%load_ext autoreload
%autoreload 2
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import spdiags
from scipy.sparse.linalg import lsqr as splsqr
from spgl1.lsqr import lsqr
from spgl1 import spgl1, spg_lasso, spg_bp, spg_bpdn, spg_mmv
from spgl1.spgl1 import norm_l1nn_primal, norm_l1nn_dual, norm_l1nn_project
from spgl1.spgl1 import norm_l12nn_primal, norm_l12nn_dual, norm_l12nn_project
# Initialize random number generators
np.random.seed(43273289)
In [2]:
# Create random m-by-n encoding matrix and sparse vector
m = 50
n = 128
k = 14
A, _ = np.linalg.qr(np.random.randn(n, m), 'reduced')
A = A.T
p = np.random.permutation(n)
p = p[0:k]
x0 = np.zeros(n)
x0[p] = np.random.randn(k)
Solve the underdetermined LASSO problem for $||x||_1 \leq \pi$:
$$\min_x ||Ax-b||_2 \quad \text{subject to} \quad ||x||_1 \leq \pi$$
In [3]:
b = A.dot(x0)
tau = np.pi
x,resid,grad,info = spg_lasso(A, b, tau, verbosity=1)
print()
print('%s%s%s' % ('-'*35,' Solution ','-'*35))
print('nonzeros(x) = %i, ||x||_1 = %12.6e, ||x||_1 - pi = %13.6e' % \
(np.sum(abs(x)>1e-5), np.linalg.norm(x,1), np.linalg.norm(x,1)-np.pi))
print('%s' % ('-'*80))
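As a quick sanity check (a sketch, which assumes the second return value `resid` is the residual $b - Ax$, as in the MATLAB solver), we can verify that the solution sits on the constraint boundary:
In [ ]:
# Sketch of a sanity check; assumes `resid` is the residual b - Ax
print('||x||_1 - tau = %.2e (should be <= 0 up to tolerance)'
      % (np.linalg.norm(x, 1) - tau))
print('||resid - (b - Ax)||_2 = %.2e'
      % np.linalg.norm(resid - (b - A.dot(x))))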
Solve the basis pursuit (BP) problem:
$$\min_x ||x||_1 \quad \text{subject to} \quad Ax = b$$
In [4]:
b = A.dot(x0) # signal
x,resid,grad,info = spg_bp(A, b, verbosity=2)
plt.figure()
plt.plot(x,'b')
plt.plot(x0,'ro')
plt.legend(('Recovered coefficients','Original coefficients'))
plt.title('Basis Pursuit');
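Since the problem is noiseless and x0 is sparse enough, basis pursuit should recover it exactly up to solver tolerance. A quick check of the recovery error and support, reusing `x`, `x0` and `p` from the cells above:
In [ ]:
# Recovery error and support check for the noiseless BP solve above
print('||x - x0||_2 = %.2e' % np.linalg.norm(x - x0))
print('Support recovered:',
      np.array_equal(np.where(np.abs(x) > 1e-5)[0], np.sort(p)))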
In [5]:
plt.figure()
plt.plot(info['xnorm1'], info['rnorm2'], '.-k')
plt.xlabel(r'$||x||_1$')
plt.ylabel(r'$||r||_2$')
plt.title('Sampled Pareto curve')
plt.figure()
plt.plot(np.arange(info['niters']), info['rnorm2']/max(info['rnorm2']), '.-k')
plt.plot(np.arange(info['niters']), info['xnorm1']/max(info['xnorm1']), '.-r')
plt.xlabel(r'#iter')
plt.ylabel(r'$||r||_2$ and $||x||_1$ (normalized)')
plt.title('Cost functions');
Solve the basis pursuit denoise (BPDN) problem:
$$\min_x ||x||_1 \quad \text{subject to} \quad ||Ax - b||_2 \leq 0.1$$
In [6]:
b = A.dot(x0) + np.random.randn(m) * 0.075
sigma = 0.10 # Desired ||Ax - b||_2
x,resid,grad,info = spg_bpdn(A, b, sigma, iter_lim=10, verbosity=2)
plt.figure()
plt.plot(x,'b')
plt.plot(x0,'ro')
plt.legend(('Recovered coefficients','Original coefficients'))
plt.title('Basis Pursuit Denoise');
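Since iter_lim=10 caps the iterations, the solver may stop before the residual reaches sigma; a quick check of how close it gets, reusing `A`, `b`, `x` and `sigma` from the cell above:
In [ ]:
# How close is the final residual to the target sigma?
print('||Ax - b||_2 = %.4f (target sigma = %.2f)'
      % (np.linalg.norm(A.dot(x) - b), sigma))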
We repeat the same procedure, but now the vector x contains only non-negative elements. We compare spgl1 using the standard L1 norm against the non-negative L1 (L1NN) norm.
In [7]:
x0 = np.zeros(n)
x0[p] = np.abs(np.random.randn(k))
b = A.dot(x0) # signal
x,resid,grad,info = spg_bp(A, b, iter_lim=20, verbosity=1)
xnn,residnn,gradnn,infonn = spg_bp(A, b, iter_lim=20, verbosity=1,
project=norm_l1nn_project,
primal_norm=norm_l1nn_primal,
dual_norm=norm_l1nn_dual)
plt.figure()
plt.plot(x,'b')
plt.plot(xnn,'--g')
plt.plot(x0,'ro')
plt.legend(('Recovered coefficients', 'Recovered coefficients NNnorms','Original coefficients'))
plt.title('Basis Pursuit');
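The L1NN projection keeps the iterates in the non-negative orthant, while the plain L1 solve may go negative. A quick check of the smallest entries of both solutions:
In [ ]:
# Smallest entry of each solution: xnn should be >= 0 by construction
print('min(x)   = %.2e' % x.min())
print('min(xnn) = %.2e' % xnn.min())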
Solve the basis pursuit (BP) problem in COMPLEX variables:
$$\min_z ||z||_1 \quad \text{subject to} \quad Az = b$$
In [8]:
from scipy.sparse.linalg import LinearOperator
class partialFourier(LinearOperator):
    def __init__(self, idx, n):
        self.idx = idx
        self.n = n
        self.shape = (len(idx), n)
        self.dtype = np.complex128
    def _matvec(self, x):
        # y = P(idx) * FFT(x): subsample the normalized FFT at rows idx
        z = np.fft.fft(x) / np.sqrt(self.n)
        return z[self.idx]
    def _rmatvec(self, x):
        # Adjoint: zero-fill at rows idx, then inverse FFT
        z = np.zeros(self.n, dtype=complex)
        z[self.idx] = x
        return np.fft.ifft(z) * np.sqrt(self.n)
# Create partial Fourier operator with rows idx
idx = np.random.permutation(n)
idx = idx[0:m]
opA = partialFourier(idx, n)
# Create sparse coefficients and b = 'A' * z0
z0 = np.zeros(n,dtype=complex)
z0[p] = np.random.randn(k) + 1j * np.random.randn(k)
b = opA.matvec(z0)
z,resid,grad,info = spg_bp(opA,b, verbosity=2)
plt.figure()
plt.plot(z.real,'b+',markersize=15.0)
plt.plot(z0.real,'bo')
plt.plot(z.imag,'r+',markersize=15.0)
plt.plot(z0.imag,'ro')
plt.legend(('Recovered (real)', 'Original (real)', 'Recovered (imag)', 'Original (imag)'))
plt.title('Complex Basis Pursuit');
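A quick check of the recovery error for the complex solve (np.linalg.norm handles complex vectors directly):
In [ ]:
# Recovery error for the complex basis pursuit solve
print('||z - z0||_2 = %.2e' % np.linalg.norm(z - z0))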
Sample the Pareto frontier at 100 points:
$$\phi(\tau) = \min_x ||Ax-b||_2 \quad \text{subject to} \quad ||x||_1 \leq \tau$$
In [9]:
b = A.dot(x0)
x = np.zeros(n)
tau = np.linspace(0, 1.05 * np.linalg.norm(x0, 1), 100)
tau[0] = 1e-10
phi = np.zeros(tau.size)
for i in range(tau.size):
x,r,grad,info = spgl1(A, b, tau[i], 0, x, iter_lim=1000)
phi[i] = np.linalg.norm(r)
plt.figure()
plt.plot(tau,phi, '.')
plt.title('Pareto frontier')
plt.xlabel('||x||_1')
plt.ylabel('||Ax-b||_2');
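The Pareto curve is differentiable, and van den Berg & Friedlander (2008) show that its slope at $\tau$ is $\phi'(\tau) = -||A^Tr_\tau||_\infty / ||r_\tau||_2$, where $r_\tau$ is the optimal residual. A sketch that probes one interior sample point (the index 50 is an arbitrary choice) and compares the theoretical slope with a finite difference of the sampled curve:
In [ ]:
# Compare the theoretical Pareto-curve slope with a finite difference
i = 50  # arbitrary interior sample point
_, r_i, _, _ = spgl1(A, b, tau[i], 0, np.zeros(n), iter_lim=1000)
slope_theory = -np.linalg.norm(A.T.dot(r_i), np.inf) / np.linalg.norm(r_i)
slope_fd = (phi[i + 1] - phi[i - 1]) / (tau[i + 1] - tau[i - 1])
print('theoretical slope = %.4f, finite-difference slope = %.4f'
      % (slope_theory, slope_fd))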
Solve
$$\min_y ||y||_1 \quad \text{subject to} \quad AW^{-1}y = b$$
and the weighted basis pursuit (BP) problem:
$$\min_x ||Wx||_1 \quad \text{subject to} \quad Ax = b$$
followed by setting $y = Wx$.
In [10]:
# Sparsify vector x0 a bit more to get exact recovery
k = 9
x0 = np.zeros(n)
x0[p[0:k]] = np.random.randn(k)
# Set up weights w and vector b
w = np.random.rand(n) + 0.1 # Weights
b = A.dot(x0/w) # Signal
# Solution
x, resid, grad, info = spg_bp(A, b, iter_lim=1000, weights=w)
# Reconstructed solution, with weighting
x1 = x * w
plt.figure()
plt.plot(x1,'b')
plt.plot(x0,'ro')
plt.legend(('Coefficients','Original coefficients'))
plt.title('Weighted Basis Pursuit');
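With k reduced to 9 the recovery should again be exact; a quick check that the re-weighted solution x1 matches x0:
In [ ]:
# Recovery error after undoing the weighting
print('||x1 - x0||_2 = %.2e' % np.linalg.norm(x1 - x0))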
Solve the multiple measurement vector (MMV) problem
$$(1) \quad \min_Y ||Y||_{1,2} \quad \text{subject to} \quad AW^{-1}Y = B$$
and the weighted MMV problem (weights on the rows of X):
$$(2) \quad \min_X ||WX||_{1,2} \quad \text{subject to} \quad AX = B$$
followed by setting $Y = WX$.
In [11]:
# Create problem
m = 100
n = 150
k = 12
l = 6
A = np.random.randn(m, n)
p = np.random.permutation(n)[:k]
X0 = np.zeros((n, l))
X0[p, :] = np.random.randn(k, l)
weights = 3 * np.random.rand(n) + 0.1
W = np.diag(1 / weights)  # note: this is W^{-1} in the formulation above
B = A.dot(W).dot(X0)
# Solve unweighted version
x_uw, _, _, _ = spg_mmv(A.dot(W), B, 0, verbosity=1)
# Solve weighted version
x_w, _, _, _ = spg_mmv(A, B, 0, verbosity=2, weights=weights)
x_w = spdiags(weights, 0, n, n).dot(x_w)
# Plot results
plt.figure()
plt.plot(x_uw[:, 0], 'b-', label='Coefficients (1)')
plt.plot(x_w[:, 0], 'g--', label='Coefficients (2)')
plt.plot(X0[:, 0], 'ro', label='Original coefficients')
plt.legend()
plt.title('Weighted Basis Pursuit with Multiple Measurement Vectors');
plt.figure()
plt.plot(x_uw[:, 1], 'b', label='Coefficients (1)')
plt.plot(x_w[:, 1], 'g--', label='Coefficients (2)')
plt.plot(X0[:, 1], 'ro', label='Original coefficients')
plt.legend()
plt.title('Weighted Basis Pursuit with Multiple Measurement Vectors');
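A quick numerical comparison of the two formulations via their Frobenius-norm recovery errors against X0:
In [ ]:
# Frobenius-norm recovery errors for the unweighted (1) and weighted (2) solves
print('(1) ||x_uw - X0||_F = %.2e' % np.linalg.norm(x_uw - X0))
print('(2) ||x_w  - X0||_F = %.2e' % np.linalg.norm(x_w - X0))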
In [12]:
# Create problem
m = 100
n = 150
k = 12
l = 6
A = np.random.randn(m, n)
p = np.random.permutation(n)[:k]
X0 = np.zeros((n, l))
X0[p, :] = np.abs(np.random.randn(k, l))
B = A.dot(X0)
X, _, _, _ = spg_mmv(A, B, 0, iter_lim=10, verbosity=1)
XNN, _, _, _ = spg_mmv(A, B, 0, iter_lim=10, verbosity=1,
project=norm_l12nn_project,
primal_norm=norm_l12nn_primal,
dual_norm=norm_l12nn_dual)
print('Negative X:', np.any(X < 0))
print('Negative XNN:', np.any(XNN < 0))
# Plot results
plt.figure()
plt.plot(X[:, 0], 'b-', label='Coefficients')
plt.plot(XNN[:, 0], 'g--', label='Coefficients NN')
plt.plot(X0[:, 0], 'ro', label='Original coefficients')
plt.legend()
plt.title('Non-negative Basis Pursuit with Multiple Measurement Vectors');
plt.figure()
plt.plot(X[:, 1], 'b', label='Coefficients')
plt.plot(XNN[:, 1], 'g--', label='Coefficients NN')
plt.plot(X0[:, 1], 'ro', label='Original coefficients')
plt.legend()
plt.title('Non-negative Basis Pursuit with Multiple Measurement Vectors');
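As in the single-vector case, the smallest entries show the effect of the non-negative projection:
In [ ]:
# Smallest entry of each solution: XNN should be >= 0 by construction
print('min(X)   = %.2e' % X.min())
print('min(XNN) = %.2e' % XNN.min())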
Finally, let's compare the internal lsqr with SciPy's lsqr, and then run spgl1 with subspace minimization.
In [13]:
def Aprodfun(A, x, mode):
    # mode 1: forward operator A @ x; otherwise the adjoint A^H @ x
    if mode == 1:
        return np.dot(A, x)
    return np.dot(np.conj(A.T), x)
In [14]:
n = 10
m = 20
A = np.random.normal(0, 1, (m, n))
Aprod = lambda x, mode: Aprodfun(A, x, mode)
x = np.ones(n)
y = A.dot(x)
damp = 1e-5
aTol = 1e-5
bTol = 1e-5
conLim = 1e12
itnMaxLSQR = 100
showLSQR = 2
xinv, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var = \
lsqr(m, n, Aprod, y, damp, aTol, bTol, conLim, itnMaxLSQR, showLSQR)
xinv_sp, istop_sp, itn_sp, r1norm_sp, r2norm_sp, anorm_sp, acond_sp, arnorm_sp, xnorm_sp, var = \
splsqr(A, y, damp, aTol, bTol, conLim, itnMaxLSQR, showLSQR)
print('istop=%d, itn=%d, r1norm=%.2f, '
'r2norm=%.2f, anorm=%.2f, acond=%.2f, arnorm=%.2f, xnorm=%.2f' \
%(istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm))
print('istop=%d, itn=%d, r1norm=%.2f, '
'r2norm=%.2f, anorm=%.2f, acond=%.2f, arnorm=%.2f, xnorm=%.2f' \
%(istop_sp, itn_sp, r1norm_sp, r2norm_sp, anorm_sp, acond_sp, arnorm_sp, xnorm_sp))
plt.plot(x, lw=8)
plt.plot(xinv, '--g', lw=4)
plt.plot(xinv_sp, '--r')
plt.ylim(0, 2);
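Beyond the summary prints, a direct numerical comparison of the two solutions against each other and against the true x:
In [ ]:
# The two LSQR implementations should agree to solver tolerance
print('max |xinv - xinv_sp| = %.2e' % np.max(np.abs(xinv - xinv_sp)))
print('close to true x: internal=%s, scipy=%s'
      % (np.allclose(xinv, x, atol=1e-3), np.allclose(xinv_sp, x, atol=1e-3)))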
Now use subspace minimization in SPGL1 and compare its convergence with the standard solver.
In [15]:
# Create random m-by-n encoding matrix and sparse vector
np.random.seed(0)
m = 50
n = 128
k = 14
A, _ = np.linalg.qr(np.random.randn(n, m), 'reduced')
A = A.T
p = np.random.permutation(n)
p = p[0:k]
x0 = np.zeros(n)
x0[p] = np.random.randn(k)
# Basis pursuit with subspace minimization
b = A.dot(x0) # signal
x, resid, grad, info = spg_bp(A, b, subspace_min=False, verbosity=2)
x_sub, resid_sub, grad_sub, info_sub = spg_bp(A, b, subspace_min=True, verbosity=2)
plt.figure()
plt.plot(np.arange(info['niters']), info['rnorm2']/max(info['rnorm2']), '.-k',
label='without subspace min')
plt.plot(np.arange(info_sub['niters']), info_sub['rnorm2']/max(info_sub['rnorm2']), '.-r',
label='with subspace min')
plt.xlabel(r'#iter')
plt.ylabel(r'$||r||_2$')
plt.legend();
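Subspace minimization typically reduces the number of iterations; a direct comparison of the counts reported in info:
In [ ]:
# Iteration counts with and without subspace minimization
print('iterations without subspace min: %d' % info['niters'])
print('iterations with subspace min:    %d' % info_sub['niters'])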