In [1]:
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
%matplotlib inline
In [2]:
plt.style.use('ggplot')
In [3]:
A = np.array([[1,2],[3,4]])
A
Out[3]:
In [4]:
b = np.array([3,17])
b
Out[4]:
In [5]:
x = la.solve(A, b)
x
Out[5]:
In [6]:
np.allclose(A @ x, b)
Out[6]:
In [7]:
A1 = np.random.random((1000,1000))
b1 = np.random.random(1000)
In [8]:
%timeit la.solve(A1, b1)
In [9]:
%timeit la.inv(A1) @ b1
The solve function uses the LAPACK routine dgesv (a Fortran function) to do the actual work. Here is an example of how to call the LAPACK function directly. There is rarely any reason to use BLAS or LAPACK functions directly, because the linalg package provides more convenient functions that also perform error checking, but you can use Python to experiment with LAPACK or BLAS before using them in a language like C or Fortran.
In [10]:
import scipy.linalg.lapack as lapack
In [11]:
lu, piv, x, info = lapack.dgesv(A, b)
x
Out[11]:
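BLAS routines can be called in the same way. As a minimal sketch, dgemm (the BLAS matrix-multiply routine) computes alpha * a @ b, which should match the @ operator on the same floating point arrays.
In [ ]:
import scipy.linalg.blas as blas
Af = A.astype(float)  # BLAS routines work on floating point arrays
# dgemm computes alpha * a @ b; this should agree with Af @ Af
np.allclose(blas.dgemm(1.0, Af, Af), Af @ Af)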
In [12]:
C = np.array([[1, 2+3j], [3-2j, 4]])
C
Out[12]:
In [13]:
C.conjugate()
Out[13]:
In [14]:
def trace(M):
    return np.diag(M).sum()
In [15]:
trace(C)
Out[15]:
In [16]:
np.allclose(trace(C), la.eigvals(C).sum())
Out[16]:
In [17]:
la.det(C)
Out[17]:
In [18]:
np.linalg.matrix_rank(C)
Out[18]:
In [19]:
la.norm(C, None) # Frobenius (default)
Out[19]:
In [20]:
la.norm(C, 2) # largest singular value
Out[20]:
In [21]:
la.norm(C, -2) # smallest singular value
Out[21]:
In [22]:
la.svdvals(C)
Out[22]:
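As a quick sanity check of the comments above, the 2-norm should equal the largest singular value, the -2 "norm" the smallest, and the Frobenius norm the square root of the sum of the squared singular values.
In [ ]:
sv = la.svdvals(C)
np.allclose((sv.max(), sv.min(), np.sqrt(np.sum(sv**2))), (la.norm(C, 2), la.norm(C, -2), la.norm(C)))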
In [23]:
la.solve(A, b)
Out[23]:
In [24]:
x, resid, rank, s = la.lstsq(A, b)
x
Out[24]:
In [25]:
A1 = np.array([[1,2],[2,4]])
A1
Out[25]:
In [26]:
b1 = np.array([3, 17])
b1
Out[26]:
In [27]:
try:
    la.solve(A1, b1)
except la.LinAlgError as e:
    print(e)
In [28]:
x, resid, rank, s = la.lstsq(A1, b1)
x
Out[28]:
In [29]:
A2 = np.random.random((10,3))
b2 = np.random.random(10)
In [30]:
try:
    la.solve(A2, b2)
except ValueError as e:
    print(e)
In [31]:
x, resid, rank, s = la.lstsq(A2, b2)
x
Out[31]:
One way to solve least squares equations $X\beta = y$ for $\beta$ is by using the formula $\beta = (X^TX)^{-1}X^Ty$ as you may have learnt in statistical theory classes (or can derive yourself with a bit of calculus). This is implemented below.
Note: This is not how the la.lstsq function solves least squares problems, since forming $X^TX$ explicitly can be inefficient (and numerically unstable) for large or ill-conditioned matrices.
In [33]:
def least_squares(X, y):
    return la.solve(X.T @ X, X.T @ y)
In [34]:
least_squares(A2, b2)
Out[34]:
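As a quick check, the normal-equation solution should agree with la.lstsq on the overdetermined system A2, b2 from above, since A2 should have full column rank.
In [ ]:
x_ne = least_squares(A2, b2)              # normal equations
x_ls, resid, rank, s = la.lstsq(A2, b2)   # LAPACK-based least squares
np.allclose(x_ne, x_ls)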
In [35]:
A = np.array([[1,0.6],[0.6,4]])
A
Out[35]:
In [36]:
p, l, u = la.lu(A)
In [37]:
p
Out[37]:
In [38]:
l
Out[38]:
In [39]:
u
Out[39]:
In [40]:
np.allclose(p@l@u, A)
Out[40]:
In [41]:
U = la.cholesky(A)
U
Out[41]:
In [42]:
np.allclose(U.T @ U, A)
Out[42]:
In [43]:
# If working with complex matrices, use the conjugate transpose
np.allclose(U.T.conj() @ U, A)
Out[43]:
In [44]:
Q, R = la.qr(A)
In [45]:
Q
Out[45]:
In [46]:
np.allclose((la.norm(Q[:,0]), la.norm(Q[:,1])), (1,1))
Out[46]:
In [47]:
np.allclose(Q@R, A)
Out[47]:
In [48]:
u, v = la.eig(A)
In [49]:
u
Out[49]:
In [50]:
v
Out[50]:
In [51]:
np.allclose((la.norm(v[:,0]), la.norm(v[:,1])), (1,1))
Out[51]:
In [52]:
np.allclose(v @ np.diag(u) @ v.T, A)
Out[52]:
In [53]:
np.allclose(v @ np.diag(1/u) @ v.T, la.inv(A))
Out[53]:
In [54]:
np.allclose(v @ np.diag(u**5) @ v.T, np.linalg.matrix_power(A, 5))
Out[54]:
In [55]:
U, s, V = la.svd(A)
In [56]:
U
Out[56]:
In [57]:
np.allclose((la.norm(U[:,0]), la.norm(U[:,1])), (1,1))
Out[57]:
In [58]:
s
Out[58]:
In [59]:
V
Out[59]:
In [60]:
np.allclose((la.norm(V[:,0]), la.norm(V[:,1])), (1,1))
Out[60]:
In [61]:
np.allclose(U @ np.diag(s) @ V, A)
Out[61]:
In [ ]: