In [1]:
from IPython.display import HTML, Image, YouTubeVideo
from sympy import init_printing, Matrix, symbols, Rational
import sympy as sym
import numpy as np
from warnings import filterwarnings
init_printing(use_latex='mathjax')
filterwarnings('ignore')
%matplotlib inline
In [2]:
a11, a12, a13, a21, a22, a23, a31, a32, a33 = symbols('a11 a12 a13 a21 a22 a23 a31 a32 a33')
b11, b12, b13, b21, b22, b23, b31, b32, b33 = symbols('b11 b12 b13 b21 b22 b23 b31 b32 b33')
In [3]:
A = Matrix([[a11, a12, a13], [a21, a22, a23]]) # A is 2-by-3
B = Matrix([[b11, b12], [b21, b22], [b31, b32]]) # B is 3-by-2
A, B
Out[3]:
In [4]:
C = A * B # A 2-by-3 matrix times a 3-by-2 matrix gives a 2-by-2 product
C
Out[4]:
In [5]:
A = Matrix([[3, 1], [1, 3]])
B = Matrix([[1, 2], [1, 4]])
C1 = A * B # Matrix multiplication is not commutative in general,
C2 = B * A # so A*B and B*A need not be equal
C1, C2
Out[5]:
In [6]:
A = Matrix([[1, 2], [3, 4], [5, 6]])
B = Matrix([[1, 2], [3, 4]])
A, A.transpose(), B, B.transpose()
Out[6]:
In [7]:
A = Matrix([[1, 2], [2, 1]])
A, A.transpose() # A is symmetric, so it equals its own transpose
Out[7]:
rank($A$), the rank of an $m$-by-$n$ matrix $A$, is the maximal number of linearly independent columns (equivalently, rows) of $A$.
col($A$), the column space of an $m$-by-$n$ matrix $A$, is the set of all possible linear combinations of its column vectors.
row($A$), the row space of an $m$-by-$n$ matrix $A$, is the set of all possible linear combinations of its row vectors.
rank($A$) = dimension of col($A$) = dimension of row($A$)
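As a quick numeric check of these definitions, NumPy can compute the rank directly; a minimal sketch (the 3-by-3 example matrix below is my own illustration, not from the text):
In [ ]:
M = np.array([[1, 2, 3], [2, 4, 6], [1, 0, 1]]) # The second row is twice the first
np.linalg.matrix_rank(M) # rank(M) is 2: only two rows are linearly independent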
Perhaps the most common norm is the Euclidean norm $$ \|x\|_2 := \sqrt{x_1^2 + x_2^2 + \ldots + x_n^2}$$
This is a special case of the $p$-norm: $$ \|x\|_p := \left(|x_1|^p + \ldots + |x_n|^p\right)^{1/p}$$
There's also the so-called infinity norm $$ \|x\|_\infty := \max_{i=1,\ldots, n} |x_i|$$
A vector $x$ is said to be normalized if $\|x\| = 1$
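All of these norms are available through NumPy's `np.linalg.norm` via its `ord` argument; a minimal sketch (the vector below is my own example):
In [ ]:
x = np.array([3.0, -4.0])
(np.linalg.norm(x, ord=2),      # Euclidean norm: sqrt(3^2 + 4^2) = 5
 np.linalg.norm(x, ord=1),      # 1-norm: |3| + |-4| = 7
 np.linalg.norm(x, ord=np.inf), # infinity norm: max(|3|, |-4|) = 4
 np.linalg.norm(x / np.linalg.norm(x))) # A normalized vector has norm 1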
In [8]:
X = np.array([[3, 1], [1, 3]])
Matrix(X) # putting Matrix() around X is just for pretty printing
Out[8]:
In [9]:
Xinv = np.linalg.inv(X)
Matrix(Xinv), Matrix(Xinv.dot(X)), Matrix(X.dot(Xinv)) # Should give the identity matrix
Out[9]:
$|A| \neq 0$ if and only if $A$ is invertible (non-singular).
The trace of a square matrix $A$, denoted tr$(A)$, is defined as the sum of the diagonal elements of $A$.
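Both quantities are one-liners in NumPy; a quick sketch using the matrix X defined above:
In [ ]:
(np.linalg.det(X), np.trace(X)) # det(X) = 3*3 - 1*1 = 8 (nonzero, so X is invertible); tr(X) = 3 + 3 = 6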
In [10]:
lamda = symbols('lamda') # Note that lambda is a reserved word in python, so we use lamda (without the b)
In [11]:
A = Matrix([[3, 1], [1, 3]])
I = sym.eye(2)
A, I # Printing A and the 2-by-2 identity matrix to the screen
Out[11]:
In [12]:
(A - lamda * I) # Printing A minus lambda times the identity matrix to the screen
Out[12]:
In [13]:
(A - lamda * I).det() # The characteristic polynomial of A
Out[13]:
In [14]:
((A - lamda * I).det()).factor() # The roots, lambda = 2 and lambda = 4, are the eigenvalues of A
Out[14]:
In [15]:
X = np.array([[3, 1], [1, 3]])
Matrix(X)
Out[15]:
In [16]:
eigenvals, eigenvecs = np.linalg.eig(X) # eig returns the eigenvalues and the eigenvectors (as columns)
Matrix(eigenvecs)
Out[16]:
In [17]:
Matrix(eigenvals)
Out[17]:
In [18]:
X = np.random.randn(5, 10)
A = X.dot(X.T) # For fun, let's look at A = X * X^T
eigenvals, eigenvecs = np.linalg.eig(A) # Compute the eigenvalues (and eigenvectors) of A
sum_of_eigs = sum(eigenvals) # Sum the eigenvalues
trace_of_A = A.trace() # Look at the trace
(sum_of_eigs, trace_of_A) # Are they the same?
Out[18]:
In [19]:
# We'll use the same matrix A as before
prod_of_eigs = np.prod(eigenvals) # Multiply the eigenvalues
determinant = np.linalg.det(A) # Look at the determinant
(prod_of_eigs, determinant) # Are they the same?
Out[19]:
In [20]:
A = np.array([[4, 4], [-3, 3]])
Matrix(A)
Out[20]:
In [21]:
U, Sigma_diags, Vt = np.linalg.svd(A) # Note that NumPy returns V transpose (here Vt), not V itself
Matrix(np.diag(Sigma_diags)) # NumPy's SVD only returns the diagonal entries; here I'm showing the full Sigma
Out[21]:
In [22]:
U, Vt = np.round(U, decimals=5), np.round(Vt, decimals=5)
In [23]:
Matrix(U), Matrix(Vt) # I rounded the values for clarity
Out[23]:
$$\text{SVD:} \quad \quad A = U \Sigma V^\top$$
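As a sanity check, multiplying the three factors back together should recover $A$. A minimal sketch, recomputing the unrounded factors (the rounded U and Vt above would introduce small errors):
In [ ]:
U2, s2, Vt2 = np.linalg.svd(A) # Fresh, unrounded factors
np.allclose(U2.dot(np.diag(s2)).dot(Vt2), A) # True: U * Sigma * V^T reconstructs A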
In [24]:
Image(url='https://upload.wikimedia.org/wikipedia/commons/e/e9/Singular_value_decomposition.gif')
Out[24]:
In [25]:
Image(url='http://www.probabilitycourse.com/images/chapter6/Convex_b.png', width=400)
Out[25]: