Eigendecomposition of matrices

In this notebook we'll look at the eigendecomposition of matrices, following the treatment in the Deep Learning book by Goodfellow et al.


In [15]:
import numpy as np
import scipy as sci
from scipy import linalg as la

In [81]:
mat_dim = 5
# Random 5x5 matrix of integers in [1, 9]; np.mat makes `*` below act as matrix multiplication
A = np.mat(np.random.randint(low=1, high=10, size=(mat_dim, mat_dim)))

In [82]:
A


Out[82]:
matrix([[3, 6, 9, 4, 2],
        [7, 8, 5, 1, 1],
        [1, 9, 8, 1, 7],
        [5, 8, 2, 3, 1],
        [8, 8, 6, 6, 4]])

In [83]:
B = la.inv(A)  # inverse of A, computed with scipy.linalg.inv
B


Out[83]:
array([[-0.09962676,  0.16221648, -0.04536319, -0.16681022,  0.1303474 ],
       [ 0.01550388, -0.03100775,  0.03875969,  0.2248062 , -0.12403101],
       [ 0.12919897,  0.0749354 , -0.01033592, -0.12661499, -0.03359173],
       [ 0.09417169, -0.22538042, -0.06086707,  0.1432673 ,  0.0799598 ],
       [-0.16681022, -0.03674993,  0.12001148, -0.14097043,  0.1678151 ]])

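Since B is the inverse of A, multiplying A and B in either order should recover the identity matrix, up to floating-point rounding. The next cell checks the product A * B.
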
In [85]:
I = A * B  # A times its inverse should give the identity matrix, up to floating-point error
I


Out[85]:
matrix([[  1.00000000e+00,  -1.11022302e-16,   0.00000000e+00,
          -2.22044605e-16,  -1.66533454e-16],
        [  1.66533454e-16,   1.00000000e+00,  -8.32667268e-17,
          -2.22044605e-16,  -5.55111512e-17],
        [  0.00000000e+00,   1.11022302e-16,   1.00000000e+00,
           0.00000000e+00,  -2.22044605e-16],
        [  1.66533454e-16,   0.00000000e+00,  -1.52655666e-16,
           1.00000000e+00,  -2.77555756e-16],
        [  2.22044605e-16,  -2.22044605e-16,  -1.66533454e-16,
          -1.11022302e-16,   1.00000000e+00]])

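The off-diagonal entries above are on the order of machine epsilon (about 1e-16), so A * B is the identity up to rounding error. As a quick sanity check (a minimal sketch, not part of the original cells), np.allclose can confirm that both products are numerically the identity; both comparisons should return True:

In [ ]:
# Sketch: verify that A·B and B·A are both numerically the identity
# (the @ operator is matrix multiplication)
np.allclose(A @ B, np.eye(mat_dim)), np.allclose(B @ A, np.eye(mat_dim))
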
In [88]:
# The original cell body is missing; la.det(I) is a plausible reconstruction,
# since the determinant of the near-identity product should be ~1
la.det(I)

Out[88]:
0.9999999999999994

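Finally, a sketch of the eigendecomposition itself, the topic of this notebook (assuming np.linalg.eig and that A is diagonalizable, which a random integer matrix almost surely is): A factors as V diag(λ) V^{-1}, where the columns of V are the eigenvectors of A and λ the corresponding eigenvalues. Reconstructing A from these factors and comparing with the original is a useful sanity check:

In [ ]:
# Sketch: eigendecomposition A = V diag(lam) V^{-1} and reconstruction check.
# np.linalg.eig returns the eigenvalues and the eigenvectors (as columns of V);
# both may be complex even for a real A.
lam, V = np.linalg.eig(np.asarray(A))
A_rebuilt = V @ np.diag(lam) @ np.linalg.inv(V)
np.allclose(A_rebuilt, np.asarray(A))  # should be True when A is diagonalizable
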
In [ ]: