In [1]:
# input: A
A = matrix([
[3, 1],
[2, -2]
])
# input: start vector v0
v0 = vector(
[1, 0]
)
# input: number of iterations
n = 5
#
Approximating the largest eigenvalue.
$$ x_{s+1} = A \cdot x_s $$
$$ \mu_s = \frac{\langle x_{s+1}, x_{s} \rangle}{\langle x_{s}, x_{s} \rangle} $$
In [2]:
MU = []
X = [v0]
for i in range(n):
    x = A * X[-1]   # next iterate: x_{s+1} = A * x_s
    X.append(x)
    # quotient mu_s = <x_{s+1}, x_s> / <x_s, x_s>
    mui = X[i+1].dot_product(X[i]) / X[i].dot_product(X[i])
    MU.append(mui)
In [3]:
show(X)
In [4]:
show(MU)
In [5]:
show(RDF(MU[-1]))
In [6]:
show(max(A.eigenvalues()))
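The entries of the iterates in X grow with every step. In exact rational arithmetic this is harmless, but a common variant normalizes $x_s$ in each step and reads the quotient off the unit vector. A minimal sketch of that variant (not part of the computation above), working in RDF floating point:
In [ ]:
# Sketch (variant, not used above): normalized power iteration in floating point
B = A.change_ring(RDF)
y = vector(RDF, v0)
for i in range(n):
    y = B * y
    y = y / y.norm()   # keep the iterate at unit length
# quotient of the unit vector y approximates the dominant eigenvalue
show(y.dot_product(B * y))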
Approximating the smallest eigenvalue. Here $\mu_s$ approximates the dominant eigenvalue of $A^{-1}$, i.e. $1/\lambda$ for the eigenvalue $\lambda$ of $A$ that is smallest in absolute value.
$$ x_{s+1} = A^{-1} \cdot x_s $$
$$ \mu_s = \frac{\langle x_{s+1}, x_{s} \rangle}{\langle x_{s}, x_{s} \rangle} $$
In [7]:
Ainv = A^-1
show(Ainv)
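For this 2x2 example forming $A^{-1}$ explicitly is fine; for larger matrices one would usually solve the linear system $A \cdot x_{s+1} = x_s$ in each step instead. A quick sketch of that equivalent step (an alternative, not used in the cells below), using solve_right:
In [ ]:
# Sketch: one inverse-iteration step without forming A^-1 explicitly
x_next = A.change_ring(QQ).solve_right(v0)   # solves A * x_next = v0
show(x_next == Ainv * v0)                    # same result as multiplying by A^-1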
In [8]:
MU = []
X = [v0]
for i in range(n):
    x = Ainv * X[-1]   # x_{s+1} = A^{-1} * x_s
    X.append(x)
    mui = X[i+1].dot_product(X[i]) / X[i].dot_product(X[i])
    MU.append(mui)
show(X)
In [9]:
show(MU)
In [10]:
show(RDF(MU[-1]))
In [11]:
show(min(A.eigenvalues()))
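Since $\mu_s$ approximates $1/\lambda$ (see above), taking the reciprocal recovers the eigenvalue of $A$ itself; a quick check:
In [ ]:
# mu approximates 1/lambda, so the reciprocal approximates the smallest eigenvalue of A
show(RDF(1 / MU[-1]))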
Approximating an arbitrary eigenvalue near $\alpha$. Here $\mu_s$ approximates $1/(\lambda - \alpha)$ for the eigenvalue $\lambda$ of $A$ closest to $\alpha$.
$$ x_{s+1} = (A - \alpha I)^{-1} \cdot x_s $$
$$ \mu_s = \frac{\langle x_{s+1}, x_{s} \rangle}{\langle x_{s}, x_{s} \rangle} $$
In [12]:
# input: alpha
alpha = 4
#
In [13]:
AaIinv = (A - alpha * identity_matrix(A.nrows()))^-1
show(AaIinv)
In [14]:
MU = []
X = [v0]
for i in range(n):
    x = AaIinv * X[-1]   # x_{s+1} = (A - alpha*I)^{-1} * x_s
    X.append(x)
    mui = X[i+1].dot_product(X[i]) / X[i].dot_product(X[i])
    MU.append(mui)
show(X)
In [15]:
show(MU)
In [16]:
show(RDF(MU[-1]))
In [17]:
show(min([(abs(e - alpha), e) for e in A.eigenvalues()])[1])
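Since $\mu_s$ approximates $1/(\lambda - \alpha)$ (see above), shifting the reciprocal back by $\alpha$ recovers the eigenvalue near $\alpha$; a quick check:
In [ ]:
# mu approximates 1/(lambda - alpha); shift back to recover the eigenvalue near alpha
show(RDF(alpha + 1 / MU[-1]))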