In [5]:
# Import modules
import sys
import numpy as np
from scipy import linalg
from scipy.sparse import linalg as slinalg
In [5]:
def naive_gaussian_elimination(matrix):
    """
    A simple Gaussian elimination to solve a system of linear equations.
    Args:
        matrix : numpy 2d array holding the augmented matrix [A | b]
    Returns:
        mat : the matrix after forward elimination
        x : the solution of the system
    Raises:
        ValueError:
            - matrix is None
        RuntimeError:
            - zero pivot encountered
    """
    if matrix is None:
        raise ValueError('argument matrix is None')
    # Work on a copy so the input matrix is left untouched
    mat = matrix.copy()
    # Number of rows
    n = mat.shape[0]
    # Number of columns
    m = mat.shape[1]
    # Forward elimination
    for j in range(0, n):
        if abs(mat[j, j]) == 0:
            raise RuntimeError('zero pivot encountered')
        for i in range(j + 1, n):
            mult = mat[i, j] / mat[j, j]
            for k in range(j, m):
                mat[i, k] = mat[i, k] - mult * mat[j, k]
    # Back substitution
    x = np.zeros(n, dtype=float)
    for i in range(n - 1, -1, -1):
        for j in range(i + 1, n):
            mat[i, m - 1] = mat[i, m - 1] - mat[i, j] * x[j]
        x[i] = mat[i, m - 1] / mat[i, i]
    return mat, x
In [6]:
"""
Input:
[[ 1 2 -1 3]
[ 2 1 -2 3]
[-3 1 1 -6]]
"""
input_mat = np.array([1, 2, -1, 3, 2, 1, -2, 3, -3, 1, 1, -6], dtype=float)
input_mat = input_mat.reshape(3, 4)
output_mat, x = naive_gaussian_elimination(input_mat)
print('----------------------')
print(output_mat)
print('----------------------')
print(x)
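As a quick sanity check (not part of the original notebook), the elimination result can be compared against NumPy's direct solver; this sketch assumes `input_mat` and `x` from the cell above are still in scope, and the names `A_check` and `b_check` are illustrative only.
In [ ]:
# Sketch: split the augmented matrix back into A and b and compare the
# naive elimination result with numpy.linalg.solve
A_check = input_mat[:, :3]
b_check = input_mat[:, 3]
print(np.allclose(x, np.linalg.solve(A_check, b_check)))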
In [11]:
A = np.array([1, 2, -1, 2, 1, -2, -3, 1, 1], dtype=float)
A = A.reshape(3, 3)
lu, piv = linalg.lu_factor(A)
b = np.array([3, 3, -6])
sol = linalg.lu_solve((lu, piv), b)
print(sol)
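A hedged residual check: `lu_factor` packs L and U into a single matrix plus a pivot vector, so the simplest way to confirm the solve is to plug the solution back into the system. The snippet below reuses `A`, `b`, and `sol` from the cell above.
In [ ]:
# Sketch: the solution returned by lu_solve should satisfy A @ sol = b
# up to floating-point rounding
print(np.allclose(np.matmul(A, sol), b))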
In [24]:
A = np.array([3, 1, 2, 6, 3, 4, 3, 1, 5], dtype=float).reshape(3, 3)
P, L, U = linalg.lu(A)
print(A)
print('--------------------')
print(L)
print('--------------------')
print(U)
print('--------------------')
print(np.matmul(P,np.matmul(L, U)))
In [27]:
A = np.array([3, 1, 2, 6, 3, 4, 3, 1, 5], dtype=float).reshape(3, 3)
b = np.array([0, 1, 3])
lu, piv = linalg.lu_factor(A)
x = linalg.lu_solve((lu, piv), b)
print(x)
In [2]:
A = np.array([1, -1, 3, -1, 0, -2, 2, 2, 4]).reshape(3, 3)
b = np.array([-3, 1, 0])
lu, piv = linalg.lu_factor(A)
x = linalg.lu_solve((lu, piv), b)
print(x)
In [7]:
"""
[[2, 3]
[3, 2]]
"""
A = np.array([2, 3, 3, 2]).reshape(2, 2)
b = np.array([4, 1])
lu, piv = linalg.lu_factor(A)
x = linalg.lu_solve((lu, piv), b)
print(x)
In [31]:
def jacobi_method(A, b, x0, k):
    """
    Use the Jacobi method to solve a system of linear equations.
    Args:
        A (numpy 2d array): the matrix
        b (numpy 1d array): the right-hand side vector
        x0 (numpy 1d array): initial guess
        k (int): number of iterations
    Return:
        The approximate solution
    Raises:
        ValueError:
            The number of columns of A does not match the size of x0
    """
    if A.shape[1] != x0.shape[0]:
        raise ValueError('The number of columns of matrix A must equal the size of x0')
    D = np.diag(A.diagonal())
    inv_D = linalg.inv(D)
    LU = A - D
    xk = x0
    for _ in range(k):
        # Jacobi update: x_{k+1} = D^{-1} (b - (L + U) x_k)
        xk = np.matmul(inv_D, b - np.matmul(LU, xk))
    return xk
In [34]:
A = np.array([3, 1, 1, 2]).reshape(2, 2)
b = np.array([5, 5])
x = jacobi_method(A, b, np.array([0, 0]), 20)
print('x = %s' %x)
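The loop above implements the splitting A = D + (L + U) with the update x_{k+1} = D^{-1}(b - (L + U) x_k). As a small, hedged check (not from the original notebook), the iterate after 20 sweeps can be compared with the direct solution of 3x + y = 5, x + 2y = 5, which is (1, 2).
In [ ]:
# Sketch: after 20 Jacobi sweeps on this diagonally dominant system the
# iterate should be very close to the direct solution
print(np.allclose(x, np.linalg.solve(A, b)))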
In [47]:
def gauss_seidel_method(A, b, x0, k):
    """
    Use the Gauss-Seidel method to solve a system of linear equations.
    Args:
        A (numpy 2d array): the matrix
        b (numpy 1d array): the right-hand side vector
        x0 (numpy 1d array): initial guess
        k (int): number of iterations
    Return:
        The approximate solution
    Raises:
        ValueError:
            The number of columns of A does not match the size of x0
    """
    if A.shape[1] != x0.shape[0]:
        raise ValueError('The number of columns of matrix A must equal the size of x0')
    D = np.diag(A.diagonal())
    L = np.tril(A) - D
    U = np.triu(A) - D
    inv_LD = linalg.inv(L + D)
    xk = x0
    for _ in range(k):
        # Gauss-Seidel update: x_{k+1} = (L + D)^{-1} (b - U x_k)
        xk = np.matmul(inv_LD, b - np.matmul(U, xk))
    return xk
In [48]:
A = np.array([3, 1, -1, 2, 4, 1, -1, 2, 5]).reshape(3, 3)
b = np.array([4, 1, 1])
x0 = np.array([0, 0, 0])
gauss_seidel_method(A, b, x0, 24)
Out[48]:
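Gauss-Seidel uses the splitting A = (L + D) + U with the update x_{k+1} = (L + D)^{-1}(b - U x_k), so each sweep reuses the freshly computed components. A hedged verification sketch (the name `x_gs` is illustrative), reusing `A`, `b`, and `x0` from the cell above:
In [ ]:
# Sketch: for this diagonally dominant system 24 sweeps should agree with
# the direct solution to within the default allclose tolerances
x_gs = gauss_seidel_method(A, b, x0, 24)
print(np.allclose(x_gs, np.linalg.solve(A, b)))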
In [49]:
def gauss_seidel_sor_method(A, b, w, x0, k):
    """
    Use the Gauss-Seidel method with successive over-relaxation (SOR)
    to solve a system of linear equations.
    Args:
        A (numpy 2d array): the matrix
        b (numpy 1d array): the right-hand side vector
        w (real number): relaxation weight
        x0 (numpy 1d array): initial guess
        k (int): number of iterations
    Return:
        The approximate solution
    Raises:
        ValueError:
            The number of columns of A does not match the size of x0
    """
    if A.shape[1] != x0.shape[0]:
        raise ValueError('The number of columns of matrix A must equal the size of x0')
    D = np.diag(A.diagonal())
    L = np.tril(A) - D
    U = np.triu(A) - D
    inv_LD = linalg.inv(w * L + D)
    xk = x0
    for _ in range(k):
        # SOR update: x_{k+1} = (wL + D)^{-1} (w b + (1 - w) D x_k - w U x_k)
        xk = np.matmul(w * inv_LD, b) + np.matmul(inv_LD, (1 - w) * np.matmul(D, xk) - w * np.matmul(U, xk))
    return xk
In [63]:
A = np.array([3, 1, -1, 2, 4, 1, -1, 2, 5]).reshape(3, 3)
b = np.array([4, 1, 1])
x0 = np.array([0, 0, 0])
w = 1.25
gauss_seidel_sor_method(A, b, w, x0, 14)
Out[63]:
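With successive over-relaxation the update blends the Gauss-Seidel step with the previous iterate through the weight w; setting w = 1 should reduce it to plain Gauss-Seidel. A hedged consistency check reusing `A`, `b`, and `x0` from the cell above:
In [ ]:
# Sketch: with w = 1 the SOR routine should reproduce the plain
# Gauss-Seidel iterate (numerically) after the same number of sweeps
print(np.allclose(gauss_seidel_sor_method(A, b, 1.0, x0, 14),
                  gauss_seidel_method(A, b, x0, 14)))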
In [74]:
A = np.array([4, -2, 2, -2, 2, -4, 2, -4, 11]).reshape(3, 3)
R = linalg.cholesky(A)
print(R)
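By default `scipy.linalg.cholesky` returns the upper-triangular factor R, so the factorization can be checked by reconstructing A from R^T R. A short sketch reusing `A` and `R` from the cell above:
In [ ]:
# Sketch: with the default upper-triangular factor, R.T @ R should
# reconstruct the original matrix A
print(np.allclose(np.matmul(R.T, R), A))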
In [8]:
def conjugate_gradient_method(A, b, x0, k):
    """
    Use the conjugate gradient method to solve a system of linear equations.
    Args:
        A : input matrix (symmetric positive definite)
        b : input right-hand side vector
        x0 : initial guess
        k : number of iterations
    Returns:
        The approximate solution
    """
    xk = x0
    dk = rk = b - np.matmul(A, x0)
    for _ in range(k):
        # Stop early once the residual is (numerically) zero
        if np.all(np.abs(rk) <= 1e-16):
            break
        ak = float(np.matmul(rk.T, rk)) / float(np.matmul(dk.T, np.matmul(A, dk)))
        xk = xk + ak * dk
        rk1 = rk - ak * np.matmul(A, dk)
        bk = np.matmul(rk1.T, rk1) / np.matmul(rk.T, rk)
        dk = rk1 + bk * dk
        rk = rk1
    return xk
In [31]:
A = np.array([2, 2, 2, 5]).reshape(2, 2)
b = np.array([6, 3])
x0 = np.array([0, 0])
conjugate_gradient_method(A, b, x0, 2)
Out[31]:
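For an n-by-n symmetric positive definite system, conjugate gradients reaches the exact solution in at most n steps in exact arithmetic, so two iterations are enough for this 2-by-2 example. A hedged residual check (the name `x_cg` is illustrative), reusing `A`, `b`, and `x0` from the cell above:
In [ ]:
# Sketch: after two CG steps the residual of this SPD system should be
# essentially zero
x_cg = conjugate_gradient_method(A, b, x0, 2)
print(np.allclose(np.matmul(A, x_cg), b))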
In [32]:
A = np.array([1, -1, 0, -1, 2, 1, 0, 1, 2]).reshape(3, 3)
b = np.array([0, 2, 3])
x0 = np.array([0, 0, 0])
conjugate_gradient_method(A, b, x0, 10)
Out[32]:
In [20]:
A = np.array([1, -1, 0, -1, 2, 1, 0, 1, 5]).reshape(3, 3)
b = np.array([3, -3, 4])
x0 = np.array([0, 0, 0])
x = slinalg.cg(A, b, x0)[0]
print('x = %s' % x)
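scipy.sparse.linalg.cg also returns an info flag alongside the solution (0 indicates convergence). The snippet below is an illustrative sketch only, reusing `A`, `b`, and `x0` from the cell above; the names `x_sp` and `info` are not from the original notebook.
In [ ]:
# Sketch: inspect the convergence flag and the residual norm of the
# sparse CG solution
x_sp, info = slinalg.cg(A, b, x0)
print(info)
print(np.linalg.norm(np.matmul(A, x_sp) - b))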
In [42]:
def multivariate_newton_method(fA, fDA, x0, k):
    """
    Use the multivariate Newton method to solve a system of nonlinear equations.
    Args:
        fA (function handle) : the vector-valued function F(x)
        fDA (function handle) : the Jacobian matrix DF(x)
        x0 (numpy 1d array) : initial guess
        k (int) : number of iterations
    Return:
        Approximate solution xk after k iterations
    """
    xk = x0
    for _ in range(k):
        # Solve DF(xk) * s = -F(xk) for the Newton step s
        lu, piv = linalg.lu_factor(fDA(*xk))
        s = linalg.lu_solve((lu, piv), -fA(*xk))
        xk = xk + s
    return xk
In [43]:
fA = lambda u,v : np.array([v - pow(u, 3), pow(u, 2) + pow(v, 2) - 1], dtype=np.float64)
fDA = lambda u,v : np.array([-3 * pow(u, 2), 1, 2 * u, 2 * v], dtype=np.float64).reshape(2, 2)
x0 = np.array([1, 2])
multivariate_newton_method(fA, fDA, x0, 10)
Out[43]:
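Each Newton step solves DF(x_k) s = -F(x_k) and updates x_{k+1} = x_k + s, so at an approximate root the function values should be near zero. A hedged check (the name `x_root` is illustrative), reusing `fA`, `fDA`, and `x0` from the cell above:
In [ ]:
# Sketch: re-run the iteration and print the residual F(u, v), which
# should be close to the zero vector at an approximate root
x_root = multivariate_newton_method(fA, fDA, x0, 10)
print(fA(*x_root))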
In [51]:
fA = lambda u, v: np.array([6 * pow(u, 3) + u * v - 3 * pow(v, 3) - 4,
                            pow(u, 2) - 18 * u * pow(v, 2) + 16 * pow(v, 3) + 1],
                           dtype=np.float64)
fDA = lambda u, v: np.array([18 * pow(u, 2) + v,
                             u - 9 * pow(v, 2),
                             2 * u - 18 * pow(v, 2),
                             -36 * u * v + 48 * pow(v, 2)],
                            dtype=np.float64).reshape(2, 2)
x0 = np.array([2, 2], dtype=np.float64)
multivariate_newton_method(fA, fDA, x0, 5)
Out[51]: