In [1]:
# Import modules
import math

import matplotlib.pyplot as plt
import numpy as np
import plotly
import plotly.figure_factory as ply_ff
import plotly.plotly as ply  # NOTE(review): moved to chart_studio.plotly in plotly>=4 — confirm installed version
import scipy
import scipy.integrate  # explicit submodule imports: bare `import scipy` does not guarantee these
import scipy.linalg
import scipy.special
import sympy as sym

from IPython.display import Math
from IPython.display import display
# Startup plotly
# Enable offline mode so figures render inline without a plot.ly account.
plotly.offline.init_notebook_mode(connected=True)
''' Fix MathJax issue '''
# The polling here is to ensure that plotly.js has already been loaded before
# setting display alignment in order to avoid a race condition.
# NOTE(review): this re-import of `display` shadows the IPython.display one
# above — both historically resolve to the same function, but confirm.
from IPython.core.display import display, HTML
# Inject a <script> that polls every 250 ms until window.Plotly exists,
# then switches MathJax to the SVG renderer with centered display math.
display(HTML(
'<script>'
'var waitForPlotly = setInterval( function() {'
'if( typeof(window.Plotly) !== "undefined" ){'
'MathJax.Hub.Config({ SVG: { font: "STIX-Web" }, displayAlign: "center" });'
'MathJax.Hub.Queue(["setRenderer", MathJax.Hub, "SVG"]);'
'clearInterval(waitForPlotly);'
'}}, 250 );'
'</script>'
))
In [53]:
# Parameters
x = 2
h = 0.1

# Exact derivative of f(x) = 1/x at x, computed symbolically for reference.
sym_x = sym.Symbol('x')
exact_deriv = sym.diff(1 / sym_x, sym_x).subs(sym_x, x).evalf()

# Two-point forward-difference approximation: (f(x+h) - f(x)) / h.
f = lambda t: 1 / t
forward_diff = (f(x + h) - f(x)) / h

# Compare the approximation with the exact value.
print('approximate = %f, real value = %f, backward error = %f' % (forward_diff, exact_deriv, abs(forward_diff - exact_deriv)))
In [56]:
# Parameters
x = 2
h = 0.1
f = lambda t: 1 / t

# Exact derivative of 1/x at x via sympy, for comparison.
sym_x = sym.Symbol('x')
exact_deriv = sym.diff(1 / sym_x, sym_x).subs(sym_x, x).evalf()

# Three-point centered difference: (f(x+h) - f(x-h)) / (2h), O(h^2) accurate.
central_diff = (f(x + h) - f(x - h)) / (2 * h)

# Compare the approximation with the exact value.
print('approximate = %f, real value = %f, backward error = %f' % (central_diff, exact_deriv, abs(central_diff - exact_deriv)))
In [117]:
# Compare two-point vs three-point difference quotients for f(x) = e^x at
# x = 0 (true derivative is 1) across step sizes h = 10^-1 ... 10^-9.
f = lambda x: math.exp(x)
real_value = 1
h_msg = "$10^{-%d}$"

# Two-point (forward) and three-point (centered) difference quotients.
two_point = lambda x, h: (f(x + h) - f(x)) / h
three_point = lambda x, h: (f(x + h) - f(x - h)) / (2 * h)

# Header row uses LaTeX so plotly typesets the formulas.
data = [
    ["h",
     "$f'(x) \\approx \\frac{e^{x+h} - e^x}{h}$",
     "error",
     "$f'(x) \\approx \\frac{e^{x+h} - e^{x-h}}{2h}$",
     "error"],
]
for exponent in range(1, 10):
    h = pow(10, -exponent)
    two_val = two_point(0, h)
    three_val = three_point(0, h)
    data.append([
        h_msg % exponent,
        '%.14f' % two_val,
        '%.14f' % abs(two_val - real_value),
        '%.14f' % three_val,
        '%.14f' % abs(three_val - real_value),
    ])

table = ply_ff.create_table(data)
plotly.offline.iplot(table, show_link=False)
In [2]:
# Render the symbolic derivative of e^{sin x} as typeset LaTeX output.
sym.init_printing(use_latex=True)
x = sym.Symbol('x')
expr = sym.exp(sym.sin(x))
dx = sym.diff(expr, x)
Math('Derivative : %s' % sym.latex(dx))
Out[2]:
In [42]:
# Apply Trapezoid Rule (single panel) to the integral of ln x over [1, 2].
# FIX: scipy.integrate.trapz was deprecated and removed (SciPy 1.14);
# trapezoid is the supported, numerically identical replacement.
trapz = scipy.integrate.trapezoid([np.log(1), np.log(2)], x=[1, 2])
# Evaluate the error term of Trapezoid Rule: |f''(xi)| (b-a)^3 / 12,
# and |f''(x)| = 1/x^2 is maximized at x = 1 on [1, 2].
sym_x = sym.Symbol('x')
expr = sym.diff(sym.log(sym_x), sym_x, 2)
trapz_err = abs(expr.subs(sym_x, 1).evalf() / 12)
# Print out results
print('Trapezoid rule : %f and upper bound error : %f' % (trapz, trapz_err))
In [59]:
# Apply Simpson's Rule (single panel: endpoints + midpoint) to ln x on [1, 2].
# FIX: scipy.integrate.simps was deprecated and removed (SciPy 1.14);
# simpson is the supported replacement.
area = scipy.integrate.simpson([np.log(1), np.log(1.5), np.log(2)], x=[1, 1.5, 2])
# Evaluate the error term: h^5 / 90 * |f''''(xi)| with h = 0.5,
# and |f''''(x)| = 6/x^4 is maximized at x = 1 on [1, 2].
sym_x = sym.Symbol('x')
expr = sym.diff(sym.log(sym_x), sym_x, 4)
simps_err = abs(pow(0.5, 5) / 90 * expr.subs(sym_x, 1).evalf())
# Print out results
print('Simpson\'s rule : %f and upper bound error : %f' % (area, simps_err))
In [16]:
# Apply composite Trapezoid Rule on [1, 2] with 4 panels (5 points, h = 0.25).
x = np.linspace(1, 2, 5)
y = np.log(x)
# FIX: scipy.integrate.trapz was removed (SciPy 1.14); trapezoid is the
# numerically identical replacement.
trapz = scipy.integrate.trapezoid(y, x=x)
# Error bound: (b - a) h^2 / 12 * max|f''|; |f''| = 1/x^2 peaks at x = 1.
sym_x = sym.Symbol('x')
expr = sym.diff(sym.log(sym_x), sym_x, 2)
trapz_err = abs((2 - 1) * pow(0.25, 2) / 12 * expr.subs(sym_x, 1).evalf())
print('Trapezoid Rule : %f, error = %f' % (trapz, trapz_err))
In [19]:
# Apply composite Simpson's Rule on [1, 2] with 9 points (h = 0.125).
# (Original comment incorrectly said "Trapezoid Rule".)
x = np.linspace(1, 2, 9)
y = np.log(x)
# FIX: scipy.integrate.simps was removed (SciPy 1.14); simpson is the
# supported replacement.
area = scipy.integrate.simpson(y, x=x)
# Error bound: (b - a) h^4 / 180 * max|f''''|; |f''''| = 6/x^4 peaks at x = 1.
sym_x = sym.Symbol('x')
expr = sym.diff(sym.log(sym_x), sym_x, 4)
simps_err = abs((2 - 1) * pow(0.125, 4) / 180 * expr.subs(sym_x, 1).evalf())
print('Simpson\'s Rule : %f, error = %f' % (area, simps_err))
In [50]:
# Parameters
m = 10                        # number of subintervals
h = (1 - 0) / m               # panel width
f = lambda x : np.sin(x) / x  # integrand; bounded (-> 1) as x -> 0
mids = np.arange(0 + h/2, 1, h)  # panel midpoints: h/2, 3h/2, ...
# Apply composite midpoint rule: h * sum of f at the midpoints.
area = h * np.sum(f(mids))
# Error bound: (b - a) h^2 / 24 * max|f''| over [0, 1].
# BUG FIX: the bound needs max|f''| on [0, 1].  The original evaluated
# f'' at x = 1 (|f''(1)| ~ 0.04), but |f''| is largest as x -> 0, where
# sin(x)/x = 1 - x^2/6 + ... gives f'' -> -1/3.  Use the limit at 0 so
# the printed value is actually an upper bound.
sym_x = sym.Symbol('x')
expr = sym.diff(sym.sin(sym_x) / sym_x, sym_x, 2)
mid_err = abs((1 - 0) * pow(h, 2) / 24 * sym.limit(expr, sym_x, 0))
# Print out
print('Composite Midpoint Rule : %.8f, error = %.8f' % (area, mid_err))
In [181]:
def romberg(f, a, b, step):
    """Romberg integration of f over [a, b].

    Builds a `step` x `step` extrapolation tableau: column 0 holds
    composite trapezoid estimates on successively halved meshes, and each
    later column applies Richardson extrapolation to the previous one.
    Returns the most refined estimate, R[step-1, step-1].
    """
    R = np.zeros((step, step))
    # Row 0: single-panel trapezoid estimate.
    R[0, 0] = 0.5 * (b - a) * (f(a) + f(b))
    for row in range(1, step):
        h = (b - a) / pow(2, row)
        # Refine the trapezoid estimate: reuse the previous row and add
        # only the newly introduced midpoints.
        acc = 0
        for k in range(1, pow(2, row - 1) + 1):
            acc += h * f(a + (2 * k - 1) * h)
        R[row, 0] = 0.5 * R[row - 1, 0] + acc
        # Richardson extrapolation across the row.
        for col in range(1, row + 1):
            factor = pow(4, col)
            R[row, col] = (factor * R[row, col - 1] - R[row - 1, col - 1]) / (factor - 1)
    return R[step - 1, step - 1]
In [183]:
# Integrate ln x over [1, 2] with the hand-rolled Romberg routine above.
f = np.log
result = romberg(f, 1, 2, 4)
print('Romberg Integration : %f' % result)
In [184]:
# Same integral via SciPy's built-in Romberg routine; show=True prints
# the Richardson extrapolation table.
# NOTE(review): scipy.integrate.romberg was deprecated in SciPy 1.12 and
# removed in 1.15 — on modern SciPy use scipy.integrate.quad (or the
# hand-rolled romberg() defined above) instead.
f = lambda x : np.log(x)
result = scipy.integrate.romberg(f, 1, 2, show=True)
print('Romberg Integration : %f' %(result) )
In [105]:
''' Use Trapezoid Rule '''
def adaptive_quadrature(f, a, b, tol):
    """Adaptively integrate f over [a, b] to tolerance tol (trapezoid rule)."""
    return adaptive_quadrature_recursively(f, a, b, tol, a, b, 0)

def adaptive_quadrature_recursively(f, a, b, tol, orig_a, orig_b, deep):
    """Bisect [a, b] until the trapezoid refinement stabilizes.

    orig_a/orig_b keep the original interval so each subinterval's
    acceptance threshold scales with its share of the total length;
    `deep` caps the recursion at 20 levels.
    """
    def trap(lo, hi):
        # Single-panel trapezoid estimate on [lo, hi].
        return (hi - lo) * (f(lo) + f(hi)) / 2

    mid = (a + b) / 2
    coarse = trap(a, b)
    left = trap(a, mid)
    right = trap(mid, b)
    # Accept when halving barely changes the estimate, or at max depth.
    if abs(coarse - left - right) < 3 * tol * (b - a) / (orig_b - orig_a) or deep > 20:
        return left + right
    return (adaptive_quadrature_recursively(f, a, mid, tol / 2, orig_a, orig_b, deep + 1)
            + adaptive_quadrature_recursively(f, mid, b, tol / 2, orig_a, orig_b, deep + 1))
In [106]:
''' Use Simpon's Rule '''
def adaptive_quadrature(f, a, b, tol):
    """Adaptively integrate f over [a, b] to tolerance tol (Simpson's rule)."""
    return adaptive_quadrature_recursively(f, a, b, tol, a, b, 0)

def adaptive_quadrature_recursively(f, a, b, tol, orig_a, orig_b, deep):
    """Bisect [a, b] until the Simpson refinement agrees within 15 * tol.

    The factor 15 is the standard Richardson-style acceptance constant
    for Simpson's rule; `deep` caps the recursion at 20 levels.
    """
    def simpson(lo, hi):
        # Single-panel Simpson estimate on [lo, hi].
        return (hi - lo) * (f(lo) + 4 * f((lo + hi) / 2) + f(hi)) / 6

    mid = (a + b) / 2
    coarse = simpson(a, b)
    left = simpson(a, mid)
    right = simpson(mid, b)
    # Accept when halving barely changes the estimate, or at max depth.
    if abs(coarse - left - right) < 15 * tol or deep > 20:
        return left + right
    return (adaptive_quadrature_recursively(f, a, mid, tol / 2, orig_a, orig_b, deep + 1)
            + adaptive_quadrature_recursively(f, mid, b, tol / 2, orig_a, orig_b, deep + 1))
In [108]:
# Highly oscillatory integrand — a stress test for the adaptive routine.
def f(x):
    return 1 + np.sin(np.exp(3 * x))

val = adaptive_quadrature(f, -1, 1, tol=1e-12)
print(val)
In [28]:
# Degree-2 Legendre polynomial; its roots are the 2-point Gauss nodes.
poly = scipy.special.legendre(2)
# Find roots of polynomials via the eigenvalues of the companion matrix.
comp = scipy.linalg.companion(poly)
roots, _vectors = scipy.linalg.eig(comp)
In [29]:
# Adaptive Gaussian quadrature of exp(-x^2/2) on [-1, 1]; quadrature()
# returns an (estimate, error) tuple, so quad[0] is the integral value.
# NOTE(review): scipy.integrate.quadrature was deprecated in SciPy 1.12
# and removed in 1.15 — scipy.integrate.fixed_quad / quad are the
# modern replacements.
f = lambda x : np.exp(-np.power(x, 2) / 2)
quad = scipy.integrate.quadrature(f, -1, 1)
print(quad[0])
In [50]:
# Parameters
a = -1
b = 1
deg = 3
f = lambda x: np.exp(-np.power(x, 2) / 2)

# Gauss-Legendre nodes and weights of degree `deg` on [-1, 1].
nodes, weights = scipy.special.p_roots(deg)  # Or use numpy.polynomial.legendre.leggauss
quad = np.sum(weights * f(nodes))
print(quad)
In [66]:
# Parameters
a = 1
b = 2
deg = 4

# Map the integral of ln x over [a, b] onto [-1, 1] for Gauss-Legendre:
# x = ((b - a) t + b + a) / 2, dx = (b - a)/2 dt.
f = lambda t: np.log(((b - a) * t + b + a) / 2) * (b - a) / 2
nodes, weights = scipy.special.p_roots(deg)
result = np.sum(weights * f(nodes))
result
Out[66]: