We recall some preliminary definitions, starting with the Lagrange basis polynomials:
In [19]:
import numpy as np
def l(q, i, x):
    """Returns the i-th Lagrange basis polynomial associated with the grid q,
    evaluated at x"""
    return np.prod([(x - q[j])/(q[i] - q[j])
                    for j in range(len(q)) if j != i])
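For reference, the formula implemented by `l` above, writing the grid as $q_0, \dots, q_m$, is
$$\ell_i(x) = \prod_{\substack{0 \le j \le m \\ j \neq i}} \frac{x - q_j}{q_i - q_j},$$
so that $\ell_i(q_k) = 1$ if $k = i$ and $0$ otherwise.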
the definition of the Lagrangian interpolation of a function $f$:
In [20]:
def L(q, f, x):
    """Returns the Lagrangian interpolation of f associated with the grid q,
    evaluated at x"""
    return sum([f(q[i])*l(q, i, x) for i in range(len(q))])
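By construction $L$ reproduces $f$ at the grid nodes, since $\ell_i(q_k) = \delta_{ik}$. A quick check (the grid `q` and the function `g` below are chosen only for illustration):
In [ ]:
q = [0, 0.3, 0.7, 1]
g = lambda x: np.exp(x)
# the interpolant agrees with g exactly at the nodes
print(max(abs(g(xk) - L(q, g, xk)) for xk in q))   # expected: 0.0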
the definition of the Bernstein polynomial $B(i,n)$:
In [21]:
from scipy.special import binom
def B1(i, n, t):
    """Returns the Bernstein polynomial (i,n) evaluated at t,
    using the binomial definition"""
    if i < 0 or i > n:
        return 0
    return binom(n, i) * t**i * (1-t)**(n-i)
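Explicitly, `B1(i, n, t)` computes
$$B_{i,n}(t) = \binom{n}{i}\, t^{i} (1-t)^{n-i}, \qquad 0 \le i \le n,$$
and returns $0$ when $i < 0$ or $i > n$.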
and the definition of the Bernstein approximation of a function $f$:
In [22]:
def B(n, f, x):
    """Returns the Bernstein approximation of order n of the function f,
    evaluated at x"""
    return sum([B1(i, n, x) * f(i/n) for i in np.arange(0, n+1, 1)])
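Explicitly, the approximation of order $n$ is
$$B_n f(x) = \sum_{i=0}^{n} f\!\left(\frac{i}{n}\right) B_{i,n}(x).$$
By Bernstein's proof of the Weierstrass theorem, $B_n f \to f$ uniformly on $[0,1]$ for every continuous $f$, but the convergence is slow: even for smooth $f$ the error typically decays only like $1/n$.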
To estimate the $L^{\infty}$ norm of the error we first implement a brute-force search for the maximum of a function over an equispaced grid with a small step:
In [23]:
def bruteMax(f, st):
    """Returns the maximum of f attained at the points of an equispaced
    grid in [0,1] with step st"""
    grid = np.arange(0, 1+st, st)
    return max([f(x) for x in grid])
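For example, $x \mapsto x(1-x)$ attains its maximum $1/4$ at $x = 1/2$, which lies exactly on a grid of step $2^{-8}$:
In [ ]:
print(bruteMax(lambda x: x*(1 - x), 2**(-8)))   # prints 0.25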
We now implement the $L^{\infty}$ error estimate for Lagrangian interpolation on a given grid:
In [24]:
def supErrorL(f, step, interpGrid):
    """Returns the sup norm, approximated via bruteMax with step `step`, of the
    difference between f and its Lagrangian interpolation at interpGrid"""
    error = lambda x: abs(f(x) - L(interpGrid, f, x))
    return bruteMax(error, step)
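Since three distinct nodes interpolate any quadratic exactly, the following returns a value of the order of machine precision (the quadratic and the grid are chosen only for illustration):
In [ ]:
print(supErrorL(lambda x: x**2, 2**(-8), [0, 0.5, 1]))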
Therefore the $L^{\infty}$ error estimate for Lagrangian interpolation of order $n$ (i.e. on the $n+1$ equispaced nodes of step $1/n$ in $[0,1]$) is:
In [25]:
def supErrorLEquisp(f, step, n):
    """Returns the sup norm, approximated via bruteMax with step `step`, of the
    difference between f and its Lagrangian interpolation with equispaced nodes"""
    # n+1 equispaced nodes, endpoints 0 and 1 included
    # (linspace avoids the floating-point endpoint issues of arange)
    grid = np.linspace(0, 1, int(n) + 1)
    return supErrorL(f, step, grid)
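Recall the classical bound behind these experiments: if $f \in C^{n+1}([0,1])$ and $L_n f$ denotes the interpolant on distinct nodes $q_0, \dots, q_n \in [0,1]$, then
$$\|f - L_n f\|_{\infty} \le \frac{\|f^{(n+1)}\|_{\infty}}{(n+1)!} \, \max_{x \in [0,1]} \prod_{i=0}^{n} |x - q_i|.$$
The nodal product on the right is exponentially larger for equispaced nodes than for Chebyshev nodes, which is why the two node choices are compared below.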
while for Chebyshev points we have:
In [26]:
def supErrorLCheb(f, step, n):
    """Returns the sup norm, approximated via bruteMax with step `step`, of the
    difference between f and its Lagrangian interpolation with Chebyshev nodes"""
    grid = [np.cos(np.pi * (2*k - 1)/(2*n))/2 + .5 for k in np.arange(1, n+1, 1)]
    return supErrorL(f, step, grid)
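The grid built here consists of the $n$ Chebyshev points of the first kind on $[-1,1]$, mapped affinely onto $[0,1]$:
$$q_k = \frac{1}{2}\cos\!\left(\frac{(2k-1)\pi}{2n}\right) + \frac{1}{2}, \qquad k = 1, \dots, n.$$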
Similarly, we implement the $L^{\infty}$ error estimate for the Bernstein approximation:
In [27]:
def supErrorB(f, step, n):
    """Returns the sup norm, approximated via bruteMax with step `step`, of the
    difference between f and its Bernstein approximation of order n"""
    error = lambda x: abs(f(x) - B(n, f, x))
    return bruteMax(error, step)
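Since $\sum_{i=0}^{n} B_{i,n}(x) = 1$ and $\sum_{i=0}^{n} \frac{i}{n} B_{i,n}(x) = x$, the Bernstein operator reproduces affine functions exactly, which gives a quick sanity check (the affine function below is chosen only for illustration):
In [ ]:
aff = lambda x: 2*x + 1              # affine, hence reproduced exactly by B
print(supErrorB(aff, 2**(-8), 20))   # expected: of the order of machine precision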
We now plot the results for a few test functions.
a) $x \mapsto \sin(2 \pi x)$
In [28]:
from matplotlib import pyplot as plt
%matplotlib inline
f = lambda x : np.sin(2 * np.pi * x)
step = 2**(-8) # the step used for brute max search
maxInterpNodes = 64.
interpN = [h for h in np.arange(1, maxInterpNodes + 1, 1)]
plt.semilogy(interpN,
             [supErrorLEquisp(f, step, nNodes) for nNodes in interpN])
plt.title("x -> sin(2 pi x)\nLagrangian equispaced error (log) vs n. interpolation nodes (linear)")
plt.show()
In [29]:
plt.semilogy(interpN,
             [supErrorLCheb(f, step, nNodes) for nNodes in interpN])
plt.title("x -> sin(2 pi x)\nLagrangian Chebyshev error (log) vs n. interpolation nodes (linear)")
plt.show()
In [30]:
interpN = [h for h in np.arange(1, 1000, 10.)]
plt.semilogy(interpN,
             [supErrorB(f, step, nNodes) for nNodes in interpN])
plt.title("x -> sin(2 pi x)\nBernstein error (log) vs n. interpolation nodes (linear)")
plt.show()
b) $x \mapsto \dfrac{1}{1 + 25(2x-1)^2}$ (Runge's function rescaled to $[0,1]$)
In [31]:
f = lambda x : 1/(1 + (10*x - 5)**2)
interpN = [h for h in np.arange(1, maxInterpNodes + 1, 1)]
plt.semilogy(interpN,
             [supErrorLEquisp(f, step, nNodes) for nNodes in interpN])
plt.title("Runge funct.\nLagrangian equispaced error (log) vs n. interpolation nodes (linear)")
plt.show()
In [32]:
plt.semilogy(interpN,
             [supErrorLCheb(f, step, nNodes) for nNodes in interpN])
plt.title("Runge funct.\nLagrangian Chebyshev error (log) vs n. interpolation nodes (linear)")
plt.show()
In [33]:
interpN = [h for h in np.arange(1, 1000, 10.)]
plt.semilogy(interpN,
             [supErrorB(f, step, nNodes) for nNodes in interpN])
plt.title("Runge funct.\nBernstein error (log) vs n. interpolation nodes (linear)")
plt.show()
c) $x \mapsto |x|$
In [34]:
f = lambda x : abs(x)
interpN = [h for h in np.arange(1, maxInterpNodes + 1, 1)]
plt.semilogy(interpN,
             [supErrorLEquisp(f, step, nNodes) for nNodes in interpN])
plt.title("x -> |x|\nLagrangian equispaced error (log) vs n. interpolation nodes (linear)")
plt.show()
In [35]:
plt.semilogy(interpN,
             [supErrorLCheb(f, step, nNodes) for nNodes in interpN])
plt.title("x -> |x|\nLagrangian Chebyshev error (log) vs n. interpolation nodes (linear)")
plt.show()
In [36]:
interpN = [h for h in np.arange(1, 1000, 10.)]
plt.semilogy(interpN,
             [supErrorB(f, step, nNodes) for nNodes in interpN])
plt.title("x -> |x|\nBernstein error (log) vs n. interpolation nodes (linear)")
plt.show()