The idea here is to compare the various analytic solutions with approximate ones, to verify that the analytic implementations are correct. Ideally, we want to see two things (the general pattern is sketched below):

1. As the number of approximation points increases, the error should decrease and tend towards zero.
2. With a sufficient number of approximation points, the analytic and approximate solutions should appear visually identical.
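As a minimal sketch of that pattern (exact() and approximate(n) are hypothetical stand-ins for an analytic routine and its numerical counterpart, not functions from the library):

import numpy as np

def exact():
    # hypothetical analytic solution
    return 1.0

def approximate(n):
    # hypothetical numerical estimate computed from n points
    return 1.0 + 1.0 / n

# evaluate the error for an increasing number of approximation points
ns = np.logspace(np.log10(30), np.log10(500), 100).astype(int)
errors = [abs(exact() - approximate(n)) for n in ns]

# the error should shrink towards zero as n grows
assert errors[-1] < errors[0]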
In [1]:
    
# imports
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
from bayesian_quadrature import BQ, bq_c, gauss_c, util
from gp import GaussianKernel
# set the log level to debug
import logging
logger = logging.getLogger("bayesian_quadrature")
logger.setLevel("DEBUG")
    
In [2]:
    
# seed the numpy random generator so that results are reproducible
np.random.seed(8728)
    
In [3]:
    
# parameters
options = {
    'n_candidate': 10,
    'x_mean': 0.0,
    'x_var': 10.0,
    'candidate_thresh': 0.5,
    'kernel': GaussianKernel,
    'optim_method': 'L-BFGS-B'
}
# different numbers of approximation points to try, to check that the
# approximation converges (cast to integers, as np.linspace requires)
N = np.logspace(np.log10(30), np.log10(500), 100).astype(int)
# the range over which we will place the approximation points
xmin, xmax = -10, 10
    
Here we define some helper functions to plot the error and visual comparisons, and to print information about the worst error and the values of the approximate and analytic solutions.
In [4]:
    
def plot_0d(calc, approx):
    fig, ax = plt.subplots()
    ax.plot(N, np.abs(calc - approx), lw=2, color='r')
    ax.set_title("Error")
    ax.set_xlabel("# approximation points")
    ax.set_xlim(N.min(), N.max())
    util.set_scientific(ax, -5, 4)
    
In [5]:
    
def print_0d(calc, approx):
    err = np.abs(calc - approx[-1])
    print("error : %s" % err)
    print("approx: %s" % approx[-1])
    print("calc  : %s" % calc)
    
In [6]:
    
def plot_1d(calc, approx):
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.plot(N, np.abs(calc[:, None] - approx).sum(axis=0), lw=2, color='r')
    ax1.set_title("Error")
    ax1.set_xlabel("# approximation points")
    ax1.set_xlim(N.min(), N.max())
    util.set_scientific(ax1, -5, 4)
    ax2.plot(approx[:, -1], lw=2, label="approx")
    ax2.plot(calc, '--', lw=2, label="calc")
    ax2.set_title("Comparison")
    ax2.legend(loc=0)
    util.set_scientific(ax2, -5, 4)
    fig.set_figwidth(8)
    plt.tight_layout()
    
In [7]:
    
def print_1d(calc, approx):
    maxerr = np.abs(calc - approx[..., -1]).max()
    approx_sum, calc_sum = approx[..., -1].sum(), calc.sum()
    print("max error  : %s" % maxerr)
    print("sum(approx): %s" % approx_sum)
    print("sum(calc)  : %s" % calc_sum)
    
In [8]:
    
def plot_2d(calc, approx):
    fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4)
    ax1.plot(N, np.abs(calc[:, :, None] - approx).sum(axis=0).sum(axis=0), lw=2, color='r')
    ax1.set_title("Error")
    ax1.set_xlabel("# approximation points")
    ax1.set_xlim(N.min(), N.max())
    util.set_scientific(ax1, -5, 4)
    vmax = max(approx[:, :, -1].max(), calc.max())
    ax2.imshow(approx[:, :, -1], cmap='gray', interpolation='nearest', vmin=0, vmax=vmax)
    ax2.set_title("Approximation")
    ax3.imshow(calc, cmap='gray', interpolation='nearest', vmin=0, vmax=vmax)
    ax3.set_title("Calculated")
    i = 0
    ax4.plot(approx[i, :, -1], lw=2, label='approx')
    ax4.plot(calc[i], '--', lw=2, label='calc')
    ax4.set_title("Comparison at index %d" % i)
    ax4.legend(loc=0)
    util.set_scientific(ax4, -5, 4)
    fig.set_figwidth(14)
    plt.tight_layout()
    
Now we create the Bayesian Quadrature object that we'll be testing against.
In [9]:
    
# choose points and evaluate them under a standard normal distribution
x = np.linspace(-5, 5, 9)
f_y = lambda x: scipy.stats.norm.pdf(x, 0, 1)
y = f_y(x)
# create the bayesian quadrature object; params_tl and params_l are
# presumably the kernel parameters for the GPs over the log-likelihood
# and the likelihood, respectively
bq = BQ(x, y, **options)
bq.init(
    params_tl=(15, 2, 0.), 
    params_l=(0.2, 1.3, 0.))
# plot the BQ approximation
fig, axes = bq.plot(f_l=f_y, xmin=xmin, xmax=xmax)
    
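We can also plot the likelihood approximation together with the expected squared mean and the expected variance of Z, presumably as functions of where the next observation would be placed.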
In [10]:
    
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)
xmin, xmax = -10, 10
bq.plot_l(ax1, f_l=f_y, xmin=xmin, xmax=xmax)
bq.plot_expected_squared_mean(ax2, xmin=xmin, xmax=xmax)
bq.plot_expected_variance(ax3, xmin=xmin, xmax=xmax)
fig.set_figheight(8)
plt.tight_layout()
    
In [11]:
    
# extract the input locations and kernel parameters in Fortran order,
# which the low-level gauss_c routines appear to expect
x_l = np.array(bq.gp_l.x[None], order='F')
h_l = bq.gp_l.K.h
w_l = np.array([bq.gp_l.K.w], order='F')
x_tl = np.array(bq.gp_log_l.x[None], order='F')
h_tl = bq.gp_log_l.K.h
w_tl = np.array([bq.gp_log_l.K.w], order='F')
# mean and (co)variance of the Gaussian measure over x
xm = bq.options['x_mean']
xC = bq.options['x_cov']
    
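The first comparison is the single kernel integral. Judging from its name and arguments, gauss_c.int_K computes z_i = ∫ K_l(x_i, x) N(x | xm, xC) dx for each point x_i, while approx_int_K estimates the same integral numerically from the kernel evaluations Kxxo over a grid of n points.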
In [12]:
    
calc = np.empty(bq.nsc, order='F')
gauss_c.int_K(calc, x_l, h_l, w_l, xm, xC)
approx = np.empty((bq.nsc, N.size), order='F')
for i, n in enumerate(N):
    xo = np.linspace(xmin, xmax, n)
    Kxxo = np.array(bq.gp_l.Kxxo(xo), order='F')
    gauss_c.approx_int_K(
        approx[:, i], 
        np.array(xo[None], order='F'),
        Kxxo, xm, xC)
    
In [13]:
    
plot_1d(calc, approx)
print_1d(calc, approx)
    
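Next, the integral of a product of two kernels: int_K1_K2 appears to compute ∫ K_l(x_i, x) K_tl(x_j, x) N(x | xm, xC) dx for every pair of points (x_i, x_j) from the likelihood and log-likelihood GPs.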
In [14]:
    
calc = np.empty((bq.nsc, bq.ns), order='F')
gauss_c.int_K1_K2(calc, x_l, x_tl, h_l, w_l, h_tl, w_tl, xm, xC)
approx = np.empty((bq.nsc, bq.ns, N.size), order='F')
for i, n in enumerate(N):
    xo = np.linspace(xmin, xmax, n)
    K1xxo = np.array(bq.gp_l.Kxxo(xo), order='F')
    K2xxo = np.array(bq.gp_log_l.Kxxo(xo), order='F')
    gauss_c.approx_int_K1_K2(
        approx[:, :, i], 
        np.array(xo[None], order='F'), 
        K1xxo, K2xxo, xm, xC)
    
In [15]:
    
plot_2d(calc, approx)
print_1d(calc, approx)
    
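Then the double integral with the log-likelihood kernel in the middle: int_int_K1_K2_K1 appears to compute ∫∫ K_l(x_i, x) K_tl(x, x') K_l(x_j, x') N(x | xm, xC) N(x' | xm, xC) dx dx'.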
In [16]:
    
calc = np.empty((bq.nsc, bq.nsc), order='F')
gauss_c.int_int_K1_K2_K1(calc, x_l, h_l, w_l, h_tl, w_tl, xm, xC)
    
approx = np.empty((bq.nsc, bq.nsc, N.size), order='F')
for i, n in enumerate(N):
    xo = np.linspace(xmin, xmax, n)
    K1xxo = np.array(bq.gp_l.Kxxo(xo), order='F')
    K2xoxo = np.array(bq.gp_log_l.Kxoxo(xo), order='F')
    gauss_c.approx_int_int_K1_K2_K1(
        approx[:, :, i], 
        np.array(xo[None], order='F'),
        K1xxo, K2xoxo, xm, xC)
    
In [17]:
    
plot_2d(calc, approx)
print_1d(calc, approx)
    
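Next, the double integral of two kernels: int_int_K1_K2 appears to compute ∫∫ K_l(x, x') K_tl(x_j, x') N(x | xm, xC) N(x' | xm, xC) dx dx' for each point x_j.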
In [18]:
    
calc = np.empty(bq.ns, order='F')
gauss_c.int_int_K1_K2(calc, x_tl, h_l, w_l, h_tl, w_tl, xm, xC)
approx = np.empty((bq.ns, N.size), order='F')
for i, n in enumerate(N):
    xo = np.linspace(xmin, xmax, n)
    K1xoxo = np.array(bq.gp_l.Kxoxo(xo), order='F')
    K2xxo = np.array(bq.gp_log_l.Kxxo(xo), order='F')
    gauss_c.approx_int_int_K1_K2(
        approx[:, i],
        np.array(xo[None], order='F'),
        K1xoxo, K2xxo, xm, xC)
    
In [19]:
    
plot_1d(calc, approx)
print_1d(calc, approx)
    
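Finally for the low-level routines, the fully marginalized kernel: int_int_K appears to compute ∫∫ K_l(x, x') N(x | xm, xC) N(x' | xm, xC) dx dx' (the leading argument is presumably the input dimensionality).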
In [20]:
    
calc = gauss_c.int_int_K(1, h_l, w_l, xm, xC)
approx = np.empty(N.size, order='F')
for i, n in enumerate(N):
    xo = np.linspace(xmin, xmax, n)
    Kxoxo = np.array(bq.gp_l.Kxoxo(xo), order='F')
    approx[i] = gauss_c.approx_int_int_K(
        np.array(xo[None], order='F'), Kxoxo, xm, xC)
    
In [21]:
    
plot_0d(calc, approx)
print_0d(calc, approx)
    
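With the individual integrals verified, we can test the quantities we actually care about, starting with the mean of the estimate of Z = ∫ ℓ(x) N(x | xm, xC) dx: _exact_Z_mean uses the analytic integrals, while _approx_Z_mean computes the same quantity on a grid of points.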
In [22]:
    
calc = bq._exact_Z_mean()
approx = np.empty(N.size)
for i, n in enumerate(N):
    xo = np.linspace(xmin, xmax, n)
    approx[i] = bq._approx_Z_mean(xo)
    
In [23]:
    
plot_0d(calc, approx)
print_0d(calc, approx)
    
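And similarly for the variance of the estimate of Z.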
In [24]:
    
calc = bq._exact_Z_var()
approx = np.empty(N.size)
for i, n in enumerate(N):
    xo = np.linspace(xmin, xmax, n)
    approx[i] = bq._approx_Z_var(xo)
    
In [25]:
    
plot_0d(calc, approx)
print_0d(calc, approx)