In [1]:
# load matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
# imports
import numpy as np
import scipy.special
import scipy.stats
# import the bayesian quadrature object
from bayesian_quadrature import BQ
from gp import PeriodicKernel
# seed the numpy random generator, so we always get the same randomness
np.random.seed(8706)
First, we need to define the options that configure the Bayesian quadrature object:
In [2]:
options = {
    'n_candidate': 5,            # number of candidate points
    'x_mean': 0.0,               # mean of the Gaussian prior over x
    'x_var': 10.0,               # variance of the Gaussian prior over x
    'candidate_thresh': 0.5,     # minimum distance between candidate points
    'kernel': PeriodicKernel,    # covariance function for the GPs
    'optim_method': 'L-BFGS-B'   # optimizer used when fitting hyperparameters
}
Now, define the function we want to integrate: a von Mises density, $p(x \mid \mu, \kappa) = \exp(\kappa \cos(x - \mu)) / (2\pi I_0(\kappa))$. Then pick a few $x$ points and evaluate the density to get the corresponding $y$ values.
In [3]:
def vmpdf(x, mu, kappa):
    """Von Mises density with mean mu and concentration kappa."""
    # log normalizing constant, -log(2*pi*I_0(kappa))
    C = -np.log(2 * np.pi * scipy.special.iv(0, kappa))
    p = np.exp(C + (kappa * np.cos(x - mu)))
    return p
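As a quick sanity check, vmpdf should agree with SciPy's built-in von Mises distribution, which implements the same density:
In [ ]:
# vmpdf should match scipy's von Mises density for the same mu and kappa
xs = np.linspace(-np.pi, np.pi, 100)
assert np.allclose(vmpdf(xs, 0.1, 1.1),
                   scipy.stats.vonmises.pdf(xs, 1.1, loc=0.1))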
In [4]:
x = np.array([-1.75, -1, 1.25])
f_y = lambda x: vmpdf(x, 0.1, 1.1)
y = f_y(x)
Create the Bayesian quadrature object, and fit its hyperparameters.
In [5]:
bq = BQ(x, y, **options)
# initial kernel parameters for the GP over the log-likelihood (params_tl)
# and for the GP over the likelihood itself (params_l)
bq.init(params_tl=(5, 2*np.pi, 1, 0), params_l=(0.2, np.pi / 2., 1, 0))
# optimize the output scale (h) and input scale (w) hyperparameters
bq.fit_hypers(['h', 'w'])
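To see what the optimizer found, we can inspect the fitted hyperparameters. Here gp_log_l and gp_l are assumed to be the BQ object's underlying Gaussian processes over the log-likelihood and the likelihood; adjust the attribute names if the library exposes them differently:
In [ ]:
# NOTE: the gp_log_l / gp_l attributes and their params arrays are an
# assumption about the library's internals
print("log-likelihood GP params: %s" % (bq.gp_log_l.params,))
print("likelihood GP params: %s" % (bq.gp_l.params,))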
Plot the result.
In [6]:
fig, axes = bq.plot(f_y, xmin=-10, xmax=10)
Print out the mean and variance of the integral, $Z$:
In [7]:
# build the grid used for approximate integration over one period
bq._make_approx_x(xmin=0, xmax=2 * np.pi, n=1000)
print("E[Z] = %f" % bq.Z_mean())
print("V(Z) = %f" % bq.Z_var())
Finally, plot the approximation to the likelihood, along with the expected squared mean and the expected variance of $Z$ as functions of where a potential new observation would be made.
In [8]:
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True)
xmin, xmax = -10, 10
# approximation to the likelihood
bq.plot_l(ax1, f_l=f_y, xmin=xmin, xmax=xmax)
# expected squared mean of Z given a new observation at each location
bq.plot_expected_squared_mean(ax2, xmin=xmin, xmax=xmax)
# expected variance of Z given a new observation at each location
bq.plot_expected_variance(ax3, xmin=xmin, xmax=xmax)
fig.set_figheight(8)
plt.tight_layout()
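These curves are exactly what an active-sampling loop would use: the most informative next observation is (approximately) the location that minimizes the expected variance of $Z$. Assuming expected_Z_var accepts an array of candidate locations (presumably the quantity plot_expected_variance draws), a minimal sketch:
In [ ]:
# choose the next observation by minimizing the expected variance of Z;
# expected_Z_var taking an array of locations is an assumption based on
# the plotting call above
x_cand = np.linspace(xmin, xmax, 100)
x_next = x_cand[np.argmin(bq.expected_Z_var(x_cand))]
print("most informative next observation: x = %f" % x_next)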