In [1]:
# load matplotlib
%matplotlib inline
import matplotlib.pyplot as plt

# imports
import numpy as np
import scipy.stats
import scipy.optimize as optim

# import the Bayesian quadrature object
from bayesian_quadrature import BQ
from gp import GaussianKernel

# seed the numpy random generator, so we always get the same randomness
np.random.seed(8706)
First, we need to define various parameters:
In [2]:
options = {
    'n_candidate': 10,
    'x_mean': 0.0,
    'x_var': 10.0,
    'candidate_thresh': 0.5,
    'kernel': GaussianKernel,
    'optim_method': 'L-BFGS-B'
}
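If `x_mean` and `x_var` parameterize a Gaussian prior $p(x) = \mathcal{N}(x \mid 0, 10)$ over the input (an assumption based on the parameter names, not confirmed by the API), then the quantity being estimated is $Z = \int \ell(x)\,p(x)\,dx$. A crude Monte Carlo sketch of that integral, for later reference:
In [ ]:
# hypothetical sanity check: crude Monte Carlo estimate of
# Z = int l(x) N(x | x_mean, x_var) dx, under the assumption that
# x_mean/x_var define a Gaussian prior over x.
# use a separate RandomState so the global seed set above is untouched
rs = np.random.RandomState(0)
xs = rs.normal(options['x_mean'], np.sqrt(options['x_var']), 100000)
print "Monte Carlo Z ~= %.4f" % np.mean(scipy.stats.norm.pdf(xs, 0, 1))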
In [3]:
def plot(bq):
    fig, axes = plt.subplots(1, 3)
    xmin, xmax = -10, 10
    # GP over the log-likelihood
    bq.plot_gp_log_l(axes[0], f_l=f_y, xmin=xmin, xmax=xmax)
    axes[0].set_ylim(-60, 2)
    # GP over the likelihood itself
    bq.plot_l(axes[1], f_l=f_y, xmin=xmin, xmax=xmax)
    axes[1].set_ylim(-0.1, 0.5)
    # expected variance of Z after observing a new point
    bq.plot_expected_variance(axes[2], xmin=xmin, xmax=xmax)
    fig.set_figwidth(16)
    fig.set_figheight(3.5)
Now, sample a random $x$ point uniformly from $[-5, 5]$, and compute the corresponding $y$ value by evaluating the standard normal PDF at $x$.
In [4]:
x = np.random.uniform(-5, 5, 1)
f_y = lambda x: scipy.stats.norm.pdf(x, 0, 1)
y = f_y(x)
Create the Bayesian quadrature object, and initialize its kernel parameters.
In [5]:
bq = BQ(x, y, **options)
# kernel parameters (likely height, width, noise) for the GPs over the
# log-likelihood (tl) and the likelihood (l)
bq.init(params_tl=(15, 2, 0), params_l=(0.2, 1.3, 0))
plot(bq)
In [6]:
def add(bq):
    params = ['h', 'w']
    # propose 20 random candidate locations and choose the next observation
    x_a = np.sort(np.random.uniform(-10, 10, 20))
    x = bq.choose_next(x_a, n=100, params=params)
    print "x = %s" % x

    # current estimate of the integral Z, with a 95% confidence interval
    mean = bq.Z_mean()
    var = bq.Z_var()
    print "E[Z] = %s" % mean
    print "V(Z) = %s" % var
    conf = 1.96 * np.sqrt(var)
    lower = mean - conf
    upper = mean + conf
    print "Z = %.4f [%.4f, %.4f]" % (mean, lower, upper)

    # add the new observation, refit the kernel hyperparameters, and replot
    bq.add_observation(x, f_y(x))
    bq.fit_hypers(params)
    plot(bq)
In [7]:
add(bq)
In [8]:
add(bq)
In [9]:
add(bq)
In [10]:
add(bq)
In [11]:
add(bq)
In [12]:
add(bq)
In [13]:
add(bq)
In [14]:
add(bq)
In [15]:
add(bq)
In [16]:
add(bq)
In [17]:
add(bq)
In [18]:
add(bq)
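Because the likelihood here is itself a Gaussian density, the true value of $Z$ has a closed form via the Gaussian convolution identity, $\int \mathcal{N}(x \mid 0, 1)\,\mathcal{N}(x \mid 0, 10)\,dx = \mathcal{N}(0 \mid 0, 11)$, which gives a reference point for the final estimate. A minimal check, again assuming the prior is $\mathcal{N}(0, 10)$:
In [ ]:
# closed-form value of Z: the integral of the product of two Gaussian
# densities is a Gaussian density evaluated at the difference of the means,
# with the variances summed (assumes the prior is N(0, x_var))
Z_true = scipy.stats.norm.pdf(0, 0, np.sqrt(1 + options['x_var']))
print "analytic Z = %.4f" % Z_true
print "BQ estimate E[Z] = %.4f" % bq.Z_mean()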