In [ ]:
import numpy as np
import sklearn.gaussian_process as gp
import matplotlib.pyplot as plt

In [ ]:
np.random.seed(100)

In [ ]:
# 1-D test function with a few bumps on [-2, 10]
f = lambda x: np.exp(-(x - 2)**2) + np.exp(-(x - 6)**2 / 10) + 1 / (x**2 + 1)
x_min = -2
x_max = 10

xs = np.linspace(x_min, x_max, 100)  # dense grid for plotting and prediction
ys = f(xs)

rxs = np.random.uniform(x_min, x_max, size=(6,1))  # 6 random training inputs
rys = f(rxs)

In [ ]:
plt.figure(figsize=(16,6))
plt.plot(xs, ys, 'g-')
plt.plot(rxs, rys, 'bo')
plt.show()

In [ ]:
gp_params = dict(
    # variance of additional Gaussian noise on the observations:
    # larger => more assumed noise. Default = 1e-10 (numerical jitter only).
    alpha = 1e-10,
    # nu=1.5 assumes the target function is once-differentiable
    kernel = 1.0 * gp.kernels.Matern(nu=1.5),  # + gp.kernels.WhiteKernel(),
    #kernel = 1.0 * gp.kernels.RBF(),
    n_restarts_optimizer = 10,
    # normalize_y=True zero-centres the targets before fitting (theoretically
    # questionable, see the docs, but can help); with a constant term in the
    # kernel it shouldn't be required
    #normalize_y = True,
    copy_X_train = True  # whether to copy the training data (in case it is modified)
)

gp_model = gp.GaussianProcessRegressor(**gp_params)
gp_model
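
As an optional sanity check, the composite kernel's tunable hyperparameters and their (log-scale) optimisation bounds can be listed before fitting; hyperparameters and bounds are standard attributes of scikit-learn kernel objects.

In [ ]:
# Optional: the hyperparameters the optimizer will tune, with their
# log-transformed search bounds (one row per hyperparameter).
for hp in gp_model.kernel.hyperparameters:
    print(hp)
gp_model.kernel.bounds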

In [ ]:
gp_model.fit(rxs, rys)
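
For reference, fit stores the optimised kernel in kernel_ and the corresponding log-marginal likelihood in log_marginal_likelihood_value_; inspecting them shows what the optimizer restarts converged to (purely informational).

In [ ]:
# Kernel with hyperparameters chosen by maximising the log-marginal
# likelihood, and the likelihood value at that optimum.
print(gp_model.kernel_)
gp_model.log_marginal_likelihood_value_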

In [ ]:
gp_mu, gp_sigma = gp_model.predict(xs.reshape(-1,1), return_std=True)
gp_sigma = gp_sigma.reshape(-1,1)

In [ ]:
plt.figure(figsize=(16,6))
plt.plot(xs, ys, 'g-')
plt.plot(rxs, rys, 'bo')
plt.plot(xs, gp_mu, 'r-')
plt.fill_between(xs, gp_mu.flatten()-2*gp_sigma.flatten(), gp_mu.flatten()+2*gp_sigma.flatten(), alpha=0.3)
plt.show()

In [ ]:
# Add 6 more random observations to the original 6.
rxs2 = np.vstack([rxs, np.random.uniform(x_min, x_max, size=(6,1))])
rys2 = f(rxs2)

In [ ]:
# Freeze the hyperparameters: reuse the kernel fitted above and disable the
# optimizer, so subsequent fits only condition on new data without
# re-optimising the kernel.
gp_model.set_params(optimizer=None, kernel=gp_model.kernel_)

In [ ]:
gp_model.kernel_  # the (frozen) fitted kernel before refitting

In [ ]:
gp_model.fit(rxs2, rys2)  # refit on all 12 points with the frozen kernel

In [ ]:
gp_model.kernel_  # unchanged, since the optimizer is disabled

In [ ]:
gp_mu, gp_sigma = gp_model.predict(xs.reshape(-1,1), return_std=True)
gp_sigma = gp_sigma.reshape(-1,1)

In [ ]:
plt.figure(figsize=(16,6))
plt.plot(xs, ys, 'g-')
plt.plot(rxs2, rys2, 'bo')
plt.plot(xs, gp_mu, 'r-')
plt.fill_between(xs, gp_mu.flatten()-2*gp_sigma.flatten(), gp_mu.flatten()+2*gp_sigma.flatten(), alpha=0.3)
plt.show()

In [ ]:
# Draw one sample function from the posterior at the 100 query points.
sample = gp_model.sample_y(xs.reshape(-1,1), n_samples=1, random_state=0)
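
As a quick visual check, the drawn sample can be plotted against the posterior mean on the same grid (flatten() handles the extra target axis that sample_y may return when the model was fit with a 2-D y).

In [ ]:
# One posterior sample (black) against the posterior mean (red).
plt.figure(figsize=(16,6))
plt.plot(xs, gp_mu, 'r-')
plt.plot(xs, sample.flatten(), 'k-')
plt.show()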

In [ ]:
# Build a much larger query grid (kept separate from the plotting grid xs).
%time xs_big = np.linspace(0, 5000, num=10000)
xs_big.shape

In [ ]:
# Step into sample_y with the debugger: drawing a sample at 10000 points
# requires building and factorising the dense 10000 x 10000 posterior
# covariance, which is what makes this call slow.
import pdb
def debug_sample():
    pdb.set_trace()
    return gp_model.sample_y(xs_big.reshape(-1, 1), n_samples=1, random_state=0)
debug_sample()

In [ ]:
# Sampling on a coarse 10-point grid is cheap, unlike the 10000-point grid above.
xs2 = np.linspace(x_min, x_max, 10).reshape(-1,1)
sample2 = gp_model.sample_y(xs2, n_samples=1, random_state=0)
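
The shape returned by sample_y is worth a quick look: depending on the shape of y at fit time (and the scikit-learn version) it is either (n_points, n_samples) or (n_points, n_targets, n_samples), which is why the plot below calls flatten().

In [ ]:
sample2.shape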

In [ ]:
plt.figure(figsize=(16,6))
#plt.plot(xs, ys, 'g-')
plt.plot(rxs, rys, 'bo')
plt.plot(xs, gp_mu, 'r-')
#plt.fill_between(xs, gp_mu.flatten()-2*gp_sigma.flatten(), gp_mu.flatten()+2*gp_sigma.flatten(), alpha=0.3)
#plt.plot(xs, sample.flatten(), 'bo', color='black')
plt.plot(xs2.flatten(), sample2.flatten(), 'k-')
plt.show()

In [ ]: