In [1]:
%matplotlib inline
%config InlineBackend.figure_format = "retina"
from __future__ import print_function
from matplotlib import rcParams
rcParams["savefig.dpi"] = 100
rcParams["figure.dpi"] = 100
rcParams["font.size"] = 20
In [2]:
import george
george.__version__
Out[2]:
In [3]:
import kplr
import numpy as np
import matplotlib.pyplot as pl
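# Download the quarter-3 long-cadence light curve for KIC 10295224 using kplr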
client = kplr.API()
lc = client.light_curves(10295224, short_cadence=False, sci_data_quarter=3)[0]
data = lc.read()
x = data["TIME"]
y = data["SAP_FLUX"]
yerr = data["SAP_FLUX_ERR"]
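# Normalize the flux and its uncertainties by the median of the finite flux measurements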
mu = np.median(y[np.isfinite(y)])
y /= mu
yerr /= mu
pl.errorbar(x, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("time [days]")
pl.ylabel("relative brightness");
In [4]:
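# Phase-fold the light curve on a trial period of about 2.93 days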
pl.errorbar(x % 2.9327280226002399, y, yerr=yerr, fmt=".k", capsize=0)
pl.xlabel("time [days]")
pl.ylabel("relative brightness");
In [5]:
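# Find the contiguous stretches of finite timestamps; gaps in the data split the quarter into segments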
from scipy.ndimage import label
sects, count = label(np.isfinite(x))
ranges = [(x[sects == s].min(), x[sects == s].max()) for s in range(1, count+1)]
ranges = [rng for rng in ranges if rng[1] > rng[0]]
print(ranges)
In [6]:
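# Keep only the samples where the time, flux, and uncertainty are all finite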
m = np.isfinite(x) & np.isfinite(y) & np.isfinite(yerr)
x = x[m]
y = y[m]
yerr = yerr[m]
In [7]:
from george import kernels
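# Quasi-periodic term: an exponential-sine-squared kernel (period ~2.75 days) times a squared-exponential envelope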
kernel = np.var(y) * kernels.ExpSine2Kernel(log_period=np.log(2.75), gamma=18.0) * kernels.ExpSquaredKernel(10.)
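# Add a Matern-3/2 term restricted ("blocked") to each contiguous segment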
for rng in ranges:
    kernel += np.var(y) * kernels.Matern32Kernel(50., block=[rng])
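# Build the GP using the fast HODLRSolver and a constant mean of 1.0 (the normalized flux level)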
gp = george.GP(kernel, solver=george.HODLRSolver, mean=1.0)
gp.compute(x, yerr)
K = gp.get_matrix(x)
pl.imshow(K, cmap="gray", interpolation="nearest")
pl.gca().set_xticklabels([])
pl.gca().set_yticklabels([]);
In [8]:
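# Freeze the per-segment Matern parameters so that only the first four (quasi-periodic) hyperparameters are fit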
names = gp.get_parameter_names()
for n in names[4:]:
    gp.freeze_parameter(n)
gp.get_parameter_names()
Out[8]:
In [9]:
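# Negative log-likelihood and its gradient as functions of the free parameter vector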
def nll0(p):
    gp.set_parameter_vector(p)
    return -gp.log_likelihood(y, quiet=True)

def grad_nll0(p):
    gp.set_parameter_vector(p)
    return -gp.grad_log_likelihood(y, quiet=True)
In [10]:
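# Evaluate the objective and its gradient at the initial parameter vector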
print(nll0(gp.get_parameter_vector()))
print(grad_nll0(gp.get_parameter_vector()))
In [11]:
from scipy.optimize import minimize
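# Bound the log-period to [log(2.5), log(3.0)] and maximize the likelihood with L-BFGS-B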
bounds = gp.get_parameter_bounds()
bounds[2] = (np.log(2.5), np.log(3.0))
result = minimize(nll0, gp.get_parameter_vector(), jac=grad_nll0, method="L-BFGS-B",
                  bounds=bounds)
print(result)
In [12]:
np.exp(result.x)
Out[12]:
In [13]:
gp.set_parameter_vector(result.x)
In [14]:
K = gp.get_matrix(x)
pl.imshow(K, cmap="gray", interpolation="nearest")
pl.gca().set_xticklabels([])
pl.gca().set_yticklabels([]);
In [15]:
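# Compute the GP predictive mean at the observed times (skipping the predictive covariance)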
mu = gp.predict(y, x, return_cov=False)
In [16]:
pl.plot(x, y, ".k")
pl.plot(x, mu, "g")
Out[16]:
In [17]:
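# Thaw all of the parameters so that the per-segment Matern terms can be fit as well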
gp.thaw_all_parameters()
In [18]:
gp.get_parameter_names()
Out[18]:
In [19]:
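# Tie the Matern parameters across segments: optimize a 6-parameter vector (4 quasi-periodic + 2 shared Matern)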
def update_gp_with_vector(p):
    # Every blocked Matern term shares the same two parameters, so repeat
    # them once per contiguous segment to rebuild the full parameter vector.
    vector = np.concatenate([p[:4]] + [p[4:] for _ in ranges])
    gp.set_parameter_vector(vector)

def nll(p):
    update_gp_with_vector(p)
    return -gp.log_likelihood(y, quiet=True)

def grad_nll(p):
    update_gp_with_vector(p)
    grad = gp.grad_log_likelihood(y, quiet=True)
    # Accumulate the gradient of each tied Matern pair into the shared
    # entries 4 and 5 of the 6-parameter vector.
    n = 6
    for _ in ranges[:-1]:
        grad[4:6] += grad[n:n+2]
        n += 2
    return -grad[:6]
In [35]:
nll(gp.get_parameter_vector()[:6])
In [66]:
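# Re-fit the 6-parameter vector with the shared Matern terms free, placing a lower bound of 2*log(3) on the Matern metric parameter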
bounds = bounds[:4] + [(None, None), (2*np.log(3.0), None)]
result = minimize(nll, gp.get_parameter_vector()[:6], jac=grad_nll, method="L-BFGS-B",
                  bounds=bounds)
In [67]:
result
Out[67]:
In [68]:
update_gp_with_vector(result.x)
In [69]:
K = gp.get_matrix(x)
pl.imshow(np.log(K), cmap="gray", interpolation="nearest")
pl.gca().set_xticklabels([])
pl.gca().set_yticklabels([]);
In [70]:
gp.kernel
Out[70]:
In [71]:
mu = gp.predict(y, x, return_cov=False)
pl.plot(x, y, ".k")
pl.plot(x, mu, "g")
Out[71]:
In [73]:
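# Remove the quasi-periodic term from the covariance to isolate the non-periodic component of the model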
K_0 = gp.get_matrix(x) - gp.kernel.k1.k1.k1.get_value(x[:, None])
In [74]:
pred = np.dot(K_0, gp.apply_inverse(y)) + 1.0
pl.plot(x, y-pred, ".g")
Out[74]:
In [75]:
period = np.exp(gp.kernel.k1.k1.k1.k1.k2.log_period)
In [76]:
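# Draw a sample from the GP prior at the observed times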
samp = gp.sample(x)
In [77]:
pl.plot(x, samp, ".k")
Out[77]:
In [78]:
gp.get_parameter_names()
Out[78]: