In [1]:
from __future__ import division, print_function
In [2]:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
In [3]:
import george
from george import kernels
import pandas as pd
from astropy.stats import median_absolute_deviation as MAD
In [68]:
fn = '../data/215900519P-ep20160806.csv'
# Skip the 39-line header block of the photometry export.
df = pd.read_csv(fn, skiprows=39)
# Time since the first cadence. Use .iloc[0] (positional) rather than
# df.t[0] (label-based), which breaks if the index is not a default RangeIndex.
df['time'] = df.t - df.t.iloc[0]
# Median-normalised relative flux from the 2-D roll-detrended photometry.
df['flux'] = df.fdt_t_roll_2D / np.median(df.fdt_t_roll_2D) - 1.0
# Constant per-point uncertainty: MAD of the flux scaled by 1/10.
# NOTE(review): the /10 factor looks ad-hoc — confirm against the data source.
df['ferr'] = np.ones_like(df.t) * MAD(df.flux) / 10
print(np.shape(df.time), np.shape(df.flux), np.shape(df.ferr))
In [69]:
# Quick look at the detrended light curve: first five days, clipped to +/-3% flux.
ax = plt.gca()
ax.scatter(df['time'], df['flux'])
ax.set_xlim(0, 5)
ax.set_ylim(-0.03, 0.03)
Out[69]:
In [193]:
# Keep the first 15 days and clip outliers beyond +/-2% relative flux.
# Build the boolean mask once (and use `&`, the idiomatic element-wise AND,
# instead of `*`) so the three selections cannot drift out of sync.
mask = (df.time < 15) & (np.abs(df.flux) < 0.02)
y = df.loc[mask, 'flux']
yerr = df.loc[mask, 'ferr']
t = df.loc[mask, 'time']
# Sanity check: all three selections must have the same length.
# (Original printed t's shape twice and never y's — fixed.)
print(np.shape(t)), print(np.shape(yerr)), print(np.shape(y))
Out[193]:
In [194]:
# Initial GP kernel hyperparameters.
A = 0.001      # amplitude of the quasi-periodic term
l = 10.        # ExpSquared length scale (timescale of spot evolution)
G = 10.0       # ExpSine2 "gamma" — controls harmonic complexity of the periodic term
sigma = 0.001  # white-noise variance
P = 2.0        # initial guess for the periodic (rotation) period
# Quasi-periodic kernel: smooth envelope x strictly periodic term, plus white noise.
# (Removed a dead commented-out duplicate of this same line.)
kernel = A * kernels.ExpSquaredKernel(l) * kernels.ExpSine2Kernel(G, P) + kernels.WhiteKernel(sigma)
In [195]:
gp = george.GP(kernel)  # solver=george.HODLRSolver scales better for large N
# Factorise the covariance matrix. Pass the per-point uncertainties so george
# adds them to the covariance diagonal — `yerr` was computed above but was
# silently dropped by the original `gp.compute(t, )` call.
gp.compute(t, yerr)
print(gp.lnlikelihood(y))
print(gp.grad_lnlikelihood(y))
In [196]:
import scipy.optimize as op

def nll(params):
    """Negative log-likelihood of the GP at kernel vector `params`.

    Returns a large finite value instead of inf/nan, because scipy's
    optimizers do not handle non-finite objectives well.
    """
    gp.kernel[:] = params
    ll = gp.lnlikelihood(y, quiet=True)
    return -ll if np.isfinite(ll) else 1e25

def grad_nll(params):
    """Gradient of `nll` with respect to the kernel vector."""
    gp.kernel[:] = params
    return -gp.grad_lnlikelihood(y, quiet=True)

# The GP must be computed once before the optimization starts. Pass the
# per-point uncertainties (the original dropped `yerr` here).
gp.compute(t, yerr)
# Initial ln-likelihood for comparison with the optimized value below.
print(gp.lnlikelihood(y))
# Optimize the kernel hyperparameters with the analytic gradient.
p0 = gp.kernel.vector
results = op.minimize(nll, p0, jac=grad_nll)
# Write the best-fit parameters back into the kernel and report the result.
gp.kernel[:] = results.x
print(gp.lnlikelihood(y))
In [197]:
print(results)
In [207]:
# Dense time grid covering the selected data range, for the predictive curve.
x = np.linspace(0, 15, 500)
# Predictive mean and full covariance of the GP conditioned on the flux data.
mu, cov = gp.predict(y, x)
# 1-sigma predictive uncertainty from the covariance diagonal.
std = np.diag(cov) ** 0.5
In [209]:
fig, ax = plt.subplots(1, 1, figsize=[11, 6])
# Data with 1-sigma error bars.
ax.errorbar(t, y, yerr=yerr, fmt=".k", capsize=0, alpha=0.7)
# GP predictive mean +/- 1-sigma band.
ax.fill_between(x, mu - std, mu + std, color="#ff7f0e", alpha=0.3,
                edgecolor="none")
ax.set_xlim(8, 15)
# Added x-label so the figure stands alone.
# NOTE(review): presumably days (K2 cadence times) — confirm units of `t`.
ax.set_xlabel('Time (days)', fontsize=17)
ax.set_ylabel('Relative brightness', fontsize=17)
ax.minorticks_on()
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]:
In [ ]: