The filename of the figure is [TBD].pdf.
Author: Michael Gully-Santiago, gully@astro.as.utexas.edu
Date: January 25, 2015
In [1]:
%pylab inline
import emcee
import triangle
import pandas as pd
import seaborn as sns
from astroML.decorators import pickle_results
In [2]:
# Set the style first, then the context; calling sns.set() afterwards would reset the context settings.
sns.set(style="ticks")
sns.set_context("paper", font_scale=2.0, rc={"lines.linewidth": 2.5})
Read in the data. We want the "VG12" sample, which was measured in run11 (the on-mesh measurement).
In [3]:
df = pd.read_csv('../data/cln_20130916_cary5000.csv', index_col=0)
df = df[df.index > 1250.0]
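As an optional check (not run in the original cells), one can list the available run columns and the wavelength range that survives the cut, using only standard pandas calls:
# Optional sanity check: list the run columns and the usable wavelength range.
print df.columns.tolist()
print df.index.min(), df.index.max()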
In [4]:
plt.plot(df.index[::4], df.run11[::4]/100.0, label='On-mesh')
plt.plot(df.index, df.run10/100.0, label='Off-mesh')
plt.plot(df.index, df.run12/100.0, label='Shard2')
plt.plot(df.index, df.run9/100.0, label='DSP')
plt.plot(df.index, df.run15/100.0, label='VG08')
plt.plot(df.index, df.run17/100.0, label='VG08 alt')
#plt.plot(x, T_gap_Si_withFF_fast(x, 65.0, 0.5, n1)/T_DSP, label='Model')
plt.legend(loc='best')
plt.ylim(0.80, 1.05)
Out[4]: [figure: fractional transmission vs. wavelength for the On-mesh, Off-mesh, Shard2, DSP, VG08, and VG08 alt runs]
Import all the local models, saved locally as etalon.py. See the paper for derivations of these equations.
In [5]:
from etalon import *
np.random.seed(78704)
In [6]:
# Load the real data and decimate it by a factor of 4.
x = df.index.values[::4]
N = len(x)
# Define T_DSP for the model
T_DSP = T_gap_Si(x, 0.0)
n1 = sellmeier_Si(x)
# Define the per-point measurement uncertainty
yerr = 0.0004*np.ones(N)
iid_cov = np.diag(yerr ** 2)
# Select the spectrum of interest (run11) and convert from percent to
# fractional transmission; the model is normalized by T_DSP in the likelihood.
y = df.run11.values[::4]/100.0
Define the likelihood. The model here is an equal (50/50) mixture of two gap sizes, $\epsilon$ and $d_M + \epsilon$, with the fill-factor argument fixed at 1 for each component. Correlated residuals are handled with a Gaussian process: a squared-exponential kernel with amplitude $a$ and length scale $s$ is added to the diagonal measurement covariance.
In [7]:
def lnlike(dM, eps, lna, lns):
    a, s = np.exp(lna), np.exp(lns)
    # Squared-exponential kernel added to the i.i.d. measurement covariance
    off_diag_terms = a**2 * np.exp(-0.5 * (x[:, None] - x[None, :])**2 / s**2)
    C = iid_cov + off_diag_terms
    sgn, logdet = np.linalg.slogdet(C)
    if sgn <= 0:
        return -np.inf
    # Equal mixture of the two gap sizes, normalized by the DSP transmission
    T_mix = 0.5 * (T_gap_Si_withFF_fast(x, dM+eps, 1.0, n1) +
                   T_gap_Si_withFF_fast(x, eps, 1.0, n1)) / T_DSP
    r = y - T_mix
    return -0.5 * (np.dot(r, np.linalg.solve(C, r)) + logdet)
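For reference, the function above evaluates the Gaussian-process marginal log-likelihood (up to the additive constant $-\frac{N}{2}\ln 2\pi$, which does not affect the sampling):
$$\ln \mathcal{L} = -\frac{1}{2}\left[\mathbf{r}^{\mathsf{T}} C^{-1}\mathbf{r} + \ln\det C\right], \qquad C_{ij} = \sigma_i^2\,\delta_{ij} + a^2 \exp\!\left(-\frac{(x_i - x_j)^2}{2 s^2}\right),$$
where $\mathbf{r} = \mathbf{y} - T_{\mathrm{mix}}(\mathbf{x})$ and $T_{\mathrm{mix}} = \frac{1}{2}\left[T_{\mathrm{gap}}(d_M+\epsilon) + T_{\mathrm{gap}}(\epsilon)\right]/T_{\mathrm{DSP}}$.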
Define the prior. We put a Normal prior on $d_M$: $d_M \sim \mathcal{N}(\hat{d_M}, \sigma_{d_M})$, with $\hat{d_M} = 49$ nm and $\sigma_{d_M} = 6$ nm, plus uniform box constraints on all four parameters.
In [8]:
def lnprior(dM, eps, lna, lns):
    prior = -0.5 * ((49.0-dM)/6.0)**2.0
    if not (31.0 < dM < 67 and 0.0 < eps < 60.0 and -12 < lna < -2 and 0 < lns < 10):
        return -np.inf
    return prior
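Written out, the log-prior implemented above is (with $d_M$ and $\epsilon$ in nm):
$$\ln p = \begin{cases} -\frac{1}{2}\left(\frac{d_M - 49}{6}\right)^2 & \text{if } 31 < d_M < 67,\ 0 < \epsilon < 60,\ -12 < \ln a < -2,\ 0 < \ln s < 10,\\ -\infty & \text{otherwise.} \end{cases}$$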
Combine likelihood and prior to obtain the posterior.
In [9]:
def lnprob(p):
    lp = lnprior(*p)
    if not np.isfinite(lp):
        return -np.inf
    return lp + lnlike(*p)
Set up emcee.
In [10]:
@pickle_results('SiGaps_12_VG12_twoGaps-sampler.pkl')
def hammer_time(ndim, nwalkers, dM_Guess, eps_Guess, a_Guess, s_Guess, nburnins, ntrials):
    # Initialize the walkers in a small ball (1%) around the initial guess
    p0 = np.array([dM_Guess, eps_Guess, np.log(a_Guess), np.log(s_Guess)])
    pos = [p0 + 1.0e-2*p0 * np.random.randn(ndim) for i in range(nwalkers)]
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
    # Burn in, then reset and run the production chain
    pos, lp, state = sampler.run_mcmc(pos, nburnins)
    sampler.reset()
    pos, lp, state = sampler.run_mcmc(pos, ntrials)
    return sampler
Set up the initial conditions.
In [11]:
np.random.seed(78704)
ndim, nwalkers = 4, 32
dM_Guess = 49.0
eps_Guess = 15.0
a_Guess = 0.0016
s_Guess = 25.0
nburnins = 200
ntrials = 700
Run the burn-in phase, then run the full MCMC and pickle the results.
In [12]:
sampler = hammer_time(ndim, nwalkers, dM_Guess, eps_Guess, a_Guess, s_Guess, nburnins, ntrials)
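As an optional sanity check on the run (a sketch, not part of the original analysis), one can inspect the walkers' mean acceptance fraction, which emcee exposes on the sampler; values roughly between 0.2 and 0.5 are generally considered healthy for the ensemble sampler.
# Optional diagnostic: mean acceptance fraction across the 32 walkers.
print "Mean acceptance fraction: {:.3f}".format(np.mean(sampler.acceptance_fraction))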
Linearize $a$ and $s$ for easy inspection of the values.
In [13]:
chain = sampler.chain
samples_lin = copy(sampler.flatchain)
samples_lin[:, 2:] = np.exp(samples_lin[:, 2:])
Inspect the chain.
In [14]:
fig, axes = plt.subplots(4, 1, figsize=(5, 6), sharex=True)
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.96, top=0.98,
                    wspace=0.0, hspace=0.05)
[a.plot(np.arange(chain.shape[1]), chain[:, :, i].T, "k", alpha=0.5)
 for i, a in enumerate(axes)]
[a.set_ylabel("${0}$".format(l)) for a, l in zip(axes, ["d_M", "\epsilon", "\ln a", "\ln s"])]
axes[-1].set_xlim(0, chain.shape[1])
axes[-1].set_xlabel("iteration");
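Another optional diagnostic (a sketch, not part of the original analysis) is the integrated autocorrelation time, which emcee can estimate from the chain; with only 700 production steps the estimate can be unreliable, so it is wrapped defensively here.
# Optional diagnostic: integrated autocorrelation time per parameter.
# Short chains can make this estimate noisy or cause it to fail outright.
try:
    tau = sampler.get_autocorr_time()
    print "Integrated autocorrelation times:", tau
except Exception as err:
    print "Autocorrelation estimate unavailable:", err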
Make a triangle corner plot of the linearized samples.
In [15]:
fig = triangle.corner(samples_lin,
                      labels=map("${0}$".format, ["d_M", "\epsilon", "a", "s"]),
                      quantiles=[0.16, 0.84])
In [16]:
fig = triangle.corner(samples_lin[:,0:2],
                      labels=map("${0}$".format, ["d_M", "\epsilon"]),
                      quantiles=[0.16, 0.84])
plt.savefig("VG12_twoGaps_cornerb.pdf")
Calculate confidence intervals: the median and the 16th/84th percentiles of each parameter.
In [17]:
dM_mcmc, eps_mcmc, a_mcmc, s_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
                                        zip(*np.percentile(samples_lin, [16, 50, 84],
                                                           axis=0)))
dM_mcmc, eps_mcmc, a_mcmc, s_mcmc
Out[17]:
In [18]:
print "{:.0f}^{{+{:.0f}}}_{{-{:.0f}}}".format(*dM_mcmc)
print "{:.0f}^{{+{:.0f}}}_{{-{:.0f}}}".format(*eps_mcmc)
Overlay draws from the Gaussian Process.
In [19]:
plt.figure(figsize=(6,3))
for dM, eps, a, s in samples_lin[np.random.randint(len(samples_lin), size=60)]:
    off_diag_terms = a**2 * np.exp(-0.5 * (x[:, None] - x[None, :])**2 / s**2)
    C = iid_cov + off_diag_terms
    fit = 0.5*(T_gap_Si_withFF_fast(x, dM+eps, 1.0, n1)+T_gap_Si_withFF_fast(x, eps, 1.0, n1))/T_DSP
    vec = np.random.multivariate_normal(fit, C)
    plt.plot(x, vec, "-b", alpha=0.06)
plt.step(x, y,color="k", label='Measurement')
fit = 0.5*(T_gap_Si_withFF_fast(x, dM_mcmc[0]+eps_mcmc[0], 1, n1)+T_gap_Si_withFF_fast(x, eps_mcmc[0], 1, n1))/T_DSP
fit_label = 'Model with $d_M={:.0f}$ nm, $\epsilon={:.0f}$'.format(dM_mcmc[0], eps_mcmc[0])
plt.plot(x, fit, '--', color=sns.xkcd_rgb["pale red"], alpha=1.0, label=fit_label)
fit1 = T_gap_Si_withFF_fast(x, 43, 0.5, n1)/T_DSP
fit2 = T_gap_Si_withFF_fast(x, 55, 0.5, n1)/T_DSP
fit2_label = 'Model with $d_M={:.0f}\pm{:.0f}$ nm, $\epsilon={:.0f}$'.format(49, 6, 0)
plt.fill_between(x, fit1, fit2, alpha=0.6, color=sns.xkcd_rgb["green apple"])
plt.plot([-10, -9], [-10, -9],"-", alpha=0.85, color=sns.xkcd_rgb["green apple"], label=fit2_label)
plt.plot([-10, -9], [-10, -9],"-b", alpha=0.85, label='Draws from GP')
plt.plot([0, 5000], [1.0, 1.0], '-.k', alpha=0.5)
plt.fill_between([1200, 1250], 2.0, 0.0, hatch='\\', alpha=0.4, color='k', label='Si absorption cutoff')
plt.xlabel('$\lambda$ (nm)');
plt.ylabel('$T_{gap}$');
plt.xlim(1200, 2501);
plt.ylim(0.9, 1.019);
plt.legend(loc='lower right')
plt.savefig("VG12_twoGapsb.pdf", bbox_inches='tight')
The end.