Quantum Hamiltonian Learning: Nitrogen Vacancies in Diamond

Once all dependencies are installed, this Jupyter notebook should run from top to bottom with no intervention needed, by selecting Cell > Run All. You may want to adjust the settings in the Parallel Setup section below. This entire notebook takes us about an hour to run on a computer with a four-core Intel Core i7-4790 processor.

Import Modules


In [1]:
%matplotlib inline
import warnings
warnings.filterwarnings('default', category=DeprecationWarning, module='.*/qinfer/.*')
from __future__ import division # Ensures that a/b is always a float.


/home/cgranade/anaconda3/envs/nvmeas/lib/python2.7/site-packages/matplotlib/style/core.py:197: UserWarning: In /home/cgranade/.config/matplotlib/stylelib/ggplot-rq.mplstyle: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.
  warnings.warn(message)

In [2]:
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import qinfer as qi
from models import RabiRamseyModel, ReferencedPoissonModel
import models as m
import numpy.lib.recfunctions as rfn
import scipy.io as sio
from scipy.special import gammaln, xlogy, erf, erfinv
from scipy.misc import logsumexp
from scipy.signal import find_peaks_cwt
from scipy.optimize import curve_fit
from scipy.stats import chi2
from datetime import datetime
import os

Setup


In [3]:
np.random.seed(1234)

Plotting

Plotting and exporting. Set the overwrite flag to False if you do not want regenerated figures to overwrite the copies already saved to disk.


In [4]:
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'

SIZE = 14
plt.rc('font', size=SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=SIZE)     # fontsize of the x and y labels
plt.rc('xtick', labelsize=10)      # fontsize of the tick labels
plt.rc('ytick', labelsize=10)      # fontsize of the tick labels
plt.rc('legend', fontsize=SIZE)    # legend fontsize
plt.rc('figure', titlesize=SIZE)   # size of the figure title

overwrite = True
def export_figure(fig, name, extensions=['.png', '.pdf']):
    for ext in extensions:
        fname = '../fig/' + name + ext
        if not os.path.isfile(fname):
            print('Saving {}'.format(fname))
            fig.savefig(fname)
        elif overwrite:
            print('Overwriting {}'.format(fname))
            fig.savefig(fname)
        else:
            print('Skipping {}'.format(fname))

Parallel Setup

The speedup is near-linear in the number of physical CPU cores used. Set parallel to True if you have an ipyparallel cluster available. You may need to change the argument of Client() to match your non-default profile.


In [5]:
parallel=True
if parallel:
    from ipyparallel import Client
    rc = Client()
    dview = rc[:]
    dview.use_dill()
    
    with dview.sync_imports():
        import os

    # Change directory on engines so that we can import models.
    if dview.execute('os.chdir("{}")'.format(os.getcwd())).get():
        print "Changed engine directories to notebook directory."
        
    # To ensure that we don't enter into a Matplotlib event loop
    # on our engines, we set the MPLBACKEND to the non-interactive
    # Agg backend first. This is especially useful if the engines
    # are connected via SSH.
    if dview.execute('os.environ["MPLBACKEND"] = "Agg"').get():
        print "Changed MPL backend."
        
    # Force each engine to import QInfer before proceeding.
    if dview.execute('import qinfer as qi').get():
        print "Successfully imported QInfer on engines."
        
    if dview.execute('import models').get():
        print "Successfully imported models."
    
    print "Engines connected: {}".format(len(rc.ids))


importing os on engine(s)
Changed engine directories to notebook directory.
Changed MPL backend.
Successfully imported QInfer on engines.
Successfully imported models.
Engines connected: 8

In [6]:
if parallel:
    assert(dview.map_sync(lambda x: models.RabiRamseyModel, [True]) is not None)

Test RabiRamseyModel for bugs

On development versions of QInfer, unit-testing functionality is exposed as part of the public interface, so we can use it here to test our models. This functionality is not available in the release version of QInfer, in which case we skip the tests for now.


In [7]:
try:
    import qinfer.tests as qit
except ImportError:
    qit = None

In [8]:
if qit:
    test_eps = rfn.merge_arrays([
        np.linspace(0, 1, 10, dtype=[('t','float')]),
        np.linspace(0, 4,10, dtype=[('tau','float')]),
        np.linspace(0, 2*np.pi, 10, dtype=[('phi','float')]),
        (np.random.normal(size=10)>0).astype([('emode','int')])
    ])
    test_prior = qi.UniformDistribution(np.array([
                [9,11], [10,12], [-1,1], [1.5,3.5], [0.5,5], [9,11]             
            ]))
    test_model = RabiRamseyModel()
    qit.test_model(test_model, test_prior, test_eps)


.......
----------------------------------------------------------------------
Ran 7 tests in 0.094s

OK

In [9]:
if qit:
    test_eps = rfn.merge_arrays([
        np.linspace(0,1,10, dtype=[('t','float')]),
        np.linspace(0,4,10, dtype=[('tau','float')]),
        np.linspace(0,2*np.pi,10, dtype=[('phi','float')]),
        (np.random.normal(size=10)>0).astype([('emode','int')]),
        np.array([0,1,2,0,1,2,0,1,2,2]).astype([('mode','int')])
    ])
    test_prior = qi.ProductDistribution(
            qi.UniformDistribution(np.array([
                [9,11], [10,12], [-1,1], [1.5,3.5], [0.5,5], [9,11]             
            ])),
            qi.GammaDistribution(mean=10000,var=1000),
            qi.GammaDistribution(mean=3000,var=300)
        )
    test_model = ReferencedPoissonModel(RabiRamseyModel())
    qit.test_model(test_model, test_prior, test_eps)


.......
----------------------------------------------------------------------
Ran 7 tests in 0.041s

OK

Load Data from Disk

Experiment Information

Details about the experiment are stored in a JSON file. The keys mean:

  • center_freq: (Hz) Center frequency of microwave circuit (synthesizer+intermediate freq)
  • dark_idx: Column index of dark counts (1 indexed)
  • bright_idx: Column index of bright counts (1 indexed)
  • signal_idx: Column index of signal counts (1 indexed)
  • filename: Glob description of the MATLAB data files. There is one file for every average
  • matlab_variable: Variable name of data in .mat file
  • motor_position: Unused for these experiments
  • n_avgs: Number of averages the experiment was told to perform (this may have been manually truncated by the experimentalist)
  • n_shots: Number of back-to-back repetitions of each experiment per average
  • rabi_guess: Unused for these experiments
  • rabi_times: (s) Pulse lengths of each Rabi experiment
  • ramsey_times: (s) Wait periods of each Ramsey experiment
  • t_pulse: (s) Length of tip pulse in Ramsey experiment
  • static_field: Unused for these experiments
  • uwave_dB: Power of the pulses in dB relative to maximum power (0 dB is maximum; -6 dB is half amplitude and hence quarter power, since power scales as $10^{\mathrm{dB}/10}$)

In [10]:
import json
with open('../data/rabi-ramsey-data1/notes.txt') as data_file:    
    exp_data = json.load(data_file)
rabi_times = 1e6 * eval(exp_data['RABI']['rabi_times'])
ramsey_times = 1e6 * eval(exp_data['RAMSEY']['ramsey_times'])
exp_data


Out[10]:
{u'RABI': {u'bright_idx': 1,
  u'center_freq': 2870000000.0,
  u'dark_idx': 2,
  u'filename': u'rabi_data/Avg{:03d}.mat',
  u'matlab_variable': u'M',
  u'motor_pos': [0, 0, 0],
  u'n_avgs': 350,
  u'n_shots': 30000,
  u'rabi_guess': 5600000.0,
  u'rabi_times': u'np.linspace(8e-9,800e-9,100)',
  u'signal_idx': 3,
  u'static_field': [0, 0, 0],
  u'uwave_dB': 0},
 u'RAMSEY': {u'bright_idx': 1,
  u'center_freq': 2870000000.0,
  u'dark_idx': 2,
  u'filename': u'ramsey_data/Avg{:03d}.mat',
  u'matlab_variable': u'M',
  u'motor_pos': [0, 0, 0],
  u'n_avgs': 400,
  u'n_shots': 30000,
  u'rabi_guess': 5600000.0,
  u'ramsey_times': u'np.linspace(10e-9,2e-6,200)',
  u'signal_idx': 3,
  u'static_field': [0, 0, 0],
  u't_pulse': 4.4e-08,
  u'uwave_dB': 0}}

Raw Counts Data Import

Here we load the data from disk. This is also where we specify that the data are to be batched into ten chronological groups. Note that successive averages (the *.mat files) were taken chronologically, with at most a minute of delay between them.


In [11]:
# Only load up to the 400th average to make a nice round number.
rabi_data = m.load_averages('../data/rabi-ramsey-data1/rabi_data/*.mat',[])[:400,...]
ramsey_data = m.load_averages('../data/rabi-ramsey-data1/ramsey_data/*.mat',[])[:400,...]
def batch_data(data, n_batch):
    # Sum consecutive averages together into n_batch chronological batches.
    sh = data.shape
    assert(np.mod(sh[0]/n_batch,1)==0)
    return np.sum(data.reshape((n_batch, int(sh[0]/n_batch)) + sh[1:]), axis=1)

n_batch = 10
rabi_data = batch_data(rabi_data, n_batch)
ramsey_data = batch_data(ramsey_data, n_batch)
idx_bright, idx_dark, idx_sig = 0,1,2

ramsey_bright_refs = ramsey_data[:,:,idx_bright]
ramsey_dark_refs = ramsey_data[:,:,idx_dark]
rabi_bright_refs = rabi_data[:,:,idx_bright]
rabi_dark_refs = rabi_data[:,:,idx_dark]

n_rabi = rabi_data.shape[1]
n_ramsey = ramsey_data.shape[1]

Raw Data Plots


In [12]:
fig = plt.figure()
plt.plot(rabi_times, np.sum(rabi_data,axis=0),lw=2)
plt.xlabel(r'$t_r$ $(\mu $s$)$', size=15)
plt.ylabel('Total Photon Counts')
plt.title('Rabi Experiment')

export_figure(fig, 'rabi_raw_data')


Overwriting ../fig/rabi_raw_data.png
Overwriting ../fig/rabi_raw_data.pdf

In [13]:
fig = plt.figure()
plt.plot(ramsey_times, np.sum(ramsey_data,axis=0), lw=2)
plt.xlabel(r'$t_w$ $(\mu $s$)$', size=15)
plt.ylabel('Total Photon Counts')
plt.title('Ramsey Experiment')

export_figure(fig, 'ramsey_raw_data')


Overwriting ../fig/ramsey_raw_data.png
Overwriting ../fig/ramsey_raw_data.pdf

In [14]:
# NB: the spacing argument below is negative, which flips the sign and ordering of freqs;
# since the magnitude spectrum of real data is symmetric, this does not affect the plots.
freqs = np.fft.fftshift(np.fft.fftfreq(n_rabi, rabi_times[1]-rabi_times[2]))
rabi_fft = np.abs(np.fft.fftshift(np.fft.fft(np.sum(rabi_data[:,:,2],axis=0)-np.mean(np.sum(rabi_data[:,:,2],axis=0),axis=0))))

fig = plt.figure()
plt.plot(freqs, np.abs(rabi_fft), lw=2, c='r')
plt.xlim([0,40])
plt.xlabel(r'$f_w$ (MHz)', size=15)
plt.ylabel('arb.')
plt.title('Rabi Experiment FFT')

export_figure(fig, 'rabi_raw_data_fft')


Overwriting ../fig/rabi_raw_data_fft.png
Overwriting ../fig/rabi_raw_data_fft.pdf

In [15]:
freqs = np.fft.fftshift(np.fft.fftfreq(n_ramsey, ramsey_times[1]-ramsey_times[2]))
ramsey_fft = np.abs(np.fft.fftshift(np.fft.fft(np.sum(ramsey_data[:,:,2],axis=0)-np.mean(np.sum(ramsey_data[:,:,2],axis=0),axis=0))))

fig = plt.figure()
plt.plot(freqs, np.abs(ramsey_fft),lw=2,c='r')
plt.xlim([0,20])
plt.xlabel(r'$f_w$ (MHz)', size=15)
plt.ylabel('arb.')
plt.title('Ramsey Experiment FFT')

export_figure(fig, 'ramsey_raw_data_fft')


Overwriting ../fig/ramsey_raw_data_fft.png
Overwriting ../fig/ramsey_raw_data_fft.pdf

Test the peak finding function


In [16]:
freqs = np.fft.fftshift(np.fft.fftfreq(n_ramsey, ramsey_times[1]-ramsey_times[2]))
ramsey_fft = np.abs(np.fft.fftshift(np.fft.fft(ramsey_data[8,:,2]-np.mean(ramsey_data[8,:,2]))))
peak_ind = m.detect_peaks(ramsey_fft[freqs>0],2000)
print freqs[freqs>0][peak_ind]
plt.plot(freqs, np.abs(ramsey_fft))
plt.plot(freqs[freqs>0][peak_ind],ramsey_fft[peak_ind],'o')
plt.xlim([0,20])


[ 7.   3.   1.5]
Out[16]:
(0, 20)

We see that the scatter plots of the reference counts are quite different for Rabi and Ramsey; the two experiments were run on different days of the week.


In [17]:
fig = plt.figure()
plt.scatter(np.sum(rabi_dark_refs,axis=0),np.sum(rabi_bright_refs,axis=0),color='r',label='Rabi')
plt.scatter(np.sum(ramsey_dark_refs,axis=0),np.sum(ramsey_bright_refs,axis=0),color='b',label='Ramsey')
plt.xlabel('Total Dark Reference Counts')
plt.ylabel('Total Bright Reference Counts')
plt.legend(loc=2)

export_figure(fig, 'rabi_ramsey_ref_scatter')


Overwriting ../fig/rabi_ramsey_ref_scatter.png
Overwriting ../fig/rabi_ramsey_ref_scatter.pdf

Initialize QInfer Models

The QInfer models we will use for inference


In [18]:
ham_model = RabiRamseyModel()
ref_model = ReferencedPoissonModel(ham_model) 

model = qi.DirectViewParallelizedModel(ref_model, dview, serial_threshold=1) if parallel else ref_model

Construct Priors and Reference Posteriors

Model Parameter Prior

Model parameters:

  • 0: $\Omega$, Rabi strength (MHz); coefficient of $S_x$
  • 1: $\omega_e$, Zeeman frequency (MHz); coefficient of $S_z$
  • 2: $\Delta\omega_c$, ZFS detuning (MHz); coefficient of $S_z^2$
  • 3: $A_N$, nitrogen hyperfine splitting (MHz); modeled as an incoherent average
  • 4: $T_2^{-1}$, inverse of the electron $T_2^*$ (MHz)
  • 5: $\Omega_{\mathrm{Ramsey}}$, the Rabi strength (MHz) while doing a Ramsey experiment
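
Reading these off, the Hamiltonian implemented by RabiRamseyModel is presumably of the form (a sketch inferred from the parameter list above, not quoted from the code)

$$H = \Omega S_x + \omega_e S_z + \Delta\omega_c S_z^2,$$

with the signal incoherently averaged over the nitrogen hyperfine shifts $\omega_e \rightarrow \omega_e + \{-A_N, 0, +A_N\}$ and with $T_2^{-1}$ entering as an exponential decay envelope.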

Experiment parameters:

  • mode: Specifies whether a reference or signal count is being performed.
  • t: Pulse width (µs)
  • tau: Ramsey wait time (µs; only relevant if emode is RabiRamseyModel.RAMSEY)
  • phi: Ramsey phase between the two pulses (likewise only relevant for Ramsey)

In [19]:
model_prior = qi.UniformDistribution(np.array([
            [0,10],
            [0,10],
            [-5,5],
            [1.5,3.5],
            [100**-1,1**-1],
            [0,0]
        ]))

Reference Prior

Use an empirical Bayes prior for the references of each batch, where the empirical mean and variance are taken across the experiment configurations within that batch. We allow the variance to be multiplied by a factor (var_mult) to make the prior less informative than the empirical distribution. A different prior is used depending on whether the experiment is Rabi or Ramsey.


In [20]:
# reference means and covariances for each batch
rabi_ref_mean = np.mean(np.stack((rabi_bright_refs, rabi_dark_refs),axis=0),axis=2).T
rabi_ref_cov = np.array([np.cov(x) for x in np.stack((rabi_bright_refs, rabi_dark_refs),axis=0).transpose((1,0,2))])
ramsey_ref_mean = np.mean(np.stack((ramsey_bright_refs, ramsey_dark_refs),axis=0),axis=2).T
ramsey_ref_cov = np.array([np.cov(x) for x in np.stack((ramsey_bright_refs, ramsey_dark_refs),axis=0).transpose((1,0,2))])

# reference means and covariances for the entire dataset
rabi_ref_single_mean = np.mean(np.sum(np.stack((rabi_bright_refs, rabi_dark_refs), axis=0), axis=1), axis=1)
rabi_ref_single_cov = np.cov(np.sum(np.stack((rabi_bright_refs, rabi_dark_refs), axis=0), axis=1))
ramsey_ref_single_mean = np.mean(np.sum(np.stack((ramsey_bright_refs, ramsey_dark_refs), axis=0), axis=1), axis=1)
ramsey_ref_single_cov = np.cov(np.sum(np.stack((ramsey_bright_refs, ramsey_dark_refs), axis=0), axis=1))

# empirical priors for each batch
def ref_prior(idx_batch, emode, var_mult=1):
    if emode == RabiRamseyModel.RABI:
        return qi.ProductDistribution(
            qi.GammaDistribution(mean=rabi_ref_mean[idx_batch,0], var=var_mult * rabi_ref_cov[idx_batch,0,0]),
            qi.GammaDistribution(mean=rabi_ref_mean[idx_batch,1], var=var_mult * rabi_ref_cov[idx_batch,1,1])
        )
    else:
        return qi.ProductDistribution(
            qi.GammaDistribution(mean=ramsey_ref_mean[idx_batch,0], var=var_mult * ramsey_ref_cov[idx_batch,0,0]),
            qi.GammaDistribution(mean=ramsey_ref_mean[idx_batch,1], var=var_mult * ramsey_ref_cov[idx_batch,1,1])
        )
    
# empirical priors for entire dataset
def ref_single_prior(emode, var_mult=1):
    if emode == RabiRamseyModel.RABI:
        return qi.ProductDistribution(
            qi.GammaDistribution(mean=rabi_ref_single_mean[0], var=var_mult * rabi_ref_single_cov[0,0]),
            qi.GammaDistribution(mean=rabi_ref_single_mean[1], var=var_mult * rabi_ref_single_cov[1,1])
        )
    else:
        return qi.ProductDistribution(
            qi.GammaDistribution(mean=ramsey_ref_single_mean[0], var=var_mult * ramsey_ref_single_cov[0,0]),
            qi.GammaDistribution(mean=ramsey_ref_single_mean[1], var=var_mult * ramsey_ref_single_cov[1,1])
        )
    
def plot_ref_dist(dist, **kwargs):
    test_samples = dist.sample(1000)
    plt.scatter(test_samples[:,1],test_samples[:,0],**kwargs)

In [21]:
plot_ref_dist(ref_prior(8,RabiRamseyModel.RABI,var_mult=100), color='k')
plot_ref_dist(ref_prior(8,RabiRamseyModel.RAMSEY,var_mult=100),color='green')

plot_ref_dist(ref_single_prior(RabiRamseyModel.RABI,var_mult=100), color='k')
plot_ref_dist(ref_single_prior(RabiRamseyModel.RAMSEY,var_mult=100),color='green')


Uncorrelated Posterior

This function returns the product gamma posterior of the reference distribution given reference counts x and y. This is easy to compute when the prior is just a product of Gammas.
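
Concretely, a Gamma prior with mean $\mu$ and variance $\sigma^2$ has shape and rate $\alpha = \mu^2/\sigma^2$, $\beta = \mu/\sigma^2$, and the Gamma distribution is conjugate to the Poisson likelihood:

$$\lambda \sim \mathrm{Gamma}(\alpha, \beta), \quad x \mid \lambda \sim \mathrm{Poisson}(\lambda) \quad\Longrightarrow\quad \lambda \mid x \sim \mathrm{Gamma}(\alpha + x,\ \beta + 1).$$

This is exactly the update rule in the code below; the data_divider argument rescales the counts and the prior moments by a common factor.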


In [22]:
# posterior for each batch
def ref_uncorr_posterior(x, y, emode, idx_batch, var_mult=1, data_divider=1):
    ref_mean, ref_cov = (rabi_ref_mean, rabi_ref_cov)  \
        if emode == RabiRamseyModel.RABI else (ramsey_ref_mean, ramsey_ref_cov)
    mu_b, mu_d = ref_mean[idx_batch, 0] / data_divider, ref_mean[idx_batch, 1] / data_divider
    var_b, var_d = var_mult * ref_cov[idx_batch, 0,0] / data_divider, var_mult * ref_cov[idx_batch, 1,1] / data_divider
        
    # use the conjugate prior update rule for gamma/poisson
    return qi.ProductDistribution(
        qi.GammaDistribution(alpha=x/data_divider + mu_b**2 / var_b, beta=1 + mu_b / var_b),
        qi.GammaDistribution(alpha=y/data_divider + mu_d**2 / var_d, beta=1 + mu_d / var_d)
    )

# posteriors for entire dataset
def ref_single_uncorr_posterior(x, y, emode, var_mult=1, data_divider=1):
    ref_mean, ref_cov = (rabi_ref_single_mean, rabi_ref_single_cov) \
        if emode == RabiRamseyModel.RABI else (ramsey_ref_single_mean, ramsey_ref_single_cov)
    mu_b, mu_d = ref_mean[0] / data_divider, ref_mean[1] / data_divider
    var_b, var_d = var_mult * ref_cov[0,0] / data_divider, var_mult * ref_cov[1,1] / data_divider
        
    # use the conjugate prior update rule for gamma/poisson
    return qi.ProductDistribution(
        qi.GammaDistribution(alpha=x / data_divider + mu_b**2 / var_b, beta=1 + mu_b / var_b),
        qi.GammaDistribution(alpha=y / data_divider + mu_d**2 / var_d, beta=1 + mu_d / var_d)
    )

Correlated Posterior

This function also returns a posterior for the reference distribution, as above, but under the slightly more complicated bivariate Poisson model discussed in the appendix of the paper. We did not see significant improvement using this (technically better) posterior, at least in our reference-count regime.
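
Concretely, in the bivariate model (as implemented by BivariatePoissonPriorTerm.sample below) the bright and dark rates share a common latent component,

$$\lambda_b = \lambda_0 + \lambda_1, \qquad \lambda_d = \lambda_0 + \lambda_2, \qquad \lambda_i \sim \mathrm{Gamma}(\alpha_i, \beta_i),$$

so that the two reference counts are correlated through $\lambda_0$. The posterior is then a mixture, indexed by the shared count $k$, of products of Gamma distributions; ref_corr_posterior_weights computes the mixture weights in log space to avoid overflow.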


In [23]:
def ref_corr_posterior_weights(x, y, a0, b0, a1, b1, a2, b2, thresh=1e-2):
   
    def posgammaln(v):
        return gammaln(np.maximum(0,v))
    
    # we need to be quite careful about overflow; work in log space
    k = np.arange(np.min([x,y]))
    w = gammaln(k + a0) + posgammaln(x-k+a1) + posgammaln(y-k+a2) \
        + xlogy(k, (1+b1)*(1+b2)/(1+b0)) - gammaln(k+1) \
        - posgammaln(x-k+1) - posgammaln(y-k+1)
    w = w - logsumexp(w)
    w = np.exp(w)
    
    if thresh is not None:
        # in this case we get rid of w values which are too small
        mask = w > np.max(w)*thresh
        return k[mask], w[mask]
    else:
        return k, w

class BivariatePoissonPriorTerm(qi.Distribution):
    def __init__(self, a0, b0, a1, b1, a2, b2):
        self._dist = qi.ProductDistribution(
            qi.GammaDistribution(alpha=a0, beta=b0),
            qi.GammaDistribution(alpha=a1, beta=b1),
            qi.GammaDistribution(alpha=a2, beta=b2),
        )
    @property
    def n_rvs(self):
        return 2
    def sample(self, n=1):
        samples = self._dist.sample(n)
        return np.vstack([samples[:,0]+samples[:,1], samples[:,0]+samples[:,2]]).T
    

def ref_corr_posterior_generic(x, y, ref_mean, ref_cov, var_mult=1, thresh=1e-2):
    
    mu_b, mu_d = ref_mean[0], ref_mean[1]
    var_b, var_d, cov = var_mult*ref_cov[0,0], var_mult*ref_cov[1,1], ref_cov[0,1]
    a0, b0 = cov, 1
    a1, b1 = (mu_b-cov)**2 / (var_b-cov), (mu_b-cov) / (var_b-cov)
    a2, b2 = (mu_d-cov)**2 / (var_d-cov), (mu_d-cov) / (var_d-cov)
    
    k, w = ref_corr_posterior_weights(x, y, a0, b0, a1, b1, a2, b2, thresh=thresh)
    
    a0 = a0 + k
    a1 = a1 + x - k
    a2 = a2 + y - k
    o = np.ones(k.size)
    b0, b1, b2 = b0 + o, b1 + o, b2 + o
    
    return qi.MixtureDistribution(w, BivariatePoissonPriorTerm, dist_args=np.vstack([a0,b0,a1,b1,a2,b2]).T)

def ref_corr_posterior(x, y, emode, idx_batch, var_mult=1, thresh=1e-2):
    ref_mean, ref_cov = (rabi_ref_mean[idx_batch,...], rabi_ref_cov[idx_batch,...])  \
        if emode == RabiRamseyModel.RABI else (ramsey_ref_mean[idx_batch,...], ramsey_ref_cov[idx_batch,...])
    return ref_corr_posterior_generic(x, y, ref_mean, ref_cov, var_mult=var_mult, thresh=thresh)

def ref_single_corr_posterior(x, y, emode, var_mult=1, thresh=1e-2):
    ref_mean, ref_cov = (rabi_ref_single_mean, rabi_ref_single_cov) \
        if emode == RabiRamseyModel.RABI else (ramsey_ref_single_mean, ramsey_ref_single_cov)
    return ref_corr_posterior_generic(x, y, ref_mean, ref_cov, var_mult=var_mult, thresh=thresh)

Comparison Plots

In this section we make a bunch of plots as sanity checks for the above priors and posteriors. It would be really easy to write "rabi" instead of "ramsey" somewhere, for example.

First look at the empirical distribution (over experiments) of some batch, along with samples drawn from the corresponding prior.


In [24]:
idx_batch = 8

fig = plt.figure(figsize=(10,4))

plt.subplot(1,2,1)
plot_ref_dist(ref_prior(idx_batch, RabiRamseyModel.RABI, var_mult=16), color='b', label='Pessimistic Prior')
plt.scatter(rabi_dark_refs[idx_batch,:], rabi_bright_refs[idx_batch,:], color='r', label='Actual Distribution')
plt.xlabel('Dark References')
plt.ylabel('Bright References')
plt.title(r'Rabi: Pessimistic Prior, $16\sigma^2$')
plt.legend(loc=1)

plt.subplot(1,2,2)
plot_ref_dist(ref_prior(idx_batch, RabiRamseyModel.RAMSEY, var_mult=16), color='b', label='Pessimistic Prior')
plt.scatter(ramsey_dark_refs[idx_batch,:], ramsey_bright_refs[idx_batch,:], color='r', label='Actual Distribution')
plt.xlabel('Dark References')
plt.ylabel('Bright References')
plt.title(r'Ramsey: Pessimistic Prior, $16\sigma^2$')
plt.legend(loc=1)


Out[24]:
<matplotlib.legend.Legend at 0x7f7cea823310>

Same, but with the prior over the summed-up data.


In [25]:
fig = plt.figure(figsize=(10,4))

plt.subplot(1,2,1)
plot_ref_dist(ref_single_prior(RabiRamseyModel.RABI, var_mult=16), color='b', label='Pessimistic Prior')
plt.scatter(np.sum(rabi_dark_refs, axis=0), np.sum(rabi_bright_refs, axis=0), color='r', label='Actual Distribution')
plt.xlabel('Dark References')
plt.ylabel('Bright References')
plt.title(r'Rabi: Pessimistic Prior, $16\sigma^2$')
plt.legend(loc=1)

plt.subplot(1,2,2)
plot_ref_dist(ref_single_prior(RabiRamseyModel.RAMSEY, var_mult=16), color='b', label='Pessimistic Prior')
plt.scatter(np.sum(ramsey_dark_refs, axis=0), np.sum(ramsey_bright_refs, axis=0), color='r', label='Actual Distribution')
plt.xlabel('Dark References')
plt.ylabel('Bright References')
plt.title(r'Ramsey: Pessimistic Prior, $16\sigma^2$')
plt.legend(loc=1)


Out[25]:
<matplotlib.legend.Legend at 0x7f7ceb501ed0>

Now look at the posteriors for batch 5 and experiment configuration 50.


In [26]:
idx_batch = 5
idx_eps = 50

fig = plt.figure(figsize=(10,4))

plt.subplot(1,2,1)
plot_ref_dist(ref_prior(idx_batch, RabiRamseyModel.RABI, var_mult=16),color='b',label='Prior')
x, y = rabi_bright_refs[idx_batch,idx_eps],rabi_dark_refs[idx_batch,idx_eps]
plot_ref_dist(ref_corr_posterior(x,y,RabiRamseyModel.RABI,idx_batch,var_mult=16),color='r',label='Correlated Posterior')
plot_ref_dist(ref_uncorr_posterior(x,y,RabiRamseyModel.RABI,idx_batch,var_mult=16),color='g',label='Uncorrelated Posterior')

plt.scatter(y,x,marker='*',color='k', label='datum', s=16)
plt.title('Rabi')

plt.subplot(1,2,2)
plot_ref_dist(ref_prior(idx_batch, RabiRamseyModel.RAMSEY, var_mult=16),color='b',label='Prior')
x, y = ramsey_bright_refs[idx_batch,idx_eps],ramsey_dark_refs[idx_batch,idx_eps]
plot_ref_dist(ref_corr_posterior(x,y,RabiRamseyModel.RAMSEY,idx_batch,var_mult=16),color='r',label='Correlated Posterior')
plot_ref_dist(ref_uncorr_posterior(x,y,RabiRamseyModel.RAMSEY,idx_batch,var_mult=16),color='g',label='Uncorrelated Posterior')

plt.scatter(y,x,marker='*',color='k', label='datum', s=16)
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title('Ramsey')

export_figure(fig, 'ref-posterior-comparison')


Overwriting ../fig/ref-posterior-comparison.png
Overwriting ../fig/ref-posterior-comparison.pdf

Finally, the same thing with the summed data.


In [27]:
idx_eps = 50

fig = plt.figure(figsize=(10,4))

plt.subplot(1,2,1)
plot_ref_dist(ref_single_prior(RabiRamseyModel.RABI, var_mult=16),color='b',label='Prior')
x, y = np.sum(rabi_bright_refs[:,idx_eps]), np.sum(rabi_dark_refs[:,idx_eps])
plot_ref_dist(ref_single_corr_posterior(x,y,RabiRamseyModel.RABI,var_mult=16),color='r',label='Correlated Posterior')
plot_ref_dist(ref_single_uncorr_posterior(x,y,RabiRamseyModel.RABI,var_mult=16),color='g',label='Uncorrelated Posterior')

plt.scatter(y,x,marker='*',color='k', label='datum', s=16)
plt.title('Rabi')

plt.subplot(1,2,2)
plot_ref_dist(ref_single_prior(RabiRamseyModel.RAMSEY, var_mult=16),color='b',label='Prior')
x, y = np.sum(ramsey_bright_refs[:,idx_eps]), np.sum(ramsey_dark_refs[:,idx_eps])
plot_ref_dist(ref_single_corr_posterior(x,y,RabiRamseyModel.RAMSEY,var_mult=16),color='r',label='Correlated Posterior')
plot_ref_dist(ref_single_uncorr_posterior(x,y,RabiRamseyModel.RAMSEY,var_mult=16),color='g',label='Uncorrelated Posterior')

plt.scatter(y,x,marker='*',color='k', label='datum', s=16)
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title('Ramsey')


Out[27]:
<matplotlib.text.Text at 0x7f7ceb7dc690>

For fun, see what happens if you accidentally use the prior of a single batch but enter the summed data. I definitely wasn't doing this for multiple months.


In [28]:
idx_batch = 2
idx_eps = 34

plot_ref_dist(ref_prior(idx_batch, RabiRamseyModel.RAMSEY, var_mult=16),color='b',label='Prior')
x, y = np.sum(ramsey_bright_refs[:,idx_eps]), np.sum(ramsey_dark_refs[:,idx_eps])
plot_ref_dist(ref_corr_posterior(x,y,RabiRamseyModel.RAMSEY,idx_batch,var_mult=16),color='r',label='Correlated Posterior')
plot_ref_dist(ref_uncorr_posterior(x,y,RabiRamseyModel.RAMSEY,idx_batch,var_mult=16),color='g',label='Uncorrelated Posterior')
plt.scatter(y,x,marker='*',color='k', label='datum', s=16)
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title('Terrible prior')


Out[28]:
<matplotlib.text.Text at 0x7f7ceb6ba450>

Total Prior

Combine the Hamiltonian parameter prior with the reference prior. Note that the reference prior doesn't matter much, since we will be overwriting the reference parameters manually with samples from the conjugate posterior at every update.


In [29]:
prior = qi.ProductDistribution(model_prior, ref_prior(0,0))

Construct expparams and outcomes arrays

In this section we do the tedious but important task of shuffling the Rabi and Ramsey experiment data together. Rerun these cells whenever you want a new ordering of expparams and outcomes.


In [30]:
idxs_data = np.empty(n_rabi+n_ramsey, dtype=int)
emodes = np.concatenate([RabiRamseyModel.RAMSEY * np.ones(n_ramsey),
                         RabiRamseyModel.RABI * np.ones(n_rabi)])
np.random.shuffle(emodes)
idxs_data[emodes.astype(bool)] = np.arange(n_ramsey)
idxs_data[(np.logical_not(emodes)).astype(bool)] = np.arange(n_rabi)

In [31]:
def make_expparam():
    for idx_exp in range(n_rabi + n_ramsey):
        idx_data = idxs_data[idx_exp]
        emode = emodes[idx_exp]
        mode = ref_model.SIGNAL
        if emode == RabiRamseyModel.RAMSEY:
            t = exp_data['RAMSEY']['t_pulse'] * 1e6
            tau = ramsey_times[idx_data]
            phi = 0
        else:
            t = rabi_times[idx_data]
            tau = 0
            phi = 0
        yield np.array([(t, tau, phi, emode, mode)], dtype=model.expparams_dtype)
def get_outcomes(idx_batch):
    for idx_exp in range(n_rabi + n_ramsey):
        idx_data = idxs_data[idx_exp]
        emode = emodes[idx_exp]
        if emode == RabiRamseyModel.RAMSEY:
            outcome = ramsey_data[idx_batch, idx_data, :]
        else:
            outcome = rabi_data[idx_batch, idx_data, :]
        yield outcome
expparams = list(make_expparam())
outcomes = np.array([np.array(list(get_outcomes(idx_batch))) for idx_batch in range(n_batch)])

Test Model and Updater

Test Model Simulator

Final test of our QInfer model before the inference begins: make sure it generates sensible-looking data when given a list of "true" model parameter values.


In [32]:
true_value = np.array([[6, 1.5, 0, 2.1, 10**-1, 6, 2400, 1400]])
sim_data = np.mean(ref_model.simulate_experiment(true_value, np.array(expparams).flatten(),repeat=10)[:,0,:],axis=0)

In [33]:
plt.plot(rabi_times, sim_data[emodes==0])
plt.title('Rabi Test Run')
plt.xlabel('$t~(\mu s)$')
plt.ylabel('Photon counts')


Out[33]:
<matplotlib.text.Text at 0x7f7ceb50afd0>

In [34]:
plt.plot(ramsey_times, sim_data[emodes==1])
plt.ylim([1400,2400])
plt.title('Ramsey Test Run')
plt.xlabel('$t_w~(\mu s)$')
plt.ylabel('Photon counts')


Out[34]:
<matplotlib.text.Text at 0x7f7cea6d2a50>

In [35]:
freqs = np.fft.fftshift(np.fft.fftfreq(n_ramsey, ramsey_times[1]-ramsey_times[2]))
sim_ramsey_fft = np.abs(np.fft.fftshift(np.fft.fft(sim_data[emodes==1]-np.mean(sim_data[emodes==1]))))
peak_ind = m.detect_peaks(sim_ramsey_fft[freqs>0],3000)
print freqs[freqs>0][peak_ind]
plt.plot(freqs, np.abs(sim_ramsey_fft))
plt.plot(freqs[freqs>0][peak_ind],sim_ramsey_fft[peak_ind],'o')
plt.xlim([0,20])


[ 7.  3.  1.]
Out[35]:
(0, 20)

Test our custom updater

Set up one updater that is forced to bridge each update into 8 sub-updates, another that doesn't bridge at all, and one that bridges with many branches. We look at (a marginal of) the posterior in each case to check that they agree; we are basically making sure the code doesn't have bugs, since we know this works in theory. For a fair plot comparison we also run with resampling turned off, acknowledging that the whole point of bridging is to allow resampling in between sub-updates so that the effective particle count never drops too far. Note the steady decrease in effective particle count over the 8 sub-updates. The updaters take about the same time to run because of memoization.
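
The idea behind bridging, sketched here as we read it from BridgedRPMUpdater rather than quoted from its internals: a Poisson likelihood with a large count $x$ is too sharp for a single particle-filter update, so it is factored into $n$ weaker pieces using

$$\Pr(x \mid \lambda)^{1/n} \propto e^{-\lambda/n}\,\lambda^{x/n},$$

where each piece looks like a Poisson observation of $x/n$ counts at rate $\lambda/n$ (this is also why the posterior functions above take a data_divider argument), with resampling permitted between the pieces. With branch size $b$ and recursion depth $r$, an update splits into up to $b^r$ pieces: $2^3 = 8$ bridges for updater2 (assuming a default branch size of 2) and $10^2 = 100$ for updater3, matching the legend below.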


In [36]:
fig = plt.figure(figsize=(10,4))
for idx, check_for_resample in enumerate([False, True]):

    updater1 = m.BridgedRPMUpdater(model, 16000, prior, n_ess_thresh=1)
    updater2 = m.BridgedRPMUpdater(model, 16000, prior, n_ess_thresh=16000, max_recursion=3)
    updater3 = m.BridgedRPMUpdater(model, 16000, prior, n_ess_thresh=16000, max_recursion=2, branch_size=10)

    var_mult = 16
    save_marginal_pics = True

    # pick an arbitrary experiment to update with
    idx_exp = 42
    ep = expparams[idx_exp]

    # unbatch data by summing over the batch index
    x,y,z = np.sum(outcomes[:, idx_exp], axis=0)

    # define function that updates alpha and beta indices
    def ref_updater(updater, x, y):
        posterior = ref_single_uncorr_posterior(x, y, emodes[idx_exp], var_mult=var_mult)
        updater.particle_locations[:,-2:] = posterior.sample(updater.n_particles)

    # do the update on each updater
    for u in [updater1, updater2, updater3]:
        t1 = datetime.now()
        ref_updater(u, x, y)
        n_ess = u.update(z, ep, check_for_resample=check_for_resample)
        dt = datetime.now() - t1
        print dt.total_seconds(), n_ess

    plt.subplot(1,2,idx+1)
    updater1.plot_posterior_marginal()
    updater2.plot_posterior_marginal()
    updater3.plot_posterior_marginal()
    plt.legend(['Unabridged', '8 Bridges', '100 Bridges'],loc=4)
    plt.xlabel('$\Omega$ (MHz)')

    if check_for_resample:
        plt.title('Posterior Comparison with Bridged Transitions \n Resampling when necessary')
    else:
        plt.title('Posterior Comparison with Bridged Transitions \n No Resampling')
        
export_figure(fig, 'qhl-bridge-transitions')


0.779579 1834
1.457896 [[[5491, 3815], [3067, 2627]], [[2330, 2113], [1947, 1815]]]
5.192916 [[13385, 11429, 10123, 9144, 8374, 7751, 7235, 6801, 6429, 6107], [5825, 5575, 5352, 5152, 4970, 4805, 4655, 4516, 4389, 4270], [4161, 4058, 3963, 3873, 3789, 3710, 3635, 3564, 3497, 3434], [3373, 3316, 3261, 3209, 3159, 3111, 3065, 3022, 2979, 2939], [2900, 2863, 2827, 2792, 2758, 2726, 2694, 2664, 2635, 2606], [2579, 2552, 2526, 2501, 2477, 2453, 2430, 2408, 2386, 2365], [2344, 2324, 2304, 2285, 2266, 2248, 2230, 2213, 2196, 2179], [2163, 2147, 2131, 2116, 2101, 2087, 2072, 2058, 2045, 2031], [2018, 2005, 1992, 1980, 1967, 1955, 1944, 1932, 1921, 1909], [1898, 1888, 1877, 1866, 1856, 1846, 1836, 1826, 1816, 1807]]
1.029735 1793
/home/cgranade/anaconda3/envs/nvmeas/lib/python2.7/site-packages/qinfer/utils.py:271: ApproximationWarning: Numerical error in covariance estimation causing positive semidefinite violation.
  warnings.warn('Numerical error in covariance estimation causing positive semidefinite violation.', ApproximationWarning)
3.788718 [[[5526, 8803], [6571, 10170]], [[7922, 10995], [8775, 7505]]]
8.407282 [[13412, 11457, 10138, 9142, 8354, 7714, 15160, 14049, 13062, 12216], [11492, 10867, 10323, 9845, 9422, 9044, 8705, 8398, 8119, 7864], [15397, 14578, 13825, 13157, 12567, 12043, 11573, 11150, 10766, 10417], [10096, 9801, 9528, 9275, 9039, 8819, 8613, 8419, 8237, 8065], [7903, 15561, 14927, 14315, 13753, 13241, 12775, 12350, 11960, 11602], [11271, 10965, 10681, 10417, 10169, 9938, 9720, 9516, 9323, 9140], [8968, 8804, 8648, 8500, 8359, 8224, 8095, 7972, 15690, 15207], [14720, 14258, 13827, 13427, 13056, 12711, 12390, 12091, 11810, 11548], [11301, 11068, 10849, 10642, 10446, 10259, 10082, 9914, 9754, 9600], [9454, 9314, 9180, 9051, 8928, 8809, 8694, 8584, 8478, 8376]]
Overwriting ../fig/qhl-bridge-transitions.png
Overwriting ../fig/qhl-bridge-transitions.pdf

In [37]:
def gamma_pdf(x, mean, sigma):
    a, b = (mean / sigma) **2, mean / sigma **2
    return np.exp(xlogy(a, b) + xlogy(a-1, x) - b * x - gammaln(a))

def normalize(v):
    return v / np.max(v)

x = 40
mean = np.linspace(1,100,100)
sigma = 10
a, b = (mean / sigma) **2, mean / sigma **2
plt.plot(mean, normalize(gamma_pdf(x, mean, sigma)), label='exact')
plt.plot(mean, normalize(gamma_pdf(x/2, mean/2, sigma/np.sqrt(2)) * gamma_pdf(x/2, mean/2, sigma/np.sqrt(2))),label='approx')

correction = np.exp(gammaln(a/2) + xlogy(a, 2)/2  - gammaln(a)/2)
plt.plot(mean, 0.01 + normalize(correction **2 * gamma_pdf(x/2, mean/2, sigma/np.sqrt(2)) * gamma_pdf(x/2, mean/2, sigma/np.sqrt(2))),label='with correction')

plt.legend()
plt.title('Testing Bridging a Gamma Distribution')


Out[37]:
<matplotlib.text.Text at 0x7f7ce89665d0>

Test how many engines to use

We run some tests to see how quickly likelihood evaluation goes on varying numbers of CPU cores. We have found that using all physical cores (and not the hyperthreaded cores), or perhaps just a couple of hyperthreaded cores beyond that, is best.


In [38]:
do_par_test = True
if parallel and do_par_test:
    
    test_n_particles = 20000
    
    def compute_L(model):
        model.likelihood(outcomes[0,:10,2],prior.sample(n=test_n_particles),np.array(expparams[0]).flatten()[:10])
        
    serial_time = %timeit -q -o -n1 -r1 compute_L(ref_model)
    serial_time = serial_time.all_runs[0]
    
    n_engines = np.arange(2,len(dview)+1,2)
    par_time = np.zeros(n_engines.shape[0])
    
    for idx_ne, ne in enumerate(n_engines):
        dview_test = rc[:ne]
        dview_test.use_dill()
        dview_test.execute('os.chdir("{}")'.format(os.getcwd()))
        dview_test.execute('import os')
        dview_test.execute('import qinfer as qi')
        dview_test.execute('import models')
        model_test = qi.DirectViewParallelizedModel(ref_model, dview_test, serial_threshold=1)

        result = %timeit -q -o -n1 -r1 compute_L(model_test)
        par_time[idx_ne] = result.all_runs[0]

In [39]:
if parallel and do_par_test:
    
    fig = plt.figure()
    fig.set_figheight(3)
    fig.set_figwidth(5)
    ax = fig.add_subplot(111)
    
    y_data = np.concatenate([[serial_time], par_time])/serial_time
    x_data = np.concatenate([[1], n_engines])
    
    plt.plot(x_data, y_data,'-o')
    plt.xlabel('# engines')
    plt.ylabel('Normalized Running Time')
    plt.title('Likelihood computation time for {} particles'.format(test_n_particles))
    plt.ylim([2**(-4),2**0])
    plt.xlim([0.9,26])
    ax.set_yscale('log', basey=2)
    ax.set_xscale('log', basex=2)
    ax.set_xticks([1,2,4,6,8,10,12,16,20,24])
    ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())

    export_figure(fig, 'parallel-likelihood-test')


Overwriting ../fig/parallel-likelihood-test.png
Overwriting ../fig/parallel-likelihood-test.pdf

Process Data

Plotting function for video


In [40]:
def plot_marginals(updater, covs):
    """
    Plot the current posterior marginal of each Hamiltonian parameter,
    along with the running history of posterior standard deviations
    stored in covs.
    """
    fig = plt.figure(figsize=(12,7))
    ranges = [[0,10], [0,10], [-5,5], [1.5,3.5], [100**-1,1**-1], [0,0]]
    
    plt.subplot(2,3,6)
    stop = np.nonzero(covs[:,0,0] == 0)[0]
    stop = stop[0] if len(stop) > 0 else covs.shape[0]
    plt.plot(np.log10(np.sqrt(np.diagonal(covs[:stop,:-3,:-3],axis1=1,axis2=2))),lw=2,linestyle='-')
    leg = plt.legend(["${}$".format(x) for x in ham_model.modelparam_names[:-1]],
               bbox_to_anchor=(0., 0, 1., .102), loc=3, ncol=5, mode="expand", borderaxespad=0.)
    for txt in leg.get_texts():
        txt.set_ha("center")
        txt.set_x(-25)
        txt.set_y(2)
    colors = []
    for line in leg.get_lines():
        line.set_ydata(line.get_ydata()-10) # y-position
        colors.append(line.get_color())
        
    plt.xlabel(r'# experiments included in estimate')
    plt.title(r'$\log_{10}(\mathrm{std}[\cdot]/\mathrm{MHz})$')
    plt.ylim([-6,1])
    plt.xlim([0,300])
    
    for idx_param in [1,2,0,3,4]:
        plt.subplot(2,3,idx_param+1)
        updater.plot_posterior_marginal(
            idx_param,
            range_min=ranges[idx_param][0],
            range_max=ranges[idx_param][1],
            smoothing=0.0003,
            other_plot_args={'lw':1.5,'color':colors[idx_param]}
        )
        plt.ylim(bottom=0)
        plt.gca().set_yticklabels([])
        #plt.ylabel('$\Pr({})$'.format(ham_model.modelparam_names[idx_param]))
        plt.title('$\Pr[{}$ (MHz)$]$'.format(ham_model.modelparam_names[idx_param]))
        plt.xlabel('')
    plt.tight_layout()
    return fig

Run Inference on summed data


In [41]:
single_updater = m.BridgedRPMUpdater(model, 16000, prior, n_ess_thresh=2000)
single_means = np.zeros((n_rabi+n_ramsey, model.n_modelparams))
single_covs = np.zeros((n_rabi+n_ramsey, model.n_modelparams, model.n_modelparams))

var_mult = 16
save_marginal_pics = True

for idx_exp in range(n_rabi+n_ramsey):
    
    if save_marginal_pics: # make a video
        fig = plot_marginals(single_updater, single_covs)
        fig.savefig('../fig/video/posterior{0:03d}.png'.format(idx_exp))
        plt.close(fig)
    
    ep = expparams[idx_exp]
    # unbatch data by summing over the batch index
    x,y,z = np.sum(outcomes[:, idx_exp], axis=0)
    # update the posterior
    posterior = ref_single_uncorr_posterior(x, y, emodes[idx_exp], var_mult=var_mult)
    single_updater.particle_locations[:,-2:] = posterior.sample(single_updater.n_particles)
    t1 = datetime.now()
    n_ess = single_updater.update(z, ep)
    dt = datetime.now() - t1
    print ep, dt.total_seconds(), int(single_updater.min_n_ess), n_ess
        
    # record the first two moments for later plotting
    single_means[idx_exp,:] = single_updater.est_mean()
    single_covs[idx_exp,:,:] = single_updater.est_covariance_mtx()
    
    if np.mod(idx_exp+1, 50) == 0:
        print idx_exp, single_updater.est_mean()[:5]


[( 0.008,  0.,  0., 0, 0)] 0.672164 3743 3743
[( 0.016,  0.,  0., 0, 0)] 0.61758 3314 3314
[( 0.044,  0.01,  0., 1, 0)] 0.573107 2805 2805
[( 0.024,  0.,  0., 0, 0)] 0.589305 2805 9148
[( 0.032,  0.,  0., 0, 0)] 0.724999 2805 3809
[( 0.04,  0.,  0., 0, 0)] 1.467039 2805 [[4513, 9114], 3037]
[( 0.044,  0.02,  0., 1, 0)] 0.606462 2805 6465
[( 0.048,  0.,  0., 0, 0)] 0.737649 2805 3631
[( 0.056,  0.,  0., 0, 0)] 2.164848 2805 [[3798, 7029], 5222]
[( 0.044,  0.03,  0., 1, 0)] 0.606191 2805 8615
[( 0.044,  0.04,  0., 1, 0)] 0.735483 2805 3338
[( 0.044,  0.05,  0., 1, 0)] 0.669049 2543 2543
[( 0.064,  0.,  0., 0, 0)] 0.645185 2543 9962
[( 0.044,  0.06,  0., 1, 0)] 0.695 2543 4317
[( 0.072,  0.,  0., 0, 0)] 0.694906 2543 11770
[( 0.044,  0.07,  0., 1, 0)] 0.646097 2543 7279
[( 0.044,  0.08,  0., 1, 0)] 0.607473 2543 5433
[( 0.08,  0.,  0., 0, 0)] 0.927698 2543 9976
[( 0.088,  0.,  0., 0, 0)] 0.639194 2543 4199
[( 0.096,  0.,  0., 0, 0)] 0.653165 2543 12390
[( 0.044,  0.09,  0., 1, 0)] 0.639465 2543 3618
[( 0.044,  0.1,  0., 1, 0)] 0.689453 2543 11013
[( 0.044,  0.11,  0., 1, 0)] 1.341979 2469 [2469, 9071]
[( 0.044,  0.12,  0., 1, 0)] 1.368432 2469 [2754, 12387]
[( 0.104,  0.,  0., 0, 0)] 0.65859 2469 5008
[( 0.044,  0.13,  0., 1, 0)] 0.616358 2469 5180
[( 0.044,  0.14,  0., 1, 0)] 1.498151 2469 [[4026, 8984], 3361]
[( 0.044,  0.15,  0., 1, 0)] 0.670094 2469 9897
[( 0.044,  0.16,  0., 1, 0)] 0.653931 2469 5394
[( 0.044,  0.17,  0., 1, 0)] 0.606867 2469 10845
[( 0.044,  0.18,  0., 1, 0)] 0.682579 2469 7193
[( 0.112,  0.,  0., 0, 0)] 0.729617 2469 11258
[( 0.044,  0.19,  0., 1, 0)] 0.677721 2469 8615
[( 0.044,  0.2,  0., 1, 0)] 0.683698 2469 4494
[( 0.12,  0.,  0., 0, 0)] 0.694307 2469 8955
got unknown result: afb50df6-7d5ff2c7e37b2716dd0c57b1
[( 0.128,  0.,  0., 0, 0)] 2.448563 2469 [[4944, 8671], [2785, 8756]]
[( 0.044,  0.21,  0., 1, 0)] 0.770308 2469 6649
[( 0.044,  0.22,  0., 1, 0)] 0.691301 2469 4221
[( 0.136,  0.,  0., 0, 0)] 0.658929 2469 12963
[( 0.144,  0.,  0., 0, 0)] 0.716094 2469 6282
[( 0.152,  0.,  0., 0, 0)] 0.747022 2469 13688
[( 0.044,  0.23,  0., 1, 0)] 0.655296 2469 11486
[( 0.044,  0.24,  0., 1, 0)] 0.677632 2469 9855
[( 0.044,  0.25,  0., 1, 0)] 0.660367 2469 7359
[( 0.16,  0.,  0., 0, 0)] 0.745498 2469 13879
[( 0.044,  0.26,  0., 1, 0)] 0.679718 2469 12092
[( 0.044,  0.27,  0., 1, 0)] 0.757266 2469 9648
[( 0.044,  0.28,  0., 1, 0)] 0.683159 2469 5104
[( 0.044,  0.29,  0., 1, 0)] 0.65653 2469 14596
[( 0.168,  0.,  0., 0, 0)] 0.725539 2469 12898
49 [ 5.59870953  1.43268579 -0.13084192  2.17377638  0.02873493]
[( 0.044,  0.3,  0., 1, 0)] 0.740706 2469 11087
[( 0.044,  0.31,  0., 1, 0)] 0.719993 2469 6917
[( 0.044,  0.32,  0., 1, 0)] 0.67953 2469 13719
[( 0.176,  0.,  0., 0, 0)] 0.644881 2469 11842
[( 0.184,  0.,  0., 0, 0)] 0.663615 2469 9404
[( 0.044,  0.33,  0., 1, 0)] 0.756774 2469 3202
[( 0.044,  0.34,  0., 1, 0)] 0.684314 2469 12150
[( 0.044,  0.35,  0., 1, 0)] 0.751171 2469 11048
[( 0.044,  0.36,  0., 1, 0)] 0.743037 2469 6928
[( 0.044,  0.37,  0., 1, 0)] 0.703934 2469 14179
[( 0.044,  0.38,  0., 1, 0)] 0.682797 2469 12179
[( 0.044,  0.39,  0., 1, 0)] 0.717627 2469 8727
[( 0.044,  0.4,  0., 1, 0)] 0.726522 2469 5171
[( 0.192,  0.,  0., 0, 0)] 0.675424 2469 8312
[( 0.044,  0.41,  0., 1, 0)] 1.561048 2469 [3871, 10301]
[( 0.044,  0.42,  0., 1, 0)] 0.701855 2469 3904
[( 0.044,  0.43,  0., 1, 0)] 0.675616 2469 10612
[( 0.044,  0.44,  0., 1, 0)] 0.667333 2469 8606
[( 0.044,  0.45,  0., 1, 0)] 0.753577 2469 6238
[( 0.2,  0.,  0., 0, 0)] 0.750635 2469 9173
[( 0.044,  0.46,  0., 1, 0)] 0.76566 2469 5604
[( 0.044,  0.47,  0., 1, 0)] 0.753641 2469 14242
[( 0.044,  0.48,  0., 1, 0)] 0.72208 2469 12087
[( 0.208,  0.,  0., 0, 0)] 0.713647 2469 10031
[( 0.044,  0.49,  0., 1, 0)] 0.751829 2469 5452
[( 0.044,  0.5,  0., 1, 0)] 0.733033 2469 12659
[( 0.044,  0.51,  0., 1, 0)] 0.788223 2469 10107
[( 0.044,  0.52,  0., 1, 0)] 0.789312 2469 9221
[( 0.216,  0.,  0., 0, 0)] 1.96157 2469 [3794, 6776]
[( 0.224,  0.,  0., 0, 0)] 0.893935 2469 5100
[( 0.044,  0.53,  0., 1, 0)] 1.127998 2469 14699
[( 0.044,  0.54,  0., 1, 0)] 1.02731 2469 13699
[( 0.232,  0.,  0., 0, 0)] 2.233894 2469 [2808, 3849]
[( 0.24,  0.,  0., 0, 0)] 0.976385 2469 5555
[( 0.044,  0.55,  0., 1, 0)] 0.938564 2469 14454
[( 0.248,  0.,  0., 0, 0)] 0.993348 2469 8369
[( 0.044,  0.56,  0., 1, 0)] 0.849488 2469 6646
[( 0.044,  0.57,  0., 1, 0)] 1.033896 2469 14960
[( 0.044,  0.58,  0., 1, 0)] 0.877273 2469 13096
[( 0.044,  0.59,  0., 1, 0)] 0.937786 2469 11795
[( 0.044,  0.6,  0., 1, 0)] 0.965084 2469 6031
[( 0.044,  0.61,  0., 1, 0)] 0.886779 2469 6023
[( 0.256,  0.,  0., 0, 0)] 0.938339 2469 9193
[( 0.044,  0.62,  0., 1, 0)] 0.844159 2469 4218
[( 0.044,  0.63,  0., 1, 0)] 0.941985 2469 13436
[( 0.044,  0.64,  0., 1, 0)] 0.866426 2469 6018
[( 0.264,  0.,  0., 0, 0)] 0.983846 2469 3617
[( 0.272,  0.,  0., 0, 0)] 0.801912 2469 11451
[( 0.044,  0.65,  0., 1, 0)] 0.923168 2469 6179
[( 0.044,  0.66,  0., 1, 0)] 0.887258 2469 8916
99 [ 5.56514058  1.43214261 -0.23510673  2.16842051  0.01815051]
[( 0.044,  0.67,  0., 1, 0)] 1.712811 2469 [3776, 9048]
[( 0.044,  0.68,  0., 1, 0)] 0.95773 2469 7845
[( 0.28,  0.,  0., 0, 0)] 0.904082 2469 13635
[( 0.288,  0.,  0., 0, 0)] 0.88843 2469 4856
[( 0.044,  0.69,  0., 1, 0)] 0.90674 2469 8533
[( 0.044,  0.7,  0., 1, 0)] 0.866731 2469 7947
[( 0.296,  0.,  0., 0, 0)] 0.866775 2469 8642
[( 0.304,  0.,  0., 0, 0)] 0.901643 2469 5345
[( 0.312,  0.,  0., 0, 0)] 0.891865 2469 4209
[( 0.044,  0.71,  0., 1, 0)] 0.853504 2469 15058
[( 0.32,  0.,  0., 0, 0)] 0.948092 2469 4593
[( 0.044,  0.72,  0., 1, 0)] 1.021484 2469 14949
[( 0.044,  0.73,  0., 1, 0)] 0.865661 2469 13924
[( 0.328,  0.,  0., 0, 0)] 0.869134 2469 2941
[( 0.044,  0.74,  0., 1, 0)] 0.940222 2469 12813
[( 0.044,  0.75,  0., 1, 0)] 0.862431 2469 10177
[( 0.044,  0.76,  0., 1, 0)] 0.883196 2469 8733
[( 0.044,  0.77,  0., 1, 0)] 0.850186 2469 6554
[( 0.044,  0.78,  0., 1, 0)] 0.850012 2469 11863
[( 0.044,  0.79,  0., 1, 0)] 0.829579 2469 9176
[( 0.044,  0.8,  0., 1, 0)] 0.828838 2469 6264
[( 0.044,  0.81,  0., 1, 0)] 0.834232 2469 7951
[( 0.044,  0.82,  0., 1, 0)] 0.869131 2469 12841
[( 0.336,  0.,  0., 0, 0)] 0.876876 2469 9695
[( 0.044,  0.83,  0., 1, 0)] 1.735037 2217 [2217, 4657]
[( 0.344,  0.,  0., 0, 0)] 0.84088 2217 13987
[( 0.044,  0.84,  0., 1, 0)] 0.811323 2217 12200
[( 0.044,  0.85,  0., 1, 0)] 0.909297 2217 9640
[( 0.044,  0.86,  0., 1, 0)] 0.907024 2217 8278
[( 0.044,  0.87,  0., 1, 0)] 1.035666 2217 7251
[( 0.044,  0.88,  0., 1, 0)] 0.936189 2217 12776
[( 0.044,  0.89,  0., 1, 0)] 1.297076 2217 11530
[( 0.044,  0.9,  0., 1, 0)] 2.583815 2217 [4459, 6833]
[( 0.352,  0.,  0., 0, 0)] 1.011131 2217 12844
[( 0.044,  0.91,  0., 1, 0)] 1.112666 2217 11246
[( 0.36,  0.,  0., 0, 0)] 0.998065 2217 5446
[( 0.044,  0.92,  0., 1, 0)] 0.94878 2217 14518
[( 0.368,  0.,  0., 0, 0)] 1.019729 2217 11641
[( 0.376,  0.,  0., 0, 0)] 0.883354 2217 8843
[( 0.384,  0.,  0., 0, 0)] 0.902014 2217 7267
[( 0.044,  0.93,  0., 1, 0)] 0.895061 2217 8329
[( 0.044,  0.94,  0., 1, 0)] 0.814743 2217 7233
[( 0.392,  0.,  0., 0, 0)] 1.817701 2217 [5659, 5768]
[( 0.044,  0.95,  0., 1, 0)] 0.836886 2217 12782
[( 0.4,  0.,  0., 0, 0)] 1.810302 2217 [3114, 3865]
[( 0.044,  0.96,  0., 1, 0)] 0.868859 2217 10947
[( 0.044,  0.97,  0., 1, 0)] 1.049389 2217 9752
[( 0.044,  0.98,  0., 1, 0)] 0.99541 2217 9091
[( 0.044,  0.99,  0., 1, 0)] 0.971657 2217 4219
[( 0.044,  1.,  0., 1, 0)] 0.904486 2217 8687
149 [ 5.57121896  1.4358115   0.12250343  2.16601984  0.02591162]
[( 0.044,  1.01,  0., 1, 0)] 2.029897 2217 [3756, 8241]
[( 0.408,  0.,  0., 0, 0)] 2.900398 2217 [[4472, 8678], [3268, 9090]]
[( 0.044,  1.02,  0., 1, 0)] 0.814549 2217 8102
[( 0.416,  0.,  0., 0, 0)] 2.999666 2217 [[4209, 9471], [3737, 10015]]
[( 0.424,  0.,  0., 0, 0)] 0.900531 2217 9310
[( 0.432,  0.,  0., 0, 0)] 0.918337 2217 7161
[( 0.44,  0.,  0., 0, 0)] 1.064074 2217 11018
[( 0.044,  1.03,  0., 1, 0)] 1.059962 2217 7101
[( 0.044,  1.04,  0., 1, 0)] 1.013971 2217 10244
[( 0.448,  0.,  0., 0, 0)] 0.938976 2217 5196
[( 0.044,  1.05,  0., 1, 0)] 1.051253 2217 2671
[( 0.044,  1.06,  0., 1, 0)] 0.932419 2217 2549
[( 0.044,  1.07,  0., 1, 0)] 0.833488 2217 7722
[( 0.044,  1.08,  0., 1, 0)] 0.853159 2217 3640
[( 0.044,  1.09,  0., 1, 0)] 0.850693 2217 11880
[( 0.044,  1.1,  0., 1, 0)] 1.832175 2217 [4970, 6652]
[( 0.044,  1.11,  0., 1, 0)] 0.864506 2217 8501
[( 0.044,  1.12,  0., 1, 0)] 2.362894 2217 [2929, 6412]
[( 0.044,  1.13,  0., 1, 0)] 1.058943 2217 5157
[( 0.456,  0.,  0., 0, 0)] 1.010641 2217 10824
[( 0.044,  1.14,  0., 1, 0)] 2.128804 2217 [4667, 7310]
[( 0.044,  1.15,  0., 1, 0)] 0.941249 2217 5741
[( 0.464,  0.,  0., 0, 0)] 0.884076 2217 10892
[( 0.044,  1.16,  0., 1, 0)] 0.906515 2217 7849
[( 0.044,  1.17,  0., 1, 0)] 0.971917 2217 12750
[( 0.044,  1.18,  0., 1, 0)] 0.897464 2217 10641
[( 0.472,  0.,  0., 0, 0)] 0.944066 2217 9888
[( 0.044,  1.19,  0., 1, 0)] 0.92415 2217 8746
[( 0.044,  1.2,  0., 1, 0)] 1.043892 2217 2890
[( 0.044,  1.21,  0., 1, 0)] 1.073342 2217 9952
[( 0.48,  0.,  0., 0, 0)] 0.932506 2217 2326
[( 0.044,  1.22,  0., 1, 0)] 1.019064 2217 10239
[( 0.044,  1.23,  0., 1, 0)] 0.927728 2217 9554
[( 0.044,  1.24,  0., 1, 0)] 0.962912 2217 6145
[( 0.488,  0.,  0., 0, 0)] 1.966005 2217 [4389, 4386]
[( 0.044,  1.25,  0., 1, 0)] 0.84785 2217 9692
[( 0.496,  0.,  0., 0, 0)] 1.983779 2217 [4804, 7886]
[( 0.044,  1.26,  0., 1, 0)] 1.062251 2217 9332
[( 0.044,  1.27,  0., 1, 0)] 1.153629 2217 8099
[( 0.504,  0.,  0., 0, 0)] 1.159214 2217 7408
[( 0.044,  1.28,  0., 1, 0)] 1.019602 2217 10680
[( 0.044,  1.29,  0., 1, 0)] 1.000948 2217 6477
[( 0.044,  1.3,  0., 1, 0)] 1.040071 2217 2896
[( 0.044,  1.31,  0., 1, 0)] 1.97133 2217 [6299, 6763]
[( 0.512,  0.,  0., 0, 0)] 0.947228 2217 8265
[( 0.044,  1.32,  0., 1, 0)] 1.897423 2217 [3009, 6002]
[( 0.044,  1.33,  0., 1, 0)] 4.051962 2217 [[7062, 7309], [7330, 7505]]
[( 0.044,  1.34,  0., 1, 0)] 1.913192 2217 [3922, 4038]
[( 0.044,  1.35,  0., 1, 0)] 1.937327 2217 [3062, 3328]
[( 0.044,  1.36,  0., 1, 0)] 1.007306 2217 14263
199 [  5.56663574e+00   1.43390664e+00   2.46877175e-03   2.16936160e+00
   2.94266216e-02]
[( 0.044,  1.37,  0., 1, 0)] 0.898691 2217 4563
[( 0.52,  0.,  0., 0, 0)] 0.919441 2217 2887
[( 0.044,  1.38,  0., 1, 0)] 0.856415 2217 12313
[( 0.044,  1.39,  0., 1, 0)] 0.838226 2217 7466
[( 0.528,  0.,  0., 0, 0)] 0.82184 2217 14299
[( 0.044,  1.4,  0., 1, 0)] 0.848492 2217 8405
[( 0.536,  0.,  0., 0, 0)] 0.905179 2217 4337
[( 0.544,  0.,  0., 0, 0)] 0.871824 2217 10232
[( 0.044,  1.41,  0., 1, 0)] 0.94542 2217 9578
[( 0.552,  0.,  0., 0, 0)] 0.962039 2217 5778
[( 0.56,  0.,  0., 0, 0)] 0.918707 2217 8500
[( 0.044,  1.42,  0., 1, 0)] 0.890495 2217 6488
[( 0.044,  1.43,  0., 1, 0)] 0.81757 2217 14778
[( 0.044,  1.44,  0., 1, 0)] 0.799649 2217 13856
[( 0.044,  1.45,  0., 1, 0)] 0.844399 2217 9006
[( 0.044,  1.46,  0., 1, 0)] 1.91832 2217 [2923, 5174]
[( 0.044,  1.47,  0., 1, 0)] 0.8442 2217 14722
[( 0.044,  1.48,  0., 1, 0)] 0.927489 2217 13334
[( 0.568,  0.,  0., 0, 0)] 0.787418 2217 10864
[( 0.576,  0.,  0., 0, 0)] 0.842903 2217 9603
[( 0.584,  0.,  0., 0, 0)] 0.879604 2217 7138
[( 0.044,  1.49,  0., 1, 0)] 0.869043 2217 5494
[( 0.044,  1.5,  0., 1, 0)] 0.8667 2217 12278
[( 0.592,  0.,  0., 0, 0)] 0.81355 2217 11227
[( 0.6,  0.,  0., 0, 0)] 0.795609 2217 5391
[( 0.044,  1.51,  0., 1, 0)] 0.893687 2217 5524
[( 0.044,  1.52,  0., 1, 0)] 0.852656 2217 5116
[( 0.608,  0.,  0., 0, 0)] 0.893857 2217 5232
[( 0.616,  0.,  0., 0, 0)] 0.947833 2217 12291
[( 0.044,  1.53,  0., 1, 0)] 0.968875 2217 6336
[( 0.044,  1.54,  0., 1, 0)] 1.067619 2217 3418
[( 0.044,  1.55,  0., 1, 0)] 2.033978 2217 [2706, 2826]
[( 0.044,  1.56,  0., 1, 0)] 1.989324 2217 [4669, 4878]
[( 0.044,  1.57,  0., 1, 0)] 1.945417 2217 [6373, 6609]
[( 0.044,  1.58,  0., 1, 0)] 0.893786 2217 2265
[( 0.044,  1.59,  0., 1, 0)] 0.908788 2217 9859
[( 0.044,  1.6,  0., 1, 0)] 0.895502 2217 9043
[( 0.044,  1.61,  0., 1, 0)] 0.880319 2217 8410
[( 0.044,  1.62,  0., 1, 0)] 1.100974 2217 7631
[( 0.044,  1.63,  0., 1, 0)] 1.134668 2217 14884
[( 0.624,  0.,  0., 0, 0)] 1.077197 2217 13046
[( 0.044,  1.64,  0., 1, 0)] 1.052696 2217 11902
[( 0.044,  1.65,  0., 1, 0)] 0.989219 2217 5951
[( 0.044,  1.66,  0., 1, 0)] 1.004764 2217 15155
[( 0.632,  0.,  0., 0, 0)] 1.064392 2217 10869
[( 0.044,  1.67,  0., 1, 0)] 1.092186 2217 4609
[( 0.044,  1.68,  0., 1, 0)] 1.974987 2217 [6517, 6535]
[( 0.64,  0.,  0., 0, 0)] 0.965818 2217 10000
[( 0.648,  0.,  0., 0, 0)] 0.916361 2217 7287
[( 0.044,  1.69,  0., 1, 0)] 1.035046 2217 13359
249 [ 5.56701801  1.43534848  0.00719238  2.16743667  0.03070029]
[( 0.044,  1.7,  0., 1, 0)] 0.961313 2217 9528
[( 0.656,  0.,  0., 0, 0)] 0.927393 2217 2462
[( 0.044,  1.71,  0., 1, 0)] 0.893515 2217 14242
[( 0.044,  1.72,  0., 1, 0)] 0.978117 2217 5380
[( 0.664,  0.,  0., 0, 0)] 0.853026 2217 8050
[( 0.044,  1.73,  0., 1, 0)] 0.942521 2217 3601
[( 0.044,  1.74,  0., 1, 0)] 1.773441 2217 [6650, 6783]
[( 0.044,  1.75,  0., 1, 0)] 0.824926 2217 12143
[( 0.672,  0.,  0., 0, 0)] 1.910784 2217 [5157, 6854]
[( 0.68,  0.,  0., 0, 0)] 0.913308 2217 12285
[( 0.044,  1.76,  0., 1, 0)] 0.915638 2217 7180
[( 0.688,  0.,  0., 0, 0)] 1.003475 2217 9492
[( 0.044,  1.77,  0., 1, 0)] 1.061912 2217 6174
[( 0.044,  1.78,  0., 1, 0)] 0.987059 2217 15044
[( 0.696,  0.,  0., 0, 0)] 0.924932 2217 13028
[( 0.704,  0.,  0., 0, 0)] 0.92289 2217 5926
[( 0.712,  0.,  0., 0, 0)] 0.875946 2217 14527
[( 0.044,  1.79,  0., 1, 0)] 1.948542 2217 [4250, 4872]
[( 0.044,  1.8,  0., 1, 0)] 0.929867 2217 5561
[( 0.72,  0.,  0., 0, 0)] 0.912518 2217 13751
[( 0.044,  1.81,  0., 1, 0)] 1.913415 2217 [3950, 4783]
[( 0.728,  0.,  0., 0, 0)] 0.966828 2217 14223
[( 0.044,  1.82,  0., 1, 0)] 1.930312 2217 [4228, 4758]
[( 0.044,  1.83,  0., 1, 0)] 1.893379 2217 [4623, 4828]
[( 0.736,  0.,  0., 0, 0)] 1.10951 2217 5900
[( 0.044,  1.84,  0., 1, 0)] 1.065585 2217 2899
[( 0.044,  1.85,  0., 1, 0)] 1.029931 2217 15008
[( 0.044,  1.86,  0., 1, 0)] 1.061936 2217 2943
[( 0.044,  1.87,  0., 1, 0)] 1.000131 2217 14731
[( 0.744,  0.,  0., 0, 0)] 0.936057 2217 4117
[( 0.752,  0.,  0., 0, 0)] 1.028641 2217 9775
[( 0.044,  1.88,  0., 1, 0)] 0.861115 2217 7091
[( 0.76,  0.,  0., 0, 0)] 0.937958 2217 15058
[( 0.044,  1.89,  0., 1, 0)] 0.903768 2217 7043
[( 0.768,  0.,  0., 0, 0)] 0.908419 2217 14882
[( 0.776,  0.,  0., 0, 0)] 0.846605 2217 5752
[( 0.044,  1.9,  0., 1, 0)] 0.831642 2217 5993
[( 0.044,  1.91,  0., 1, 0)] 0.865996 2217 15071
[( 0.044,  1.92,  0., 1, 0)] 0.910182 2217 13925
[( 0.044,  1.93,  0., 1, 0)] 0.948514 2217 10190
[( 0.044,  1.94,  0., 1, 0)] 1.058675 2217 3019
[( 0.044,  1.95,  0., 1, 0)] 1.038866 2217 14746
[( 0.784,  0.,  0., 0, 0)] 0.936292 2217 6581
[( 0.044,  1.96,  0., 1, 0)] 0.929262 2217 12788
[( 0.044,  1.97,  0., 1, 0)] 0.95247 2217 6194
[( 0.044,  1.98,  0., 1, 0)] 2.247138 2217 [3057, 3091]
[( 0.044,  1.99,  0., 1, 0)] 1.090756 2217 6904
[( 0.792,  0.,  0., 0, 0)] 0.919853 2217 10616
[( 0.044,  2.,  0., 1, 0)] 1.997287 2217 [3131, 4618]
[( 0.8,  0.,  0., 0, 0)] 0.866411 2217 14901
299 [ 5.56830804  1.43458525  0.02102186  2.16754303  0.03130383]

In [43]:
fig = plt.figure(figsize=(12,12))
plt_count = 0
plt_units = ["kHz"] * 5
# bah subplot indexing is too confusing to automate
for x_param, y_param, plt_count in [(0,1,1),  (0,2,5),(1,2,6),  (0,3,9),(1,3,10),(2,3,11), (0,4,13),(1,4,14),(2,4,15),(3,4,16)]:
    plt.subplot(5-1,5-1,plt_count)
    #plt.contour(
    #    *single_updater.posterior_mesh(idx_param1=x_param, idx_param2=y_param, res1=10, res2=10, smoothing=0.0002)
    #)
    mean_x = np.sum(single_updater.particle_weights * single_updater.particle_locations[:,x_param])
    mean_y = np.sum(single_updater.particle_weights * single_updater.particle_locations[:,y_param])
    plt.scatter(
        1e3 * (single_updater.particle_locations[:,x_param]-mean_x),
        1e3 * (single_updater.particle_locations[:,y_param]-mean_y),
        s = single_updater.particle_weights * 1000
    )
    plt.xlabel(r'$[{0}-{1:.2f}]$ ({2})'.format(model.modelparam_names[x_param], 1000 * mean_x, plt_units[x_param]))
    plt.ylabel(r'$[{0}-{1:.2f}]$ ({2})'.format(model.modelparam_names[y_param], 1000 * mean_y, plt_units[y_param]))

plt.tight_layout()
    
export_figure(fig, 'qhl-two-param-marginals')


Overwriting ../fig/qhl-two-param-marginals.png
Overwriting ../fig/qhl-two-param-marginals.pdf

In [44]:
fig = plt.figure()
lines = plt.plot(np.log10(np.sqrt(np.diagonal(single_covs[:,:-3,:-3],axis1=1,axis2=2))))
for style, line in zip(["-","--","-.",":","--"], lines):
    line.set_linestyle(style)
lines[-1].set_dashes([10,1,1,1,1,1,1,1])
plt.legend(["${}$".format(x) for x in model.modelparam_names[:-3]],
    ncol=2,
    bbox_to_anchor=(0., 0.6, 1., 0.0)
)
plt.xlabel(r'# experiments included in estimate')
plt.ylabel(r'$\log_{10}(\mathrm{std}[\cdot]/\mathrm{MHz})$')
plt.title('Uncertainty of Hamiltonian Parameters')

export_figure(fig, 'qhl-param-uncertainty')


Overwriting ../fig/qhl-param-uncertainty.png
Overwriting ../fig/qhl-param-uncertainty.pdf

In [ ]:
fig = plt.figure()
lines = plt.plot(single_means[:,:-3], lw=2)
for style, line in zip(["-","--","-.",":","--"], lines):
    line.set_linestyle(style)
lines[-1].set_dashes([10,1,1,1,1,1,1,1])
plt.legend(["${}$".format(x) for x in model.modelparam_names[:-3]],
    ncol=2,
    bbox_to_anchor=(0., 0.7, 1., 0.08)
)

plt.xlabel(r'# experiments included in estimate')
plt.ylabel(r'(MHz)')
plt.title('Hamiltonian Parameter Estimates')

export_figure(fig, 'qhl-param-estimates')


Overwriting ../fig/qhl-param-estimates.png
Overwriting ../fig/qhl-param-estimates.pdf

Run Inference on each batch

Now we run the inference engine on each of the ten batches separately. On our 12-core setup this takes two to three minutes per batch, as the timestamps logged below show.


In [ ]:
updaters = [None] * n_batch
track_means = np.zeros((n_batch, n_rabi+n_ramsey, model.n_modelparams))
track_covs = np.zeros((n_batch, n_rabi+n_ramsey, model.n_modelparams, model.n_modelparams))

var_mult = 16

for idx_batch in range(n_batch):
    print "  "
    print "--------------------" 
    print "Starting batch {} at {}...".format(idx_batch, datetime.now())
    print "--------------------"
    updaters[idx_batch] = m.BridgedRPMUpdater(model, 8000, prior, n_ess_thresh=1000)

    for idx_exp in range(n_rabi+n_ramsey):
        ep = expparams[idx_exp]
        
        x,y,z = outcomes[idx_batch, idx_exp]
        # update the posterior
        posterior = ref_uncorr_posterior(x, y, emodes[idx_exp], idx_batch, var_mult=var_mult)
        updaters[idx_batch].particle_locations[:,-2:] = posterior.sample(updaters[idx_batch].n_particles)
        updaters[idx_batch].update(z, ep)
        
        track_means[idx_batch, idx_exp,:] = updaters[idx_batch].est_mean()
        track_covs[idx_batch, idx_exp,:,:] = updaters[idx_batch].est_covariance_mtx()
        if np.mod(idx_exp+1, 50) == 0:
            print idx_exp, updaters[idx_batch].est_mean()[:5], updaters[idx_batch].min_n_ess


  
--------------------
Starting batch 0 at 2017-05-30 12:15:54.169475...
--------------------
49 [ 5.64386534  1.25195746  0.17521967  2.40028872  0.14870601] 1136.55935098
99 [ 5.56988683  1.42007202  0.51385622  2.15916148  0.01626591] 1123.40127801
149 [ 5.54924563  1.43918318  0.36190995  2.15643333  0.02716531] 1123.40127801
199 [ 5.55465474  1.42990708  0.15785469  2.16992804  0.0414457 ] 1046.30893124
249 [ 5.56528561  1.43046022  0.19826768  2.17203167  0.0424931 ] 1046.30893124
299 [ 5.5630673   1.42782252  0.1698839   2.17268966  0.04486476] 1046.30893124
  
--------------------
Starting batch 1 at 2017-05-30 12:18:14.515933...
--------------------
49 [ 5.64976654  1.38229141  0.2682215   2.232697    0.15627433] 1410.92201691
99 [ 5.57363813  1.45053819  0.41233507  2.1883367   0.03157793] 1098.03307962
149 [ 5.57472836  1.42895379  0.46153304  2.18828656  0.03089042] 1098.03307962
199 [ 5.57451523  1.4286086   0.61275684  2.18015133  0.03452481] 1098.03307962
249 [ 5.56314603  1.42966995  0.43362573  2.1780206   0.03931013] 1098.03307962
299 [ 5.56016817  1.42805946  0.42768487  2.17620174  0.04160195] 1098.03307962
  
--------------------
Starting batch 2 at 2017-05-30 12:20:30.390653...
--------------------
49 [ 5.53350241  1.40787356  0.35114418  2.16884775  0.09323851] 1100.15004998
99 [ 5.54357163  1.42831258  0.27057488  2.170178    0.02834991] 1100.15004998
  
--------------------
Starting batch 3 at ...
--------------------
49 [ 5.68711343  1.39572803 -1.03693685  2.34067067  0.08018515] 1066.94981589
99 [ 5.54346278  1.43970798 -1.33683396  2.19961895  0.02771237] 1066.94981589
149 [ 5.55471491  1.43524211 -1.09729913  2.18483126  0.03009804] 1066.94981589
199 [ 5.54777367  1.43275158 -1.04538015  2.17816556  0.04004206] 1058.1485585
249 [ 5.5473963   1.43437447 -0.93890197  2.17483216  0.04164817] 1058.1485585
299 [ 5.55062527  1.43532618 -0.93260891  2.17180482  0.04039816] 1058.1485585
  
--------------------
Starting batch 4 at 2017-05-30 12:25:16.942066...
--------------------
49 [ 5.55942011  1.32013848  0.13256971  2.35456953  0.07364401] 1202.32787748
99 [ 5.52666105  1.438691    0.74963939  2.19713883  0.01716392] 1085.81164674
149 [ 5.54509998  1.44018464  0.58385257  2.17864551  0.03053919] 1085.81164674
199 [ 5.55139241  1.43961639  0.6076286   2.17900883  0.03599955] 1085.81164674
249 [ 5.55945041  1.43892907  0.51753092  2.17839345  0.04231683] 1085.81164674
299 [ 5.56008259  1.43283419  0.56518592  2.17813493  0.04543062] 1085.81164674
  
--------------------
Starting batch 5 at 2017-05-30 12:27:34.173531...
--------------------
49 [ 5.60127686  1.38282265  0.67452296  2.21837725  0.06329388] 1417.02575811
99 [ 5.61196946  1.41599447  0.48517827  2.18130195  0.01447519] 1028.05750587
149 [ 5.59215205  1.4287108   0.53590165  2.16943978  0.02722435] 1028.05750587
199 [ 5.5604079   1.42611492  0.65148726  2.18089522  0.02965991] 1028.05750587
249 [ 5.56662927  1.43442074  0.56782506  2.17789623  0.03493135] 1028.05750587
299 [ 5.56384946  1.43407426  0.5601087   2.17709108  0.03641362] 1028.05750587
  
--------------------
Starting batch 6 at 2017-05-30 12:29:59.003773...
--------------------
49 [ 5.69299275  1.40333397  0.10895635  2.1694446   0.05500288] 1181.96191615
99 [  5.63924791e+00   1.43095301e+00  -7.31792130e-04   2.16768779e+00
   2.71860225e-02] 1181.96191615
149 [ 5.61059153  1.4341239   0.01277864  2.16821746  0.02844459] 1181.96191615
199 [ 5.5765268   1.426118    0.01275459  2.17342636  0.03328514] 1122.15791226
249 [  5.57504528e+00   1.43032203e+00  -5.14621049e-03   2.17254444e+00
   3.60224026e-02] 1114.18644735
299 [ 5.57472048  1.42930727  0.0242219   2.16621752  0.0387201 ] 1114.18644735
  
--------------------
Starting batch 7 at 2017-05-30 12:32:17.254582...
--------------------
49 [ 5.54044325  1.46129777 -0.03733755  2.14118676  0.08220991] 1458.87231359
99 [ 5.50926889  1.46548529  0.01906854  2.1501433   0.03492276] 1458.87231359
149 [ 5.55568468  1.43733004  0.54516323  2.14907475  0.0327343 ] 1037.5769871
199 [ 5.54058053  1.42699368  0.01879908  2.16535483  0.0409422 ] 1037.5769871
249 [ 5.56014306  1.42537097 -0.26118174  2.16384866  0.03863068] 1037.5769871
299 [ 5.56109862  1.42594705 -0.37068242  2.16523409  0.04107338] 1037.5769871
  
--------------------
Starting batch 8 at 2017-05-30 12:34:34.411767...
--------------------
49 [ 5.590433    1.26335043 -0.46063316  2.36326282  0.07837387] 1143.41507012
99 [ 5.53251909  1.43628348 -0.4877308   2.1770519   0.0315003 ] 1072.43264803
149 [ 5.53790837  1.44155758 -0.4560077   2.1634516   0.02569399] 1072.43264803
199 [ 5.53923064  1.44073855 -0.52968086  2.16983576  0.03696309] 1072.43264803
249 [ 5.54873061  1.44149842 -0.52320877  2.16834986  0.04012671] 1072.28933871
299 [ 5.54781783  1.44056187 -0.55800571  2.16823497  0.04208117] 1072.28933871
  
--------------------
Starting batch 9 at 2017-05-30 12:36:57.353506...
--------------------
49 [ 5.59138819  1.46994093 -0.04651674  2.12763007  0.06218698] 1027.1355078
99 [ 5.59296216  1.45258154  0.19617282  2.15639409  0.03128275] 1027.1355078
149 [ 5.58359811  1.4462475   0.25059973  2.1618653   0.03249739] 1027.1355078
199 [ 5.56210558  1.44287111  0.40219895  2.16536393  0.04407132] 1027.1355078
249 [ 5.5582448   1.44170989  0.37416475  2.16598819  0.04314243] 1027.1355078
299 [ 5.55766113  1.43948748  0.51770052  2.16726855  0.04421077] 1027.1355078

In [ ]:
fig = plt.figure()
plt.plot(np.log10(np.sqrt(np.diagonal(track_covs[0,:,:-3,:-3],axis1=1,axis2=2))))
plt.legend(["${}$".format(x) for x in model.modelparam_names[:-3]])
plt.xlabel(r'# experiments included in estimate')
plt.ylabel(r'$\log_{10}(\mathrm{std}[\cdot]/\mathrm{MHz})$')
plt.title('Uncertainty of Hamiltonian Parameters')

#fig.savefig('../fig/ham-param-uncertainty.pdf')


Out[ ]:
<matplotlib.text.Text at 0x7f7c80542290>

In [ ]:
fig = plt.figure()
plt.plot(track_means[5,:,:-3])
plt.legend(["${}$".format(x) for x in model.modelparam_names[:-3]],ncol=2)
plt.xlabel(r'# experiments included in estimate')
plt.ylabel(r'(MHz)')
plt.title('Hamiltonian Parameter Estimates')


Out[ ]:
<matplotlib.text.Text at 0x7f7c80b06950>
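
As a cross-check between batches, the next cell asks how often the credible region estimated from one batch covers the posterior particles of the other batches: a particle $x$ counts as inside the credibility-$p$ ellipsoid of a batch with mean $c$ and covariance $A$ when $(c - x)^{\mathrm{T}} \left(\chi^2_k(p)\, A\right)^{-1} (c - x) \leq 1$, where $k$ is the number of parameters tested and $\chi^2_k(p)$ is the $\chi^2$ quantile. The reported coverages are averaged over the other nine batches and expressed in percent.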

In [ ]:
def in_ellipse(x, A, c):
    # Test which rows of x lie inside the ellipsoid
    # {y : (c - y)^T A^{-1} (c - y) <= 1}.
    y = c[np.newaxis,:] - x
    return np.einsum('ij,jl,il->i', y, np.linalg.inv(A), y) <= 1
def cv(percentage, axes = np.s_[0:1]):
    # For each batch, average over the other batches the fraction of their
    # posterior particles that fall inside this batch's credible ellipsoid
    # at the given credibility level; return the coverages in percent.
    n_axes = updaters[0].est_mean()[axes].shape[0]
    result = []
    for idx_u, u in enumerate(updaters):
        A = u.est_covariance_mtx()[axes, axes]
        c = u.est_mean()[axes]
        result += [np.array([
            1 if idx_u == idx_u2 else \
                in_ellipse(
                    u2.particle_locations[:,axes],
                    chi2.ppf(percentage, n_axes) * A, 
                    c
                ).sum() / u2.n_particles
            for idx_u2, u2 in enumerate(updaters)
        ]).mean()]
    return np.array(result) * 100

for idx_u, u in enumerate(updaters):
    if not u.just_resampled:
        print "Resampling {}".format(idx_u)
        u.resample()

ps = np.linspace(0.1, 1, 20)

fig=plt.figure()
#plt.plot(ps, map(cv, ps))

cv(0.99)


Resampling 0
Resampling 1
Resampling 2
Resampling 3
Resampling 4
Resampling 5
Resampling 6
Resampling 7
Resampling 8
Resampling 9
/home/cgranade/anaconda3/envs/nvmeas/lib/python2.7/site-packages/qinfer/smc.py:512: ResamplerWarning: Resampling without additional data; this may not perform as desired.
  ResamplerWarning
Out[ ]:
array([ 95.8425 ,  99.0375 ,  95.84375,  77.4375 ,  90.98125,  93.33875,
        50.005  ,  97.93125,  70.7975 ,  91.8475 ])
<matplotlib.figure.Figure at 0x7f7c80a2fd10>
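
Coverage far below the requested 99% credibility, as for batches 3, 6, and 8 above, suggests that those batch posteriors sit farther from the others than their estimated widths alone would explain.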

Simulate with estimate

In this section, we take the posterior mean estimates from each updater and use them to simulate the experiments as a kind of posterior predictive check.


In [ ]:
def sim_rabi(est):
    # Drop the two reference (bright/dark) rate parameters; ham_model takes
    # only the six Hamiltonian parameters.
    sim = ham_model.likelihood([0],est[np.newaxis,:-2], np.array(expparams).flatten())[0,0,:]
    return sim[emodes==0]
def sim_ramsey(est):
    sim = ham_model.likelihood([0],est[np.newaxis,:-2], np.array(expparams).flatten())[0,0,:]
    return sim[emodes==1]
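
The fits below plug in only the posterior mean. A fuller posterior predictive check would draw parameter vectors from the SMC posterior and overlay the resulting predictions; a minimal sketch of such a cell (not part of the original analysis), using the helpers above:


In [ ]:
# Sketch only: overlay Rabi predictions for a few posterior draws.
# SMCUpdater.sample returns an (n_draws, n_modelparams) array of particles.
for sample in single_updater.sample(n=20):
    plt.plot(rabi_times, sim_rabi(sample), color='b', alpha=0.05)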

In [ ]:
def est_error_bars(confidence, p, alpha, beta):
    # Half-width of the two-sided confidence interval on the normalized
    # signal p, given bright (alpha) and dark (beta) reference counts.
    return np.sqrt(2) * erfinv(confidence) * np.sqrt(p*(p+1)*alpha + (p-1)*(p-2)*beta)/(alpha-beta)
def est_std(p, alpha, beta):
    # Standard deviation of p due to Poisson shot noise in the counts.
    return np.sqrt(p*(p+1)*alpha + (p-1)*(p-2)*beta)/(alpha-beta)
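
In terms of the normalized signal $p = (s - \beta)/(\alpha - \beta)$, with $s$ the signal count, $\alpha$ the summed bright-reference count, and $\beta$ the summed dark-reference count (all Poisson-distributed), these helpers compute

$$\mathrm{std}[p] = \frac{\sqrt{p(p+1)\alpha + (p-1)(p-2)\beta}}{\alpha - \beta},$$

which follows from first-order propagation of the Poisson shot noise, together with the half-width $\sqrt{2}\,\mathrm{erf}^{-1}(\mathrm{confidence}) \cdot \mathrm{std}[p]$ of the corresponding two-sided confidence interval.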

In [ ]:
p_rabi = np.sum(rabi_data[:,:,2]-rabi_data[:,:,1].astype(float),axis=0)/np.sum(rabi_data[:,:,0]-rabi_data[:,:,1],axis=0)
p_ramsey = np.sum(ramsey_data[:,:,2]-ramsey_data[:,:,1].astype(float),axis=0)/np.sum(ramsey_data[:,:,0]-ramsey_data[:,:,1],axis=0)

rabi_error_bars = est_error_bars(0.95, p_rabi, np.sum(rabi_bright_refs,axis=0), np.sum(rabi_dark_refs,axis=0))
ramsey_error_bars = est_error_bars(0.95, p_ramsey, np.sum(ramsey_bright_refs,axis=0), np.sum(ramsey_dark_refs,axis=0))

fig = plt.figure()

plt.plot(rabi_times, sim_rabi(single_updater.est_mean()),color='b',lw=2,label='SMC Rabi Fit')
plt.errorbar(rabi_times, p_rabi, yerr=rabi_error_bars, color='red', ls='', marker='o', markersize=3, capsize=5, capthick=1, ecolor='black', label='Normalized Data')

plt.ylabel(r'Tr$(\rho |0\rangle\langle 0|)$')
plt.xlabel(r'$t_r$ ($\mu$s)')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=2, mode="expand", borderaxespad=0.)

export_figure(fig, 'qhl-rabi-fit')


Overwriting ../fig/qhl-rabi-fit.png
Overwriting ../fig/qhl-rabi-fit.pdf

In [ ]:
p_rabi = np.sum(rabi_data[:,:,2]-rabi_data[:,:,1].astype(float),axis=0)/np.sum(rabi_data[:,:,0]-rabi_data[:,:,1],axis=0)
p_ramsey = np.sum(ramsey_data[:,:,2]-ramsey_data[:,:,1].astype(float),axis=0)/np.sum(ramsey_data[:,:,0]-ramsey_data[:,:,1],axis=0)

rabi_error_bars = est_error_bars(0.95, p_rabi, np.sum(rabi_bright_refs,axis=0), np.sum(rabi_dark_refs,axis=0))
ramsey_error_bars = est_error_bars(0.95, p_ramsey, np.sum(ramsey_bright_refs,axis=0), np.sum(ramsey_dark_refs,axis=0))

fig = plt.figure()

plt.plot(ramsey_times, sim_ramsey(single_updater.est_mean()),color='b',lw=2, label='SMC Ramsey Fit')
plt.errorbar(ramsey_times[::3], p_ramsey[::3], yerr=ramsey_error_bars[::3], color='red', ls='', marker='o', markersize=3, capsize=5, capthick=1, ecolor='black', label='Normalized Data')

plt.ylabel(r'Tr$(\rho |0\rangle\langle 0|)$')
plt.xlabel(r'$t_w$ ($\mu$s)')
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=2, mode="expand", borderaxespad=0.)

export_figure(fig, 'qhl-ramsey-fit')


Overwriting ../fig/qhl-ramsey-fit.png
Overwriting ../fig/qhl-ramsey-fit.pdf

In [ ]:
fig, axes = plt.subplots(n_batch,2,sharex=False,sharey=True,figsize=(14,18))
    
for idx_batch in range(n_batch):
    
    p_rabi = (rabi_data[idx_batch,:,2]-rabi_data[idx_batch,:,1].astype(float))/(rabi_data[idx_batch,:,0]-rabi_data[idx_batch,:,1])
    p_ramsey = (ramsey_data[idx_batch,:,2]-ramsey_data[idx_batch,:,1].astype(float))/(ramsey_data[idx_batch,:,0]-ramsey_data[idx_batch,:,1])

    rabi_error_bars = est_error_bars(0.95, p_rabi, rabi_bright_refs[idx_batch,:], rabi_dark_refs[idx_batch,:])
    ramsey_error_bars = est_error_bars(0.95, p_ramsey, ramsey_bright_refs[idx_batch,:], ramsey_dark_refs[idx_batch,:])
    
    axes[idx_batch][0].plot(rabi_times, sim_rabi(updaters[idx_batch].est_mean()),lw=2,label='SMC Rabi Fit')
    axes[idx_batch][0].errorbar(rabi_times, p_rabi, yerr=rabi_error_bars, color='red', ls='', marker='o', markersize=2, capsize=3, capthick=0.5, ecolor='black', label='Normalized Data')

    axes[idx_batch][0].set_ylim([-0.2,1.2])
    axes[idx_batch][0].set_xlim([0,0.8])
    axes[idx_batch][0].set_ylabel(r'Tr$(\rho |0\rangle\langle 0|)$')
    if idx_batch == n_batch - 1:
        axes[idx_batch][0].set_xlabel(r'$t_r$ ($\mu$s)')
    else:
        axes[idx_batch][0].xaxis.set_ticklabels([])
    axes[idx_batch][0].text(0.025, 1, 'batch {}'.format(idx_batch), style='italic',
        bbox={'facecolor':'yellow', 'alpha':0.2, 'pad':3})
    
    
    axes[idx_batch][1].plot(ramsey_times, sim_ramsey(updaters[idx_batch].est_mean()),lw=2,label='SMC Ramsey Fit')
    axes[idx_batch][1].errorbar(ramsey_times[::2], p_ramsey[::2], yerr=ramsey_error_bars[::2], color='red', ls='', marker='o', markersize=2, capsize=3, capthick=0.5, ecolor='black', label='Normalized Data')

    #plt.ylabel(r'Tr$(\rho |0\rangle\langle 0|)$')
    axes[idx_batch][1].set_ylim([-0.2,1.2])
    axes[idx_batch][1].set_xlim([0,2])
    if idx_batch == n_batch - 1:
        axes[idx_batch][1].set_xlabel(r'$t_w$ ($\mu$s)')
    else:
        axes[idx_batch][1].xaxis.set_ticklabels([])
    axes[idx_batch][1].text(0.025, 1, 'batch {}'.format(idx_batch), style='italic',
        bbox={'facecolor':'yellow', 'alpha':0.2, 'pad':3})
 
fig.subplots_adjust(hspace=0.1,wspace=0.05)


export_figure(fig, 'qhl-batch-fits')


Overwriting ../fig/qhl-batch-fits.png
Overwriting ../fig/qhl-batch-fits.pdf

Least Squares Fits

Finally, we run weighted least-squares fitting (WLSF) on each of the batches and compare the results to SMC.
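
Weighted least squares chooses parameters $\theta$ to minimize

$$\chi^2(\theta) = \sum_i \frac{\left(y_i - f(x_i; \theta)\right)^2}{\sigma_i^2},$$

where the $\sigma_i$ are the per-point standard deviations estimated from the reference counts above. One caveat worth noting: with its default absolute_sigma=False, scipy's curve_fit treats sigma as relative weights and rescales the returned covariance matrix by the reduced $\chi^2$ of the fit.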


In [ ]:
def fit_fcn(X, Omega, we, dwc, an, T2i):
    # Simulate experiment type k at time t with the given parameters.
    ks, ts = X
    out = np.empty(len(ks))
    # A loop over experiments is much easier than building a structured
    # expparams array, and ham_model would just loop internally anyway.
    for idx_exp in range(len(ks)):
        k, t = ks[idx_exp], ts[idx_exp]
        if k == ham_model.RABI:
            sim = ham_model.likelihood(
                [0],
                np.array([[Omega, we, dwc, an, T2i, 0]]),
                np.array([(t,0,0,ham_model.RABI)], dtype=ham_model.expparams_dtype)
            )
        if k == ham_model.RAMSEY:
            sim = ham_model.likelihood(
                [0],
                np.array([[Omega, we, dwc, an, T2i, 0]]),
                np.array([(0.044,t,0,ham_model.RAMSEY)], dtype=ham_model.expparams_dtype)
            )
        out[idx_exp] = sim.flatten()[0]
    return out
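
As a quick, hypothetical check (not in the original analysis), note that fit_fcn packs the experiment kinds and times into the two rows of X; for instance, evaluating it at the SMC posterior mean over the Rabi experiments:


In [ ]:
# Hypothetical check: evaluate the WLSF model function at the SMC mean.
# X packs (experiment kinds, times); theta holds (Omega, we, dwc, an, T2i).
theta = single_updater.est_mean()[:5]
X = (np.ones(rabi_times.size) * ham_model.RABI, rabi_times)
print fit_fcn(X, *theta)[:5]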

First, we do a weighted least-squares fit to the entire dataset:


In [ ]:
p_rabi = np.sum(rabi_data[:,:,2]-rabi_data[:,:,1].astype(float),axis=0)/np.sum(rabi_data[:,:,0]-rabi_data[:,:,1],axis=0)
p_ramsey = np.sum(ramsey_data[:,:,2]-ramsey_data[:,:,1].astype(float),axis=0)/np.sum(ramsey_data[:,:,0]-ramsey_data[:,:,1],axis=0)

rabi_error_bars = est_std(p_rabi, np.sum(rabi_bright_refs,axis=0), np.sum(rabi_dark_refs,axis=0))
ramsey_error_bars = est_std(p_ramsey, np.sum(ramsey_bright_refs,axis=0), np.sum(ramsey_dark_refs,axis=0))

xdata = np.concatenate([
        np.vstack([np.ones(rabi_times.size)*ham_model.RABI, rabi_times]).T,
        np.vstack([np.ones(ramsey_times.size)*ham_model.RAMSEY, ramsey_times]).T
    ]).T
ydata = np.concatenate([p_rabi, p_ramsey])
sigma = np.concatenate([rabi_error_bars, ramsey_error_bars])

popt_single, pcov_single = curve_fit(
    fit_fcn,
    xdata,
    ydata,
    single_updater.est_mean()[:-3],
    sigma,
    method='lm'
)

In [ ]:
popts = np.empty((n_batch,5))
pcovs = np.empty((n_batch,5,5))
for idx_batch in range(n_batch):
    p_rabi = (rabi_data[idx_batch,:,2]-rabi_data[idx_batch,:,1].astype(float))/(rabi_data[idx_batch,:,0]-rabi_data[idx_batch,:,1])
    p_ramsey = (ramsey_data[idx_batch,:,2]-ramsey_data[idx_batch,:,1].astype(float))/(ramsey_data[idx_batch,:,0]-ramsey_data[idx_batch,:,1])

    rabi_error_bars = est_std(p_rabi, rabi_bright_refs[idx_batch,:], rabi_dark_refs[idx_batch,:])
    ramsey_error_bars = est_std(p_ramsey, ramsey_bright_refs[idx_batch,:], ramsey_dark_refs[idx_batch,:])

    xdata = np.concatenate([
            np.vstack([np.ones(rabi_times.size)*ham_model.RABI, rabi_times]).T,
            np.vstack([np.ones(ramsey_times.size)*ham_model.RAMSEY, ramsey_times]).T
        ]).T
    ydata = np.concatenate([p_rabi, p_ramsey])
    sigma = np.concatenate([rabi_error_bars, ramsey_error_bars])

    print "Starting {}...".format(idx_batch)
    
    popts[idx_batch,:], pcovs[idx_batch,:,:] = curve_fit(
        fit_fcn,
        xdata,
        ydata,
        updaters[idx_batch].est_mean()[:-3],
        sigma,
        method='lm'
    )


Starting 0...
Starting 1...
Starting 2...
Starting 3...
Starting 4...
Starting 5...
Starting 6...
Starting 7...
Starting 8...
Starting 9...

In [ ]:
fig = plt.figure(figsize=(7,5))
ax = fig.gca()
for idx_u, u in enumerate(updaters):
    line = u.plot_posterior_marginal(1, smoothing=0.0003, other_plot_args={'label':"batch {}".format(idx_u),'lw':1})
    xs, ys = u.posterior_marginal(1, smoothing=0.0003)
    ax.fill_between(xs, 0, ys, facecolor=line[0].get_color(), alpha=0.1)


xs = np.linspace(1.43,1.46,400)
x_mean = single_updater.est_mean()[1]
x_var = single_updater.est_covariance_mtx()[1,1]
plt.plot(xs, np.exp(-(xs-x_mean)**2/(2*x_var))/(np.sqrt(2*np.pi*x_var)),'-',lw=2,c='k',label='all data')

xs = np.linspace(1.42,1.46,400)
x_mean = popt_single[1]
x_var = pcov_single[1,1]
plt.plot(xs, np.exp(-(xs-x_mean)**2/(2*x_var))/(np.sqrt(2*np.pi*x_var)),'--',lw=2,c='b',label='WLSF')

plt.xlim([1.415,1.465])
plt.ylim([0,280])
plt.ylabel(r'Posterior Marginal $\Pr(\omega_e)$')
plt.xlabel(r'$\omega_e$ (MHz)')
plt.legend(loc=1)

export_figure(fig, 'qhl-cv-field-posterior')


Overwriting ../fig/qhl-cv-field-posterior.png
Overwriting ../fig/qhl-cv-field-posterior.pdf

In [ ]:
fig = plt.figure(figsize=(7,5))
ax = fig.gca()
for idx_u, u in enumerate(updaters):
    line = u.plot_posterior_marginal(0, smoothing=0.0003, other_plot_args={'label':"batch {}".format(idx_u),'lw':1})
    xs, ys = u.posterior_marginal(0, smoothing=0.0003)
    ax.fill_between(xs, 0, ys, facecolor=line[0].get_color(), alpha=0.1)
        
xs = np.linspace(5.54,5.6,400)
x_mean = single_updater.est_mean()[0]
x_var = single_updater.est_covariance_mtx()[0,0]
plt.plot(xs, np.exp(-(xs-x_mean)**2/(2*x_var))/(np.sqrt(2*np.pi*x_var)),lw=2,c='k',label='all data')

xs = np.linspace(5.54,5.6,400)
x_mean = popt_single[0]
x_var = pcov_single[0,0]
plt.plot(xs, np.exp(-(xs-x_mean)**2/(2*x_var))/(np.sqrt(2*np.pi*x_var)),'--',lw=2,c='b',label='WLSF')

plt.xlim([5.52, 5.62])
plt.ylim([0,110])
plt.ylabel(r'Posterior Marginal $\Pr(\Omega)$')
plt.xlabel(r'$\Omega$ (MHz)')
plt.legend(loc=1)

export_figure(fig, 'qhl-cv-rabi-posterior')


Overwriting ../fig/qhl-cv-rabi-posterior.png
Overwriting ../fig/qhl-cv-rabi-posterior.pdf

In [ ]:
comparison_tex_table = ""
comparison_tex_table += "\\begin{tabularx}{0.85\\textwidth}{ll"
for k in range(5):
    comparison_tex_table += "XX"
comparison_tex_table += "}\n"

comparison_tex_table += "    ~ & ~"
table_names = list(ham_model.modelparam_names)  # copy so we don't mutate the model's names
table_names[2] = "\\delta\\Delta"
for idx_param in [1,2,0,3,4]:
    comparison_tex_table += " & \multicolumn{{2}}{{c}}{{${}$}}".format(table_names[idx_param])
comparison_tex_table += " \\\\ \n"

comparison_tex_table += "    ~ & ~"
for idx_param in range(5):
    comparison_tex_table += " & $\mathbb{E}$ & $\sigma$"
comparison_tex_table += " \\\\ \n"

def make_table_row(name, updater, popt, pcov):
    out = ""
    out += "    \\cline{3-12}\n"
    out += "    \\multirow{{2}}{{*}}{{{}\\quad}} & SMC~~~~ ".format(name)
    for idx_param in [1,2,0,3,4]:
        out += "& {0:.0f} & {1:.1f} ".format(
            1000*updater.est_mean()[idx_param], 
            1000*np.sqrt(updater.est_covariance_mtx()[idx_param,idx_param])
        )
    out += "\\\\ \n    & WLSF~~~~ "
    for idx_param in [1,2,0,3,4]:
        out += "& {0:.0f} & {1:.1f} ".format(
            1000*popt[idx_param], 
            1000*np.sqrt(pcov[idx_param,idx_param])
        )
    out += "\\\\ \n"
    return out
    
comparison_tex_table += make_table_row('all data', single_updater, popt_single, pcov_single)

for idx_batch in range(n_batch):
    comparison_tex_table += make_table_row(
        'batch {}'.format(idx_batch), 
        updaters[idx_batch], 
        popts[idx_batch,:], pcovs[idx_batch,:,:]
    )

comparison_tex_table += "    \\cline{3-12}\n"
comparison_tex_table += "\\end{tabularx}"

print "Writing comparison table to file..."
with open("../fig/qhl-results-tab.txt", "w") as text_file:
    text_file.write(comparison_tex_table)
print "Done."


Writing comparison table to file...
Done.