I'm going to overwrite a lot of this notebook's old content. I changed the way I'm calculating wt, and want to test that my training worked.


In [ ]:
from pearce.emulator import OriginalRecipe, ExtraCrispy
from pearce.mocks import cat_dict
import numpy as np
from os import path

In [ ]:
import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()

In [ ]:
training_file = '/u/ki/swmclau2/des/PearceRedMagicWpCosmo.hdf5'

em_method = 'gp'
split_method = 'random'

In [ ]:
a = 1.0
z = 1.0/a - 1.0

In [ ]:
fixed_params = {'z':z}#, 'r':0.18477483}

In [ ]:
n_leaves, n_overlap = 100, 2
emu = ExtraCrispy(training_file, n_leaves, n_overlap, split_method, method = em_method, fixed_params=fixed_params,
                 custom_mean_function = None, downsample_factor = 0.2)
# NOTE: this second construction overwrites the ExtraCrispy emulator above;
# everything below uses the OriginalRecipe emulator.
emu = OriginalRecipe(training_file, method = em_method, fixed_params=fixed_params, independent_variable=None,
                     custom_mean_function = None)

In [ ]:
emu._ordered_params

In [ ]:
params = {'ombh2': 0.021,
             'omch2': 0.11,
             'w0': -1.01,
             'ns': 0.9578462,
             'ln10As': 3.08,
             'H0': 68.1,
             'Neff': 3.04,
             'logM1': 14.0,
             'logMmin': 11.9,
             'f_c': 0.2,
             'logM0': 13.2,
             'sigma_logM': 0.12,
             'alpha':1.1}
# Each reassignment below replaces the dict above; the HOD-only dict is the
# one passed to emulate_wrt_r in the next cell.
params = {'ombh2': 0.021, 'omch2': 0.12, 'w0': -1, 'ns': 0.9578462, 'ln10As': 3.08, 'H0': 68.1, 'Neff': 3.04}
params = {'logM1': 14.0, 'logMmin': 11.9, 'f_c': 0.2, 'logM0': 13.2, 'sigma_logM': 0.12, 'alpha':1.1}
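
A quick sanity check before emulating (a minimal sketch; assumes, as above, that iterating emu._ordered_params yields every parameter name the emulator expects, with 'r' and the fixed params handled separately):

In [ ]:
# Sketch: list any emulator parameters missing from `params` (ignoring 'r'
# and the parameters fixed at construction).
missing = [p for p in emu._ordered_params
           if p not in params and p not in fixed_params and p != 'r']
print(missing)  # anything printed here is absent from `params`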

In [ ]:
wp = emu.emulate_wrt_r(params, emu.scale_bin_centers)[0]

In [ ]:
emu._x_mean, emu._x_std

In [ ]:
emu.x.shape

In [ ]:
plt.plot(emu.scale_bin_centers, wp)
plt.xscale('log')
plt.xlabel(r'$r$ [Mpc]')
plt.ylabel(r'$w_p(r_p)$')
plt.show()
# Combined cosmology + HOD dict, used for the parameter sweep in the next cell.
params = {'ombh2': 0.021, 'omch2': 0.11, 'w0': -1, 'ns': 0.9578462, 'ln10As': 3.08, 'H0': 68.1, 'Neff': 3.04, 'logM1': 14.0, 'logMmin': 11.9, 'f_c': 0.2, 'logM0': 13.2, 'sigma_logM': 0.12, 'alpha':1.1}

In [ ]:
param_name = 'logMmin'
param_bounds = emu.get_param_bounds(param_name)
pvals = np.linspace(param_bounds[0],param_bounds[1], 5)

for val in pvals:
    params[param_name] = val
    #print params
    wp = emu.emulate_wrt_r(params, emu.scale_bin_centers)[0]
    #print(wp)
    plt.plot(emu.scale_bin_centers, wp, label = '%s = %.2f'%(param_name, val))
    
# Horizontal reference line at the global mean of the training outputs.
plt.plot(emu.scale_bin_centers, np.mean(emu._y_mean)*np.ones_like(emu.scale_bin_centers), color = 'k')

    
plt.xscale('log')
plt.xlabel(r'$r$ [Mpc]')
plt.ylabel(r'$w_p(r_p)$')
plt.show()
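
To make the sweep above quantitative, a rough sensitivity measure (a sketch re-running the same loop; `preds` and `spread` are names introduced here):

In [ ]:
# Sketch: fractional spread of the predictions across the swept values,
# per radial bin, as a rough sensitivity of wp to `param_name`.
preds = []
for val in pvals:
    params[param_name] = val
    preds.append(emu.emulate_wrt_r(params, emu.scale_bin_centers)[0])
preds = np.array(preds)
spread = (preds.max(axis=0) - preds.min(axis=0))/np.abs(preds.mean(axis=0))
print(spread)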

In [ ]:
432/18  # = 24; presumably the total number of training rows divided by 18 radial bins

In [ ]:
idx = 25
binlen = len(emu.scale_bin_centers)

# Reconstruct the physical parameter values of training point `idx` by undoing
# the whitening applied to emu.x (stored as (x - mean)/std); the last column
# is r, which is handled separately.
params = {pname: p for pname, p in zip(emu.get_param_names(), emu._x_std[:-1]*emu.x[idx*binlen, :-1] + emu._x_mean[:-1])}

In [ ]:
wp = emu.emulate_wrt_r(params,emu.scale_bin_centers)[0]

In [ ]:
plt.plot(emu.scale_bin_centers, wp, label = 'Emu')
# De-whiten the stored training outputs to recover the true wp for this point.
plt.plot(emu.scale_bin_centers, emu._y_std*emu.y[idx*binlen:(idx+1)*binlen]+emu._y_mean, label = 'Truth')
#plt.plot(emu.x[idx*binlen:(idx+1)*binlen, -1], lm_pred)
plt.xscale('log')
plt.xlabel(r'$r$ [Mpc]')
plt.ylabel(r'$w_p(r_p)$')
plt.legend(loc = 'best')
plt.show()
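
To put a number on how well the training worked at this point, a fractional-error plot (a sketch; `wp_true` just repeats the de-whitened truth from the cell above):

In [ ]:
# Sketch: percent error of the emulator against the de-whitened truth.
wp_true = emu._y_std*emu.y[idx*binlen:(idx+1)*binlen] + emu._y_mean
frac_err = (wp - wp_true)/wp_true
plt.plot(emu.scale_bin_centers, 100*frac_err)
plt.xscale('log')
plt.xlabel(r'$r$ [Mpc]')
plt.ylabel(r'% error')
plt.show()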

In [ ]:
emu.y.shape

In [ ]:
emu._y_mean

In [ ]:
params['f_c'] = 0.1

In [ ]:
# Attach the full vector of radial bins so the grid below varies over r.
params['r'] = emu.scale_bin_centers

In [ ]:
# Build a prediction grid over the free parameters in the order the emulator
# expects; here only 'r' holds a vector, so the grid varies over r alone.
t_list = [params[pname] for pname in emu._ordered_params if pname in params]
t_grid = np.meshgrid(*t_list)
t = np.stack(t_grid).T
t = t.reshape((-1, emu.emulator_ndim))
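
A quick shape check on the grid (a sketch; with only 'r' varying, the row count should equal len(emu.scale_bin_centers) and the column count emu.emulator_ndim):

In [ ]:
# Sketch: confirm the prediction grid has the expected dimensions.
print(t.shape)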

In [ ]:
# Whiten the grid with the same transform applied to the training inputs.
t-=emu._x_mean
t/=(emu._x_std + 1e-5)

In [ ]:
# NOTE: `gp` is assumed to be the trained GP instance backing the emulator;
# it is not defined in this notebook as shown.
for i in xrange(emu.y.shape[0]):
    print gp.predict(emu.y[i], t, return_cov= False)

In [ ]:
emu.mean_function(t)

In [ ]:
# Coefficients of the linear-regression step inside the mean-function pipeline.
emu._mean_func.named_steps['linearregression'].coef_
