In [21]:
import numpy as np
import astropy
from pearce.emulator import OriginalRecipe, ExtraCrispy
In [22]:
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
In [23]:
training_dir = '/u/ki/swmclau2/des/PearceLHC_wp_z_sham_free_split/'
experts, overlap = 10, 2
fixed_params = {'z':0.0}
In [24]:
# OriginalRecipe trains a single GP; ExtraCrispy is the ensemble-of-experts variant.
emu = OriginalRecipe(training_dir, fixed_params=fixed_params)
#emu = ExtraCrispy(training_dir, experts, overlap, fixed_params=fixed_params)
In [25]:
names = ['mean_occupation_centrals_assembias_param1', 'mean_occupation_satellites_assembias_param1',
         'mean_occupation_centrals_assembias_split1', 'mean_occupation_satellites_assembias_split1']
In [31]:
#mock_wp = cat.calc_wp(rp_bins, RSD=False)
# Sweep the split parameter at fixed AB amplitudes (central AB on, satellite AB off).
MAP = np.array([1.0, 0.0, 0.5, 0.5])
params = dict(zip(names, MAP))
emu_wps = []
split = np.linspace(0.1, 0.9, 8)
#cat.model._input_model_dictionary['centrals_occupation']._split_abscissa = split_abscissa
#cat.model._input_model_dictionary['satellites_occupation']._split_abscissa = split_abscissa
for p in split:
    params['mean_occupation_centrals_assembias_split1'] = p
    params['mean_occupation_satellites_assembias_split1'] = p
    emu_wps.append(emu.emulate_wrt_r(params, emu.scale_bin_centers)[0])
emu_wps = np.array(emu_wps)
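A quick shape check (added cell, not part of the original run) confirms there is one emulated wp vector per split value:
In [ ]:
# Expect (len(split), len(emu.scale_bin_centers)).
print(emu_wps.shape)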
In [32]:
# Baseline with both assembly bias amplitudes zeroed, so the split values are irrelevant.
params = dict(zip(names, [0.0, 0.0, 0.5, 0.5]))
print(params)
noab_wp = emu.emulate_wrt_r(params, emu.scale_bin_centers)[0]
In [33]:
plt.figure(figsize=(10, 8))
for p, emu_wp in zip(split, emu_wps):
    plt.plot(emu.scale_bin_centers, emu_wp, label='split = %.2f' % p)
#plt.plot(bin_centers, sham_wp, ls='--', label='SHAM')
plt.plot(emu.scale_bin_centers, noab_wp, ls=':', label='No AB')
plt.xscale('log')
plt.legend(loc='best', fontsize=15)
plt.xlim([1e-1, 3e1])
plt.xlabel(r'$r_p$', fontsize=15)
plt.ylabel(r'$w_p(r_p)$', fontsize=15)
plt.show()
In [34]:
plt.figure(figsize=(10, 8))
for p, emu_wp in zip(split, emu_wps):
    plt.plot(emu.scale_bin_centers, emu_wp / noab_wp, label='split = %.2f' % p)
plt.xscale('log')
plt.legend(loc='best', fontsize=15)
plt.xlim([1e-1, 1.5e1])
plt.ylim([0.8, 1.2])
plt.xlabel(r'$r_p$', fontsize=15)
plt.ylabel(r'$w_p(r_p) / w_p^{\rm no\ AB}(r_p)$', fontsize=15)
plt.show()
In [24]:
import scipy.optimize as op
In [25]:
def nll(p):
    # Update the kernel hyperparameters and compute the likelihood.
    # The parameters are log(a) and log(m).
    #ll = 0
    #for emulator, _y in zip(self._emulators, self.y):
    #    emulator.kernel[:] = p
    #    ll += emulator.lnlikelihood(_y, quiet=True)
    emu._emulator.kernel[:] = p
    print(p)
    ll = emu._emulator.lnlikelihood(emu.y, quiet=False)
    # The scipy optimizer doesn't play well with infinities.
    return -ll if np.isfinite(ll) else 1e25

# And the gradient of the objective function.
def grad_nll(p):
    # Update the kernel hyperparameters and compute the gradient of the likelihood.
    #gll = 0
    #for emulator, _y in zip(self._emulators, self.y):
    #    emulator.kernel[:] = p
    #    gll += emulator.grad_lnlikelihood(_y, quiet=True)
    emu._emulator.kernel[:] = p
    gll = emu._emulator.grad_lnlikelihood(emu.y, quiet=True)
    return -gll
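Before handing these to the optimizer, the analytic gradient can be validated against a finite-difference estimate of nll. A minimal sketch using scipy.optimize.check_grad (added cell, not part of the original run; nll will print each test point it is evaluated at):
In [ ]:
# Compare grad_nll to a finite-difference gradient of nll at a random
# log-space point; the returned error should be small.
p_test = np.log(np.random.rand(emu._emulator.kernel.vector.shape[0]))
print(op.check_grad(nll, grad_nll, p_test))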
In [26]:
emu.goodness_of_fit(training_dir)
Out[26]:
In [27]:
p0 = emu._emulator.kernel.vector
In [28]:
# Start from a random point in log hyperparameter space rather than the current kernel vector.
p0 = np.log(np.random.rand(emu._emulator.kernel.vector.shape[0]))
results = op.minimize(nll, p0, jac=grad_nll)
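Before adopting the solution it is worth confirming the minimizer actually converged; a quick check of the standard scipy OptimizeResult fields (added cell, not part of the original run):
In [ ]:
# Confirm convergence before using results.x.
print(results.success, results.message)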
In [30]:
results.x
Out[30]:
In [31]:
p0
Out[31]:
In [32]:
# Load the optimized hyperparameters and re-factorize the GP.
emu._emulator.kernel[:] = results.x
emu._emulator.recompute()
Out[32]:
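With the refit kernel loaded and the GP recomputed, re-running the accuracy test shows whether the optimization improved on the out-of-the-box hyperparameters, assuming goodness_of_fit evaluates with the emulator's current kernel (added cell, not part of the original run):
In [ ]:
# Re-evaluate emulator accuracy with the refit kernel; compare to Out[26] above.
emu.goodness_of_fit(training_dir)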
In [34]:
print(results.x)
print(emu.get_param_names())
In [38]:
emu._emulator.kernel.pars
Out[38]: