In [1]:
from pearce.emulator import OriginalRecipe, ExtraCrispy
from pearce.mocks import cat_dict
import numpy as np
from os import path
In [2]:
import matplotlib
# Agg backend would be needed for headless (no-display) runs; inline is
# used here instead.
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
# Apply seaborn's default figure styling globally.
sns.set()
In [3]:
# Latin-hypercube training outputs for the wp assembly-bias emulator.
# NOTE(review): absolute cluster path -- will not resolve outside this
# filesystem; consider a configurable data directory.
training_dir = '/u/ki/swmclau2/des/PearceLHC_wp_z_corrab_emulator/'
em_method = 'gp'          # emulation method: Gaussian process
split_method = 'random'   # train/test split strategy
In [4]:
# Snapshot scale factor and the corresponding redshift, z = 1/a - 1.
a = 1.0
z = (1.0 / a) - 1.0
In [5]:
# Hold redshift fixed when training the emulator; a fixed 'r' value was
# tried earlier and kept as a commented-out reminder.
fixed_params = {'z':z}#, 'r':0.18477483}
In [6]:
# Build a single-GP emulator from the training directory with z fixed.
emu = OriginalRecipe(training_dir, method = em_method, fixed_params=fixed_params)
In [7]:
# Inspect the radial bin centers the emulator was trained on.
emu.scale_bin_centers
Out[7]:
In [8]:
# Inspect the ordered parameter set; its ordering is relied on below to
# locate the assembly-bias entries in the kernel vector.
emu._ordered_params
Out[8]:
In [9]:
# Inspect the emulator's default hyperparameter initial guess.
emu._get_initial_guess(None)
Out[9]:
In [10]:
import scipy.optimize as op
from itertools import izip
In [11]:
def nll(p):
    """Negative GP log-likelihood as a function of the assembly-bias
    kernel hyperparameters.

    Parameters
    ----------
    p : array-like
        New values for the kernel entries at ``ab_param_idxs`` (log-space,
        per the optimizer setup below); all other kernel entries are left
        untouched.

    Returns
    -------
    float
        ``-lnlikelihood``, or a large finite penalty (1e25) when the
        likelihood is not finite -- the scipy optimizer doesn't play
        well with infinities.
    """
    # Update only the assembly-bias entries of the kernel vector.
    emu._emulator.kernel[ab_param_idxs] = p
    # quiet=True (matching grad_nll) so a failed likelihood evaluation
    # yields a non-finite value handled by the guard below, rather than
    # raising and aborting the optimizer. NOTE(review): confirm george's
    # quiet semantics. A leftover debugging `print p` was removed.
    ll = emu._emulator.lnlikelihood(emu.y, quiet=True)
    return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll(p):
    """Gradient of ``nll``: the negated GP log-likelihood gradient,
    restricted to the assembly-bias kernel entries.

    Sets the kernel entries at ``ab_param_idxs`` to ``p`` (in place),
    then returns ``-grad_lnlikelihood`` sliced to those same indices.
    """
    emu._emulator.kernel[ab_param_idxs] = p
    gradient = emu._emulator.grad_lnlikelihood(emu.y, quiet=True)
    return -gradient[ab_param_idxs]
In [14]:
# Assembly-bias hyperparameters to optimize over. Only the 'param1'
# entries are varied here; the slope/split variants were tried earlier
# and are kept commented out for reference.
ab_param_names = ['mean_occupation_centrals_assembias_param1',
#'mean_occupation_centrals_assembias_slope1',
#'mean_occupation_centrals_assembias_split1',
'mean_occupation_satellites_assembias_param1']#,
#'mean_occupation_satellites_assembias_slope1',
#'mean_occupation_satellites_assembias_split1']
In [15]:
# Kernel-vector indices of the assembly-bias hyperparameters, found by
# position in the emulator's ordered parameter list.
# NOTE(review): the +1 offset presumably skips a leading kernel entry
# (e.g. an amplitude term) -- confirm against the kernel layout.
ab_param_idxs = np.array(
    [emu._ordered_params.keys().index(name) + 1 for name in ab_param_names])
In [16]:
# Starting point for the optimizer: unit value per assembly-bias entry.
# Use an explicit float array -- np.ones_like on the integer index array
# would produce integer ones, the wrong dtype for a continuous
# optimization starting point.
p0 = np.ones(len(ab_param_idxs)) #emu._emulator.kernel.vector[ab_param_idxs]
In [17]:
# Display the optimizer starting point.
p0
Out[17]:
In [ ]:
# Alternative random (log-space) starting point, kept for reference:
#p0 = np.log(np.random.rand(emu._emulator.kernel.vector.shape[0]))
# Minimize the negative log-likelihood over the assembly-bias kernel
# entries, supplying the analytic gradient to Newton-CG.
results = op.minimize(nll, p0, jac=grad_nll, method = 'Newton-CG')
In [19]:
# Report the best-fit (log-space) kernel parameters and whether the
# optimizer reported convergence. Parenthesized single-argument print
# produces identical output in Python 2 and is Python-3 compatible.
print(results.x)
print(results.success)
In [20]:
# The optimized parameters are in log space (see the comment in nll and
# the commented-out np.log starting point above); exponentiate to get
# the physical values.
np.exp(results.x)
Out[20]:
In [ ]: