In [1]:
from pearce.emulator import OriginalRecipe, ExtraCrispy
from pearce.mocks import cat_dict
import numpy as np
from os import path

In [2]:
import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()

In [3]:
# NOTE(review): hardcoded absolute cluster path — parameterize or make
# relative to a configurable data directory if this notebook must run
# anywhere else.
training_dir = '/u/ki/swmclau2/des/PearceLHC_wp_z_corrab_emulator/'

em_method = 'gp'  # Gaussian-process emulator backend
split_method = 'random'  # training-data split strategy, passed to ExtraCrispy below

In [4]:
a = 1.0  # cosmological scale factor
z = 1./a-1.0  # corresponding redshift; z = 0 for a = 1

In [5]:
# Hold redshift fixed so the emulator varies only over the HOD /
# assembly-bias parameters (and r).
fixed_params = {'z':z}#, 'r':0.18477483}

# NOTE(review): the original cell had two statements fused onto one line
# ("n_leaves, n_overlap = 10, 2 emu = ExtraCrispy(...)"), which is a
# SyntaxError — presumably an export artifact. Split back onto two lines.
n_leaves, n_overlap = 10, 2
emu = ExtraCrispy(training_dir, n_leaves, n_overlap, split_method,
                  method=em_method, fixed_params=fixed_params)

In [6]:
emu = OriginalRecipe(training_dir, method = em_method, fixed_params=fixed_params)

In [7]:
emu.scale_bin_centers


Out[7]:
array([  0.09581734,   0.13534558,   0.19118072,   0.27004994,
         0.38145568,   0.53882047,   0.76110414,   1.07508818,
         1.51860241,   2.14508292,   3.03001016,   4.28000311,
         6.04566509,   8.53972892,  12.06268772,  17.0389993 ,
        24.06822623,  33.99727318])

In [8]:
emu._ordered_params


Out[8]:
OrderedDict([('logMmin', (12.5, 13.5)),
             ('sigma_logM', (0.2, 1.0)),
             ('logM0', (10.0, 14.0)),
             ('logM1', (13.0, 16.0)),
             ('alpha', (0.75, 1.25)),
             ('f_c', (0.95, 1.0)),
             ('mean_occupation_satellites_assembias_param1', (-1.0, 1.0)),
             ('mean_occupation_centrals_assembias_param1', (-1.0, 1.0)),
             ('r', (0.095817335000000003, 33.997273184999997))])

In [9]:
emu._get_initial_guess(None)


Out[9]:
{'alpha': 3.63498762588,
 'amp': 1.18212664544,
 'disp_func_slope_centrals': 10.0,
 'disp_func_slope_satellites': 10.0,
 'f_c': 0.327508062386,
 'logM0': 15.8416094906,
 'logM1': 1.66509412286,
 'logMmin': 1.7348042925,
 'mean_occupation_centrals_assembias_param1': 112.3,
 'mean_occupation_centrals_assembias_split1': 123.67,
 'mean_occupation_satellites_assembias_param1': 0.5484,
 'mean_occupation_satellites_assembias_split1': 0.00663,
 'r': 0.306139450843,
 'sigma_logM': 5.36288382789}

In [10]:
import scipy.optimize as op
from itertools import izip

In [11]:
def nll(p):
    """Negative GP log-likelihood as a function of the assembly-bias
    kernel hyperparameters alone (the entries at ab_param_idxs).

    Objective for scipy.optimize.minimize; relies on the module-level
    globals `emu` and `ab_param_idxs`.
    """
    # Write the trial hyperparameters into the kernel vector
    # (george stores them in log-space — see comment in the original:
    # "params are log(a) and log(m)").
    emu._emulator.kernel[ab_param_idxs] = p
    # quiet=True makes a failed factorization return -inf rather than
    # raise, so the finite-value guard below actually fires.  The
    # original used quiet=False (which raises, bypassing the guard) and
    # left a debug `print p` in place; both removed here.
    ll = emu._emulator.lnlikelihood(emu.y, quiet=True)

    # The scipy optimizer doesn't play well with infinities.
    return -ll if np.isfinite(ll) else 1e25

# And the gradient of the objective function.
def grad_nll(p):
    """Gradient of nll w.r.t. the assembly-bias hyperparameters.

    Mirrors nll: writes p into the kernel at ab_param_idxs, then returns
    the negated likelihood gradient restricted to those same indices.
    Relies on the module-level globals `emu` and `ab_param_idxs`.
    """
    emu._emulator.kernel[ab_param_idxs] = p
    full_grad = emu._emulator.grad_lnlikelihood(emu.y, quiet=True)
    return -full_grad[ab_param_idxs]

In [14]:
# Assembly-bias parameters whose kernel hyperparameters we will optimize.
# The commented entries are slope/split parameters deliberately excluded
# from this fit.
ab_param_names = ['mean_occupation_centrals_assembias_param1',
#'mean_occupation_centrals_assembias_slope1',
#'mean_occupation_centrals_assembias_split1',
'mean_occupation_satellites_assembias_param1']#,
#'mean_occupation_satellites_assembias_slope1',
#'mean_occupation_satellites_assembias_split1']

In [15]:
# Kernel-vector indices of the assembly-bias hyperparameters.
# NOTE(review): the +1 presumably skips a leading (amplitude) entry in the
# kernel vector — confirm against emu._emulator.kernel's layout.
ab_param_idxs = np.array([emu._ordered_params.keys().index(name) + 1
                          for name in ab_param_names])

In [16]:
p0 = np.ones_like(ab_param_idxs) #emu._emulator.kernel.vector[ab_param_idxs]

In [17]:
p0


Out[17]:
array([1, 1])

In [ ]:
# Minimize the negative log-likelihood over the assembly-bias
# hyperparameters; Newton-CG uses the analytic gradient grad_nll.
#p0 = np.log(np.random.rand(emu._emulator.kernel.vector.shape[0]))
results = op.minimize(nll, p0, jac=grad_nll, method = 'Newton-CG')

In [19]:
# Best-fit hyperparameters and convergence flag.
# (Here success is False and results.x equals p0 — the optimizer never
# moved off the starting point.)
print(results.x)
print(results.success)


[ 1.  1.]
False

In [20]:
np.exp(results.x)


Out[20]:
array([ 2.71828183,  2.71828183])

In [ ]: