In [1]:
import h5py
from pearce.mocks.kittens import TrainingBox

In [2]:
fname = '/u/ki/swmclau2/des/wp_zheng07/PearceWpCosmo.hdf5'

In [3]:
with h5py.File(fname, 'r') as f:
    hod_pnames=  f.attrs['hod_param_names']
    hod_pvals =  f.attrs['hod_param_vals']

In [4]:
cat = TrainingBox(0)
cat.load(1.0, HOD='zheng07')


/u/ki/swmclau2/.local/lib/python2.7/site-packages/halotools-0.7.dev5005-py2.7-linux-x86_64.egg/halotools/sim_manager/cached_halo_catalog.py:567: H5pyDeprecationWarning: The default file mode will change to 'r' (read-only) in h5py 3.0. To suppress this warning, pass the mode you need to h5py.File(), or set the global default h5.get_config().default_file_mode, or set the environment variable H5PY_DEFAULT_READONLY=1. Available modes are: 'r', 'r+', 'w', 'w-'/'x', 'a'. See the docs for details.
  f = h5py.File(self.log_entry.fname)

In [18]:
param_dict = dict(zip(hod_pnames, hod_pvals[1]))
cat.model.param_dict.update(param_dict)

In [29]:
from scipy.optimize import minimize_scalar
def _add_logMmin(hod_params, cat, nd = 5e-4):
    """
    In the fixed number density case, find the logMmin value that will match the nd given hod_params
    :param: hod_params:
        The other parameters besides logMmin
    :param cat:
        the catalog in question
    :return:
        None. hod_params will have logMmin added to it.
    """
    hod_params['logMmin'] = 13.0 #initial guess
    #cat.populate(hod_params) #may be overkill, but will ensure params are written everywhere
    def func(logMmin, hod_params):
        print logMmin
        hod_params.update({'logMmin':logMmin})
        return (cat.calc_analytic_nd(hod_params, min_ptcl=100) - nd)**2

    res = minimize_scalar(func, bounds = (12, 16), args = (hod_params,), options = {'maxiter':100}, method = 'Bounded')

In [30]:
_add_logMmin(param_dict, cat)


13.52786404500042
14.47213595499958
12.94427190999916
13.561048129965497
13.32919723755664
13.18216884556037
13.23167401698002
13.241236861110862
13.237170023933595
13.23722841943272
13.237212086143547
13.237208556470629
13.237215615816465

In [31]:
param_dict


Out[31]:
{'alpha': 1.051951951951952,
 'conc_gal_bias': 0.7987987987987988,
 'logM0': 13.42182182182182,
 'logM1': 14.033333333333333,
 'logMmin': 13.237215615816465,
 'sigma_logM': 0.48018018018018016}

In [32]:
cat.populate(param_dict, min_ptcl=100)

In [33]:
len(cat.model.mock.galaxy_table)


Out[33]:
519652

In [34]:
5e-4*(1000**3)


Out[34]:
500000.0

In [39]:
cat.calc_analytic_nd(param_dict, min_ptcl=100)


Out[39]:
0.0004999967787108785

In [ ]: