I am trying to estimate a shape noise contribution for my delta sigma estimate. Sukhdeep gave me some code and some instructions for how to compute it, but I'm having a little difficulty translating them, so I'm going to do the tinkering here.

The first thing he said was that I didn't need his code to compute the diagonal contribution:


If you only need to add shape noise to your existing covariance, you can simply use the formula: Shape Noise (variance) = sigma_e^2 / (# of pairs in a bin). This term is diagonal in the covariance and for log bins should scale as 1/rp.

# of pairs in a bin ~ N_lens * N_source * Area_bin / Area_survey
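
As a quick sanity check on that formula, here is a minimal sketch of the diagonal term. The lens count, source density, and the reuse of the rp binning from further down are placeholder assumptions for illustration, not numbers from Sukhdeep or the survey.

In [ ]:
import numpy as np

# Minimal sketch of: shape noise (variance) = sigma_e^2 / (# of pairs in a bin).
# All survey numbers here are placeholder assumptions.
sigma_e = 0.36             # per-component shape dispersion (same value used further down)
N_lens = 1.0e5             # number of lenses (assumed)
n_source = 8.0             # source surface density at the lens plane, per (Mpc/h)^2 (assumed)

rp_bins = np.logspace(-1.0, 1.6, 19)                   # same rp binning used later
area_bin = np.pi * (rp_bins[1:]**2 - rp_bins[:-1]**2)  # annulus area of each rp bin

# pairs per bin ~ N_lens * N_source * Area_bin / Area_survey; writing
# N_source = n_source * Area_survey, the survey area cancels
n_pairs = N_lens * n_source * area_bin

shape_noise_diag = sigma_e**2 / n_pairs  # diagonal covariance entries
# for log bins Area_bin grows like rp^2, so the error bar sqrt(shape_noise_diag)
# falls off roughly like 1/rp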



In [45]:
from matplotlib import pyplot as plt
%matplotlib inline
#import seaborn as sns
#sns.set()
import matplotlib.colors as colors

In [46]:
import numpy as np
# needed for the halo catalog, cosmology, and power spectrum cells below
from nbodykit.source.catalog.halos import HaloCatalog
from nbodykit.source.catalog.file import HDFCatalog
from nbodykit.cosmology import Cosmology
from nbodykit.algorithms import FFTPower
import h5py
import yaml
from scipy.optimize import minimize_scalar

In [47]:
from pearce.mocks.kittens import DarkSky
from pearce.mocks.customHODModels import *

In [48]:
from time import time

def make_LHC(ordered_params, N, seed = None):
    """Return an N-point latin hypercube over the (low, high) ranges in ordered_params."""
    if seed is None:
        seed = int(time())
    np.random.seed(seed)

    points = []
    # by linspacing each parameter and shuffling, I ensure there is only one point in each row, in each dimension.
    for plow, phigh in ordered_params.values():
        point = np.linspace(plow, phigh, num=N)
        np.random.shuffle(point)  # makes the cube random.
        points.append(point)
    return np.stack(points).T
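
A toy call, with a hypothetical two-parameter dict just to show the expected {name: (low, high)} input format (the ranges are made up):

In [ ]:
toy_params = {'sigma_logM': (0.1, 0.6), 'alpha': (0.8, 1.2)}  # made-up ranges
toy_lhc = make_LHC(toy_params, N=5, seed=16)
print(toy_lhc.shape)  # (5, 2): one sample per row, one parameter per column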

In [49]:
def add_logMmin(hod_params, cat):
    """Set logMmin in hod_params so the analytic number density matches the target.

    Relies on module-level nd (the target number density; n_g in this notebook),
    min_ptcl, and logMmin_bounds."""
    hod_params['logMmin'] = 13.0 #initial guess
    #cat.populate(hod_params) #may be overkill, but will ensure params are written everywhere
    def func(logMmin, hod_params):
        hod_params.update({'logMmin':logMmin})
        return (cat.calc_analytic_nd(hod_params, min_ptcl = min_ptcl) - nd)**2

    res = minimize_scalar(func, bounds = logMmin_bounds, args = (hod_params,), options = {'maxiter':100}, method = 'Bounded')

    # assuming this doesn't fail
    #print 'logMmin', res.x
    hod_params['logMmin'] = res.x

In [ ]:
subbox_no = 264
HOD = (Zheng07Cens, Zheng07Sats)
cat = DarkSky(subbox_no, system = 'sherlock')
cat.load_model(1.0, HOD = HOD, hod_kwargs = {'modulate_with_cenocc': True})

In [ ]:
config_fname = 'xi_cosmo_trainer.yaml'

with open(config_fname, 'r') as ymlfile:
    cfg = yaml.safe_load(ymlfile)

In [54]:
n_g = float(cfg['HOD']['fixed_nd'] )
min_ptcl = int(cfg['HOD']['min_ptcl'])
hod_param_ranges = cfg['HOD']['ordered_params']
N = 5
LHC = make_LHC(hod_param_ranges, N, 16)# 23)
hod_dicts = [dict(zip(hod_param_ranges.keys(), vals)) for vals in LHC]
logMmin_bounds = hod_param_ranges['logMmin']
del hod_param_ranges['logMmin']
hod_dicts[0]['logMmin'] = 12.1
#add_logMmin(hod_dicts[0], cat)
cat.model.param_dict.update(hod_dicts[0])
darksky_fname = cat.fname
nbody_cat = HDFCatalog(darksky_fname, root = 'halos/subbox_%03d'%subbox_no)
nbody_cat.columns
nbody_cat['Position'] = np.mod(np.c_[nbody_cat['x'], nbody_cat['y'], nbody_cat['z']], cat.Lbox)
nbody_cat['Velocity'] = np.zeros((len(nbody_cat['x']), 3))
c = Cosmology()
c = c.clone(H0 = cat.cosmology.H0.value, Omega0_b = cat.cosmology.Ob0, Omega0_cdm = cat.cosmology.Om0-cat.cosmology.Ob0)
nbody_halocat = HaloCatalog(nbody_cat, c, cat.redshifts[0], mass = 'm200b')
galcat = nbody_halocat.populate(cat.model, cat.Lbox)
galcat.columns
mesh = galcat.to_mesh(window='tsc', Nmesh=256*4, compensated=True, position='Position', BoxSize = cat.Lbox)

In [ ]:
kmax= 50
kmin= 0.5e-3

In [ ]:
r = FFTPower(mesh, mode='1d', dk=0.005, kmin=kmin)

In [ ]:
k = r.power['k']
p_g = r.power['power'].real

In [ ]:
k.shape

In [ ]:
plt.loglog(k, p_g)

In [ ]:
np.save('./p_g.npy', np.c_[k, p_g])

?? cat.calc_sigma_crit_inv


In [ ]:
rp_bins = np.logspace(-1.0, 1.6, 19) # TODO h's?

In [ ]:
rp_points = (rp_bins[1:]+rp_bins[:-1])/2.0

The numbers below are the defaults, and also what's in Ben Wibking's paper. They're probably OK, but I should figure out where they originate, i.e. what n(z) they correspond to.
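
Once I do pin down a source distribution, something like the following is roughly what computing Sigma_crit would look like for a single lens/source redshift pair with astropy. The redshifts, cosmology, and the physical-vs-comoving convention here are all assumptions for illustration, not what Wibking et al. actually used.

In [ ]:
import numpy as np
from astropy.cosmology import FlatLambdaCDM
from astropy import units as u
from astropy import constants as const

# assumed cosmology and redshifts, purely for illustration
cosmo_ap = FlatLambdaCDM(H0=70, Om0=0.3)
z_l, z_s = 0.27, 0.7

D_l = cosmo_ap.angular_diameter_distance(z_l)
D_s = cosmo_ap.angular_diameter_distance(z_s)
D_ls = cosmo_ap.angular_diameter_distance_z1z2(z_l, z_s)

# physical Sigma_crit = c^2 D_s / (4 pi G D_l D_ls)
sigma_crit_phys = (const.c**2 * D_s / (4 * np.pi * const.G * D_l * D_ls)).to(u.Msun / u.pc**2)
# the comoving convention divides by (1 + z_l)^2; worth checking which one the 4.7e3 assumes
sigma_crit_com = sigma_crit_phys / (1 + z_l)**2
print(sigma_crit_phys, sigma_crit_com)

A real estimate would average Sigma_crit^{-1} over the source n(z), which is exactly the missing piece noted above.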


In [ ]:
sigma_crit = 4.7e3 # TODO, need to assume a source distribution
sigma_e= 0.36# 0.36
sigma_gamma=sigma_e/1.7 # where does the 1.7 come from here? 
n_s= 8 # TODO need to assume a source distribution
shape_noise=sigma_crit**2*sigma_gamma**2/n_s#*cosmo.H_z(z=0.27)/cosmo.c

In [ ]:
g_shot_noise=1./n_g

In [ ]:
g_shot_noise, shape_noise

In [ ]:
# TODO update with sim volume + Pi length
# NOTE: 'cosmo' here is presumably Sukhdeep's cosmology object (astropy-style) and isn't
# defined in this notebook; this survey-based volume is superseded by the sim-volume cell below
area=10000
area_comoving=area*(np.pi/180)**2*cosmo.comoving_distance(z=.27)**2
L_W=500
vol=area_comoving*L_W
vol=vol.value

In [ ]:
L_W = 500 # line-of-sight depth of the volume (the cell above multiplies a comoving area by L_W), but I don't know where 500 comes from
vol = ((cat.Lbox/cat.h)**2)*L_W

In [ ]:
taper_kw=dict({'large_k_lower':10,'large_k_upper':kmax,'low_k_lower':kmin,'low_k_upper':kmin*1.2})

In [ ]:
rmin=.05
rmax=100

In [ ]:
from hankel_transform import hankel_transform

In [ ]:
HT=hankel_transform(rmin=rmin,rmax=rmax,kmax=kmax,j_nu=[2],n_zeros=int(2e5),kmin=kmin)

In [ ]:
r,cov_ggkk =HT.projected_covariance(k_pk = k,pk1= p_g+ g_shot_noise,pk2=np.zeros_like(p_g)+shape_noise,j_nu=2,taper=True,**taper_kw)

In [ ]:
#plt.imshow(cov_ggkk)

In [ ]:
rp_re,cov_ggkk_re=HT.bin_cov(r=r,cov=cov_ggkk,r_bins=rp_bins)

In [ ]:
#plt.imshow(cov_ggkk_re)

In [ ]:
#corr=HT.corr_matrix(cov=cov_ggkk_re)

In [ ]:
#plt.imshow(corr)

In [ ]:
print(rp_re)
np.save('shape_noise_covmat.npy', cov_ggkk_re)