First, import all the modules, such as healpix_util and astropy, needed for analyzing the large-scale structure.
In [1]:
import healpix_util as hu
import astropy as ap
import numpy as np
from astropy.io import fits
from astropy.table import Table
import astropy.io.ascii as ascii
from astropy.constants import c
import matplotlib.pyplot as plt
import math           # bare 'math' (math.pi) is used in the anafast cells below
import math as m
from math import pi
#from scipy.constants import c
import scipy.special as sp
from astroML.decorators import pickle_results
from scipy import integrate
import warnings
from sklearn.neighbors import BallTree
import pickle
import multiprocessing as mp
import time
from aptestmetricdt import *
from aptestmetricdz import *
from scipy.spatial import distance as d
from apcat import *
from progressbar import *
from tqdm import *
from functools import partial
import pymangle
from apdz import *
from apdt import *
from scipy.optimize import curve_fit
#from astroML.datasets import fetch_sdss_specgals
#from astroML.correlation import bootstrap_two_point_angular
%matplotlib inline
In [2]:
# Getting back the objects:
with open('../output/datzAP.pkl') as f: # Python 3: open(..., 'rb')
dat = pickle.load(f)
dat
Out[2]:
Read the data file (taken from http://cosmo.nyu.edu/~eak306/SDSS-LRG.html ), which was converted to ascii with comoving distance etc. in V01; here we read from pkl files for a faster load.
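For reference, a minimal sketch of how such a pkl could have been produced from the ascii catalog (the source file name and the column names 's', 'rar', 'decr' are assumptions, based on how the random catalog is handled later in this notebook):
In [ ]:
# One-time conversion sketch: ascii table -> (s, ra, dec) array -> pkl.
# File and column names here are assumed, not taken from the actual V01 run.
tab = ascii.read('./output/DR7200kLCsrarf.dat')
dat = np.array([tab['s'], tab['rar'], tab['decr']]).transpose()
with open('../output/datzAP.pkl', 'w') as f:  # Python 3: open(..., 'wb')
    pickle.dump(dat, f)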
In [3]:
# Getting back the objects:
with open('../output/rdatzAP.pkl') as f: # Python 3: open(..., 'rb')
datR = pickle.load(f)
datR
Out[3]:
In [4]:
dr2d=np.zeros((10,10))
In [5]:
rng = np.array([[0, 0.02], [0, 0.02]])
In [6]:
%%time
dist0=d.cdist([dat[0],],datR,APdz)[0]
dist1=d.cdist([dat[0],],datR,APzdth)[0]
print np.histogram2d(dist0, dist1,range=rng)
In [7]:
%%time
for i in tqdm(xrange(len(dat))):
dist0=d.cdist([dat[i],],datR,APdz)[0]
dist1=d.cdist([dat[i],],datR,APzdth)[0]
dr2d+=np.histogram2d(dist0, dist1,range=rng)[0]
print dr2d
In [8]:
with open('dr2ddr72v06cdist200k.pkl','w') as f:
pickle.dump(dr2d,f)
dr2d
Out[8]:
In [ ]:
dist0
In [ ]:
len(dist0)
In [ ]:
dist0.size
In [ ]:
dist0.flatten
print dist0
In [ ]:
len(dist0)
In [ ]:
%%time
dist0=d.cdist([dat[0],],dat,APdz)[0]
dist1=d.cdist([dat[0],],dat,APzdth)[0]
print np.histogram2d(dist0, dist1,range=rng)
#print dd2d
In [ ]:
dist0
In [ ]:
dist0=dist0[0]
In [ ]:
dist0[1]
In [ ]:
dist0[1:len(dist0)]
In [ ]:
len(dist0)
In [ ]:
dist0.size
In [ ]:
help(dist0.flatten)
In [ ]:
dist0[0]
In [ ]:
dist0.flatten()
In [ ]:
len(dist0)
In [ ]:
%%time
# Pair counts by repeatedly comparing the first row against the rest, then
# deleting it; dist[0] would be the self pair, so it is excluded from the
# histogram.
dd2d=np.zeros((10,10))
while len(dat)>0:
    dist=np.zeros((len(dat),2))
    i=len(dat)-1
    while i>0:
        dist[i]=APcat(dat[0],dat[i])
        i-=1
    dd2d+=np.histogram2d(dist[1:,0], dist[1:,1],range=rng)[0]
    dat=np.delete(dat,0,axis=0)
In [ ]:
dd2d
In [ ]:
%%time
# Same pattern, one pair at a time; np.histogram2d needs sequences and
# returns a (counts, xedges, yedges) tuple, so take [0] before accumulating.
while len(dat)>0:
    i=len(dat)-1
    while i>0:
        dist=500*APcat(dat[0],dat[i])
        dd2d+=np.histogram2d([dist[0]], [dist[1]], bins = 10, range = rng)[0]
        i-=1
    dat=np.delete(dat,0,axis=0)
In [ ]:
def binDists2d(dat, dz = lambda u, v: np.abs(u[:,0]-v[:,0]), zdth = lambda u, v: 0.5*(u[:,0]+v[:,0])*np.arccos(np.sin(u[:,2])*np.sin(v[:,2])+np.cos(u[:,2])*np.cos(v[:,2])*np.cos(u[:,1]-v[:,1]))):
    # Vectorized pair binning: dat rows are (z, ra, dec); u and v are the
    # (Npairs, 3) arrays holding the two members of every unique pair, so the
    # lambdas must index columns (u[:,0]), not rows.
    i, j = np.triu_indices(dat.shape[0], 1)
    dist0 = dz(dat[i], dat[j])
    dist1 = zdth(dat[i], dat[j])
    rng = np.array([[0, 0.02], [0, 0.02]])
    return np.histogram2d(dist0, dist1, bins = 10, range = rng)
In [ ]:
td=np.random.rand(1000,3)
In [ ]:
binDists2d(td)
In [ ]:
%%time
binDists2d(dat)
In [ ]:
def binDists2d(dat, f1 = 'euclidean', f2 = 'cosine'):
    # Variant using scipy's built-in pdist metrics for both axes.
    dist0 = d.pdist(dat, f1)
    dist1 = d.pdist(dat, f2)
    return np.histogram2d(dist0, dist1, bins = 10)
In [ ]:
dd2d
In [ ]:
plt.contour(dd2d)
In [ ]:
with open('DR72DD2DMI.pkl', 'w') as f:
pickle.dump(dd2d,f)
In [ ]:
with open('DR72DD2DMI.pkl') as f:
DD2D = pickle.load(f)
DD2D
In [ ]:
dzbin=zdthbin=np.arange(0.002,0.022,0.002)
In [ ]:
plt.contour(dzbin,zdthbin,dd2d)
In [ ]:
dzbin
In [ ]:
plt.contour(dzbin,zdthbin,dd2d,levels=[ 5041., 13955., 23161., 31557., 38796., 46402., 53552.,
60708., 67437., 74549.])
In [ ]:
while len(dat)>1:
    for i in range(1,len(dat)):
        dist=APcat(dat[0],dat[i])
        ind0=int(dist[0]/0.002)
        ind1=int(dist[1]/0.002)
        if ind0<=9 and ind1<=9:
            dd2d[ind0,ind1]+=1
    dat=np.delete(dat,0,0)
print len(dat)
print dd2d
In [ ]:
bins=np.arange(0,0.022,0.002)
print bins
In [ ]:
%%time
from apmetric6 import *
BTdat6 = BallTree(dat,metric='pyfunc',func=APmetric6,leaf_size=5)
In [ ]:
BTdat6
In [ ]:
%%time
per6=BTdat6.two_point_correlation(dat,bins)
In [ ]:
print per6
One has to change the if condition in the metric definition from dz<=0.002 to 0.002<dz<=0.004, and so on, for each successive bin.
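For clarity, a sketch of what such a windowed metric could look like (the actual APmetric4/APmetric6 definitions live in apmetric4.py/apmetric6.py and are not shown in this notebook, so the body below is an assumption built from the dz and z*dtheta definitions used elsewhere here):
In [ ]:
# Hypothetical windowed pair function: points are (z, ra, dec) with angles in
# radians; returns the z*dtheta separation only when dz falls in the chosen
# window, otherwise a value outside the binning range so the pair drops out.
def APmetric_window(p1, p2, dzmin=0.002, dzmax=0.004):
    dz = abs(p1[0] - p2[0])
    if dzmin < dz <= dzmax:
        costheta = (np.sin(p1[2])*np.sin(p2[2]) +
                    np.cos(p1[2])*np.cos(p2[2])*np.cos(p1[1]-p2[1]))
        return 0.5*(p1[0]+p2[0])*np.arccos(costheta)
    return 1.0e3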
In [ ]:
%%time
from apmetric4 import *
BTdat4 = BallTree(dat,metric='pyfunc',func=APmetric4,leaf_size=5)
In [ ]:
BTdat4
In [ ]:
%%time
per4=BTdat4.two_point_correlation(dat,bins)
In [ ]:
print per4
In [ ]:
%%time
from apmetric3 import *
BTdat3 = BallTree(dat,metric='pyfunc',func=APmetric3,leaf_size=5)
BTdat3
In [ ]:
%%time
per3=BTdat3.two_point_correlation(dat,bins)
In [ ]:
print per3
print bins
In [ ]:
Nbins=len(bins)
In [ ]:
Nbins
In [ ]:
LCfmetric=LCDMmetric
In [ ]:
LCfmetric(dat[0],dat[1])
In [ ]:
%%time
start_time=time.time()
counts_DD=BTDLC.two_point_correlation(dat,bins)
print counts_DD
end_time=time.time()
tottime=end_time-start_time
print "Total run time:"
print tottime
with open('BTDcDDLCf.pkl', 'w') as f:
pickle.dump(counts_DD,f)
In [ ]:
with open('BTDcDDLCf.pkl') as f:
counts_DD = pickle.load(f)
counts_DD
In [ ]:
DD=np.diff(counts_DD)
In [ ]:
DD
In [ ]:
plt.plot(bins[1:len(bins)],DD,'ro-')
BallTree.two_point_correlation works almost 10 times faster with leaf_size=5! Going with it to the random catalog.
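A quick way to check the leaf_size effect on a subsample (a sketch; the timings will vary by machine and are not a rigorous benchmark):
In [ ]:
# Compare tree build + pair counting for a default-ish and a small leaf size.
sub = dat[:5000]
for ls in (40, 5):
    t0 = time.time()
    bt = BallTree(sub, metric='pyfunc', func=LCfmetric, leaf_size=ls)
    bt.two_point_correlation(sub, bins)
    print "leaf_size=%d: %.1f s" % (ls, time.time() - t0)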
In [ ]:
dataR=ascii.read("./output/rand200kdr72.dat")
In [ ]:
dataR
In [ ]:
len(dataR)
In [ ]:
dataR=ascii.read("./output/rDR7200kLCsrarf.dat")
In [ ]:
dataR
In [ ]:
dataR.remove_column('z')
dataR.remove_column('ra')
dataR.remove_column('dec')
In [ ]:
dataR
In [ ]:
rs=np.array(dataR['s'])
rrar=np.array(dataR['rar'])
rdecr=np.array(dataR['decr'])
In [ ]:
datR=np.array([rs,rrar,rdecr])
In [ ]:
datR
In [ ]:
datR.reshape(3,len(dataR))
In [ ]:
datR=datR.transpose()
In [ ]:
datR
In [ ]:
# Saving the objects:
with open('rDR7200kLCsrarf.pkl', 'w') as f: # Python 3: open(..., 'wb')
pickle.dump(datR, f)
In [ ]:
# Getting back the objects:
with open('rDR7200kLCsrarf.pkl') as f: # Python 3: open(..., 'rb')
datR = pickle.load(f)
datR
In [ ]:
%%time
BT_RLC = BallTree(datR,metric='pyfunc',func=LCfmetric,leaf_size=5)
with open('BTR200kdatsLCf.pkl', 'w') as f:
pickle.dump(BT_RLC,f)
In [ ]:
with open('BTR200kdatsLCf.pkl') as f:
BTRLC = pickle.load(f)
BTRLC
In [ ]:
%%time
start_time=time.time()
counts_RR=BTRLC.two_point_correlation(datR,bins)
print counts_RR
end_time=time.time()
tottime=end_time-start_time
print "Total run time:"
print tottime
with open('BTR200kcRRLCf.pkl', 'w') as f:
pickle.dump(counts_RR,f)
with open('BTR200kcRRLCf.pkl') as f:
counts_RR = pickle.load(f)
counts_RR
In [ ]:
counts_RR
In [ ]:
RR=np.diff(counts_RR)
In [ ]:
RR
In [ ]:
plt.plot(bins[1:len(bins)],RR,'bo-')
In [ ]:
RR_zero = (RR == 0)
RR[RR_zero] = 1
In [ ]:
%%time
start_time=time.time()
counts_DR=BTRLC.two_point_correlation(dat,bins)
print counts_DR
end_time=time.time()
tottime=end_time-start_time
print "Total run time:"
print tottime
with open('BTR200kcDRLCf.pkl', 'w') as f:
pickle.dump(counts_DR,f)
In [ ]:
with open('BTR200kcDRLCf.pkl') as f:
counts_DR = pickle.load(f)
counts_DR
In [ ]:
DR=np.diff(counts_DR)
In [ ]:
DR
In [ ]:
corrells=(4.0 * DD - 4.0 * DR + RR) / RR
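The hard-coded factors of 4 correspond to $(N_R/N_D)^2$ and $2\,N_R/N_D$ in the Landy-Szalay estimator $\xi = (f^2 DD - 2f\,DR + RR)/RR$ with $f = N_R/N_D \approx 2$; the general form (matching the two_point implementation later in this notebook) is:
In [ ]:
# General Landy-Szalay estimator with the random/data ratio made explicit.
factor = len(datR) * 1.0 / len(dat)
corrells_general = (factor**2 * DD - 2.0*factor*DR + RR) / RR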
In [ ]:
corrells
In [ ]:
plt.plot(bins[1:len(bins)],corrells,'go-')
In [ ]:
plt.plot(bins[1:len(bins)],bins[1:len(bins)]*bins[1:len(bins)]*corrells*(c*1e-5)**2,'go-')
In [ ]:
plt.plot(bins[2:len(bins)],bins[2:len(bins)]*bins[2:len(bins)]*corrells[1:len(bins)]*(c*1e-5)**2,'go-')
In [ ]:
plt.plot(bins[2:len(bins)],corrells[1:len(bins)],'go-')
In [ ]:
plt.plot(bins[2:len(bins)],corrells[1:len(bins)],'go-')
plt.savefig("correl2xlsLCf.pdf")
In [ ]:
plt.plot(bins[2:len(bins)]*c/1e5,corrells[1:len(bins)],'bo-')
plt.savefig("correl2x1lsLCf.pdf")
In [ ]:
plt.yscale('log')
plt.plot(bins[1:len(bins)]*c/1e5,corrells,'bo-')
plt.savefig("correllsfiglogLCf.pdf")
In [ ]:
plt.yscale('log')
plt.plot(bins[2:len(bins)]*c/1e5,corrells[1:len(bins)],'ro-')
plt.savefig("correllslog2xLCf.pdf")
In [ ]:
plt.yscale('log')
plt.xscale('log')
plt.plot(bins[1:len(bins)]*c/1e5,corrells,'bo-')
plt.savefig("correllsloglogLCf.pdf")
In [ ]:
from functools import partial
# Scratch multiprocessing example (adapted from a generic recipe);
# RAW_DATASET and volume_ids are placeholders, not defined in this notebook.
def harvester(text, case):
    X = case[0]
    return text + str(X)
partial_harvester = partial(harvester, case=RAW_DATASET)
partial_qr=partial(BTD.query_radius,count_only=True)
if __name__ == '__main__':
    pool = mp.Pool(processes=6)
    case_data = RAW_DATASET
    pool.map(partial_harvester, case_data, 1)
    pool.close()
    pool.join()
mapfunc = partial(BTD.query_radius, count_only=True)
map(mapfunc, volume_ids)
In [ ]:
#ascii.write("DR72DDbinned.dat",(bins[1:len(bins)],DDresult))
start_time=time.time()
@pickle_results("DR72DDmp1.pkl")
def ddcal(BTD,dat,bins,Nbins):
counts_DD=np.zeros(Nbins)
for i in tqdm(range(Nbins)):
counts_DD[i]=np.sum(BTD.query_radius(dat, bins[i],count_only=True))
DD = np.diff(counts_DD)
print counts_DD
print DD
return DD
def mf_wrap(args):
return ddcal(*args)
pool=mp.Pool(8)
arg=[(BTD,dat,bins,Nbins)]
%timeit DDresult=pool.map(mf_wrap,arg)
#DDresult = ddcal(BTD,dat,bins,Nbins)
end_time=time.time()
tottime=end_time-start_time
print "Total run time:"
print tottime
In [ ]:
%timeit dat
In [ ]:
DDresult[0]
In [ ]:
DDresult[1]
In [ ]:
plt.plot(bins[1:len(bins)],DDresult[0],'ro')
In [ ]:
def myfun(a,b):
print a + b
return a+b
def mf_wrap(args):
return myfun(*args)
p = mp.Pool(4)
fl = [(a,b) for a in range(3) for b in range(2)]
p.map(mf_wrap, fl)
In [ ]:
counts_DD=np.zeros(Nbins)
for i in range(Nbins):
counts_DD[i]=np.sum(BTD.query_radius(dat, bins[i],count_only=True))
DD = np.diff(counts_DD)
In [ ]:
print counts_DD
print DD
In [ ]:
plt.plot(bins[1:len(bins)],DD,'ro')
In [ ]:
dataR=fits.open("/Users/rohin/Downloads/random-DR7-Full.fits")
In [ ]:
dataR=dataR[1].data
In [ ]:
len(dataR)
In [ ]:
tdata=np.array(data)
In [ ]:
type(tdata[4])
In [ ]:
tdata.shape
In [ ]:
tdata.shape
In [ ]:
tdata=np.atleast_2d(tdata)
In [ ]:
tdata.shape
In [ ]:
tdata.reshape(len(tdata),3)
In [ ]:
tdata=np.asarray(data)
tdata=tdata.transpose()
In [ ]:
tdata
In [ ]:
len(tdata)
In [ ]:
stime=time.time()
tpcf=BTD.two_point_correlation(dat,bins)
print time.time()-stime
print tpcf
plt.plot(bins,tpcf)
In [ ]:
stime=time.time()
tpcfd=BTD.two_point_correlation(dat,bins,dualtree=True)
print time.time()-stime
print tpcfd
plt.plot(bins,tpcfd)
In [ ]:
X
In [ ]:
np.random.seed(0)
X = np.random.random((30,3))
r = np.linspace(0, 1, 10)
tree = BallTree(X,metric='pyfunc',func=LCDMmetric)
s = pickle.dumps(tree)
treedump = pickle.loads(s)
treedump.two_point_correlation(X,r)
In [ ]:
dr7fdat=np.array([data['s'][0:300], data['rar'][0:300], data['decr'][0:300]])
dr7fdat
In [ ]:
dr7fdat[2]
In [ ]:
def LCDMmetric(p1,p2):
    # Points are (z, ra, dec) with angles in radians.
    z1,ra1,dec1=p1
    z2,ra2,dec2=p2
    costheta=m.sin(dec1)*m.sin(dec2)+m.cos(dec1)*m.cos(dec2)*m.cos(ra1-ra2)
    s1=DC_LCDM(z1)
    s2=DC_LCDM(z2)
    return np.sqrt(s1**2+s2**2-2.0*s1*s2*costheta)
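DC_LCDM is provided by the helper modules imported at the top and its definition is not shown in this notebook; a minimal sketch of a flat-$\Lambda$CDM comoving distance in units of $c/H_0$, assuming $\Omega_m=0.3$ and $\Omega_\Lambda=0.7$, would be:
In [ ]:
# Sketch: D_C(z) = \int_0^z dz'/E(z') with E(z) = sqrt(Om*(1+z)^3 + OL), in
# units of the Hubble distance c/H0. The real DC_LCDM may use different units
# and cosmological parameters.
def DC_LCDM_sketch(z, Om=0.3, OL=0.7):
    integrand = lambda zp: 1.0/m.sqrt(Om*(1.0+zp)**3 + OL)
    return integrate.quad(integrand, 0.0, z)[0]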
In [ ]:
#fdata=fits.open("/Users/rohin/Downloads/DR7-Full.fits")
In [ ]:
#fdata.writeto("./output/DR7fulltrim.fits")
In [ ]:
fdata=fits.open("./output/DR7fulltrim.fits")
In [ ]:
cols=fdata[1].columns
In [ ]:
cols.del_col('ZTYPE')
In [ ]:
cols.del_col('SECTOR')
cols.del_col('FGOTMAIN')
cols.del_col('QUALITY')
cols.del_col('ISBAD')
cols.del_col('M')
cols.del_col('MMAX')
cols.del_col('ILSS')
cols.del_col('ICOMB')
cols.del_col('VAGC_SELECT')
cols.del_col('LSS_INDEX')
cols.del_col('FIBERWEIGHT')
cols.del_col('PRIMTARGET')
cols.del_col('MG')
cols.del_col('SECTOR_COMPLETENESS')
cols.del_col('COMOV_DENSITY')
cols.del_col('RADIAL_WEIGHT')
In [ ]:
fdata[1].columns
In [ ]:
fdata.writeto("./output/DR7fullzradec.fits")
In [ ]:
fdat=fits.open("./output/DR7fullzradec.fits")
In [ ]:
fdat[1].columns
In [ ]:
fdat[1].data['Z']
In [ ]:
fdat[1].data['RA']
In [ ]:
comovlcdm=DC_LCDM(fdat[1].data['Z'])
In [ ]:
fdat[1].data['Z']
In [ ]:
comovlcdm
In [ ]:
comovlcdm.dtype
In [ ]:
#cols=fdat[1].columns
In [ ]:
nc=fits.Column(name='COMOV',format='D',array=comovlcdm)
In [ ]:
nc1=fits.Column(name='COMOV',format='D')
In [ ]:
fdata[1].data['Z']
In [ ]:
fdata[1].data['RA']
In [ ]:
nc
In [ ]:
nc.dtype
In [ ]:
#cols.add_col(nc)
In [ ]:
fdat[1].columns
In [ ]:
fdat[1].columns.info()
In [ ]:
fdat[1].columns.add_col(nc1)
In [ ]:
fdat[1].data['COMOV']=comovlcdm
In [ ]:
comovlcdm
In [ ]:
fdat[1].data['Z']
In [ ]:
fdat[1].data['COMOV']
In [ ]:
fdat[1].data['RA']
In [ ]:
fdat[1].data['RA']=fdat[1].data['RA']*pi/180.0
In [ ]:
comovlcdm=DC_LCDM(fdat[1].data['Z'])
comovlcdm
Random catalog, created based on the survey limits, also taken from http://cosmo.nyu.edu/~eak306/SDSS-LRG.html
In [ ]:
dataR=fits.open("/Users/rohin/Downloads/random-DR7-Full.fits")
In [ ]:
dataR
In [ ]:
dataR=dataR[1].data
In [ ]:
len(dataR)
In [ ]:
NSIDE=512
dr72hpix=hu.HealPix("ring",NSIDE)
In [ ]:
pixdata = open("./output/pixdatadr72VAGCfullrand.dat",'w')
pixdata.write("z\t pix \n")
# use the random catalog's own columns throughout (the original loop mixed
# data and dataR) and include the last row
for i in range(0,len(dataR)):
    pixdata.write("%f\t" %dataR['z'][i])
    pixdata.write("%d\n" %dr72hpix.eq2pix(dataR['ra'][i],dataR['dec'][i]))
pixdata.close()
In [ ]:
pixdata = ascii.read("./output/pixdatadr72VAGCfullrand.dat")
hpixdata=np.array(np.zeros(hu.nside2npix(NSIDE)))
for j in range(len(pixdata)):
hpixdata[pixdata[j]['pix']]+=1
In [ ]:
hpixdata
In [ ]:
hu.mollview(hpixdata,rot=180)
In [ ]:
hu.orthview(hpixdata)
In [ ]:
"""
Tools for computing two-point correlation functions.
"""
#from .utils import check_random_state
# From scikit-learn utilities:
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (int, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
# Check if scikit-learn's two-point functionality is available.
# This was added in scikit-learn version 0.14
try:
from sklearn.neighbors import KDTree
sklearn_has_two_point = True
except ImportError:
import warnings
sklearn_has_two_point = False
def uniform_sphere(RAlim, DEClim, size=1):
"""Draw a uniform sample on a sphere
Parameters
----------
RAlim : tuple
select Right Ascension between RAlim[0] and RAlim[1]
units are degrees
DEClim : tuple
select Declination between DEClim[0] and DEClim[1]
size : int (optional)
the size of the random arrays to return (default = 1)
Returns
-------
RA, DEC : ndarray
the random sample on the sphere within the given limits.
arrays have shape equal to size.
"""
zlim = np.sin(np.pi * np.asarray(DEClim) / 180.)
z = zlim[0] + (zlim[1] - zlim[0]) * np.random.random(size)
DEC = (180. / np.pi) * np.arcsin(z)
RA = RAlim[0] + (RAlim[1] - RAlim[0]) * np.random.random(size)
return RA, DEC
def ra_dec_to_xyz(ra, dec):
"""Convert ra & dec to Euclidean points
Parameters
----------
ra, dec : ndarrays
Returns
x, y, z : ndarrays
"""
sin_ra = np.sin(ra * np.pi / 180.)
cos_ra = np.cos(ra * np.pi / 180.)
sin_dec = np.sin(np.pi / 2. - dec * np.pi / 180.)
cos_dec = np.cos(np.pi / 2. - dec * np.pi / 180.)
return (cos_ra * sin_dec,
sin_ra * sin_dec,
cos_dec)
def angular_dist_to_euclidean_dist(D, r=1):
"""convert angular distances to euclidean distances"""
return 2 * r * np.sin(0.5 * D * np.pi / 180.)
def two_point(data, bins, method='standard',
data_R=None, random_state=None):
"""Two-point correlation function
Parameters
----------
data : array_like
input data, shape = [n_samples, n_features]
bins : array_like
bins within which to compute the 2-point correlation.
shape = Nbins + 1
method : string
"standard" or "landy-szalay".
data_R : array_like (optional)
if specified, use this as the random comparison sample
random_state : integer, np.random.RandomState, or None
specify the random state to use for generating background
Returns
-------
corr : ndarray
the estimate of the correlation function within each bin
shape = Nbins
"""
data = np.asarray(data)
bins = np.asarray(bins)
rng = check_random_state(random_state)
if method not in ['standard', 'landy-szalay']:
raise ValueError("method must be 'standard' or 'landy-szalay'")
if bins.ndim != 1:
raise ValueError("bins must be a 1D array")
if data.ndim == 1:
data = data[:, np.newaxis]
elif data.ndim != 2:
raise ValueError("data should be 1D or 2D")
n_samples, n_features = data.shape
Nbins = len(bins) - 1
# shuffle all but one axis to get background distribution
if data_R is None:
data_R = data.copy()
for i in range(n_features - 1):
rng.shuffle(data_R[:, i])
else:
data_R = np.asarray(data_R)
if (data_R.ndim != 2) or (data_R.shape[-1] != n_features):
raise ValueError('data_R must have same n_features as data')
factor = len(data_R) * 1. / len(data)
if sklearn_has_two_point:
# Fast two-point correlation functions added in scikit-learn v. 0.14
KDT_D = KDTree(data)
KDT_R = KDTree(data_R)
counts_DD = KDT_D.two_point_correlation(data, bins)
counts_RR = KDT_R.two_point_correlation(data_R, bins)
else:
warnings.warn("Version 0.3 of astroML will require scikit-learn "
"version 0.14 or higher for correlation function "
"calculations. Upgrade to sklearn 0.14+ now for much "
"faster correlation function calculations.")
BT_D = BallTree(data)
BT_R = BallTree(data_R)
counts_DD = np.zeros(Nbins + 1)
counts_RR = np.zeros(Nbins + 1)
for i in range(Nbins + 1):
counts_DD[i] = np.sum(BT_D.query_radius(data, bins[i],
count_only=True))
counts_RR[i] = np.sum(BT_R.query_radius(data_R, bins[i],
count_only=True))
DD = np.diff(counts_DD)
RR = np.diff(counts_RR)
# check for zero in the denominator
RR_zero = (RR == 0)
RR[RR_zero] = 1
if method == 'standard':
corr = factor ** 2 * DD / RR - 1
elif method == 'landy-szalay':
if sklearn_has_two_point:
counts_DR = KDT_R.two_point_correlation(data, bins)
else:
counts_DR = np.zeros(Nbins + 1)
for i in range(Nbins + 1):
counts_DR[i] = np.sum(BT_R.query_radius(data, bins[i],
count_only=True))
DR = np.diff(counts_DR)
corr = (factor ** 2 * DD - 2 * factor * DR + RR) / RR
corr[RR_zero] = np.nan
return corr
def bootstrap_two_point(data, bins, Nbootstrap=10,
method='standard', return_bootstraps=False,
random_state=None):
"""Bootstrapped two-point correlation function
Parameters
----------
data : array_like
input data, shape = [n_samples, n_features]
bins : array_like
bins within which to compute the 2-point correlation.
shape = Nbins + 1
Nbootstrap : integer
number of bootstrap resamples to perform (default = 10)
method : string
"standard" or "landy-szalay".
return_bootstraps: bool
if True, return full bootstrapped samples
random_state : integer, np.random.RandomState, or None
specify the random state to use for generating background
Returns
-------
corr, corr_err : ndarrays
the estimate of the correlation function and the bootstrap
error within each bin. shape = Nbins
"""
data = np.asarray(data)
bins = np.asarray(bins)
rng = check_random_state(random_state)
if method not in ['standard', 'landy-szalay']:
raise ValueError("method must be 'standard' or 'landy-szalay'")
if bins.ndim != 1:
raise ValueError("bins must be a 1D array")
if data.ndim == 1:
data = data[:, np.newaxis]
elif data.ndim != 2:
raise ValueError("data should be 1D or 2D")
if Nbootstrap < 2:
raise ValueError("Nbootstrap must be greater than 1")
n_samples, n_features = data.shape
# get the baseline estimate
corr = two_point(data, bins, method=method, random_state=rng)
bootstraps = np.zeros((Nbootstrap, len(corr)))
for i in range(Nbootstrap):
indices = rng.randint(0, n_samples, n_samples)
bootstraps[i] = two_point(data[indices, :], bins, method=method,
random_state=rng)
# use masked std dev in case of NaNs
corr_err = np.asarray(np.ma.masked_invalid(bootstraps).std(0, ddof=1))
if return_bootstraps:
return corr, corr_err, bootstraps
else:
return corr, corr_err
def two_point_angular(ra, dec, bins, method='standard', random_state=None):
"""Angular two-point correlation function
A separate function is needed because angular distances are not
euclidean, and random sampling needs to take into account the
spherical volume element.
Parameters
----------
ra : array_like
input right ascension, shape = (n_samples,)
dec : array_like
input declination
bins : array_like
bins within which to compute the 2-point correlation.
shape = Nbins + 1
method : string
"standard" or "landy-szalay".
random_state : integer, np.random.RandomState, or None
specify the random state to use for generating background
Returns
-------
corr : ndarray
the estimate of the correlation function within each bin
shape = Nbins
"""
ra = np.asarray(ra)
dec = np.asarray(dec)
rng = check_random_state(random_state)
if method not in ['standard', 'landy-szalay']:
raise ValueError("method must be 'standard' or 'landy-szalay'")
if bins.ndim != 1:
raise ValueError("bins must be a 1D array")
if (ra.ndim != 1) or (dec.ndim != 1) or (ra.shape != dec.shape):
raise ValueError('ra and dec must be 1-dimensional '
'arrays of the same length')
n_features = len(ra)
Nbins = len(bins) - 1
# draw a random sample with N points
ra_R, dec_R = uniform_sphere((min(ra), max(ra)),
(min(dec), max(dec)),
2 * len(ra))
data = np.asarray(ra_dec_to_xyz(ra, dec), order='F').T
data_R = np.asarray(ra_dec_to_xyz(ra_R, dec_R), order='F').T
# convert spherical bins to cartesian bins
bins_transform = angular_dist_to_euclidean_dist(bins)
return two_point(data, bins_transform, method=method,
data_R=data_R, random_state=rng)
def bootstrap_two_point_angular(ra, dec, bins, method='standard',
Nbootstraps=10, random_state=None):
# type: (object, object, object, object, object, object) -> object
"""Angular two-point correlation function
A separate function is needed because angular distances are not
euclidean, and random sampling needs to take into account the
spherical volume element.
Parameters
----------
ra : array_like
input right ascension, shape = (n_samples,)
dec : array_like
input declination
bins : array_like
bins within which to compute the 2-point correlation.
shape = Nbins + 1
method : string
"standard" or "landy-szalay".
Nbootstraps : int
number of bootstrap resamples
random_state : integer, np.random.RandomState, or None
specify the random state to use for generating background
Returns
-------
corr : ndarray
the estimate of the correlation function within each bin
shape = Nbins
dcorr : ndarray
error estimate on dcorr (sample standard deviation of
bootstrap resamples)
bootstraps : ndarray
The full sample of bootstraps used to compute corr and dcorr
"""
ra = np.asarray(ra)
dec = np.asarray(dec)
rng = check_random_state(random_state)
if method not in ['standard', 'landy-szalay']:
raise ValueError("method must be 'standard' or 'landy-szalay'")
if bins.ndim != 1:
raise ValueError("bins must be a 1D array")
if (ra.ndim != 1) or (dec.ndim != 1) or (ra.shape != dec.shape):
raise ValueError('ra and dec must be 1-dimensional '
'arrays of the same length')
n_features = len(ra)
Nbins = len(bins) - 1
data = np.asarray(ra_dec_to_xyz(ra, dec), order='F').T
# convert spherical bins to cartesian bins
bins_transform = angular_dist_to_euclidean_dist(bins)
bootstraps = []
for i in range(Nbootstraps):
# draw a random sample with N points
ra_R, dec_R = uniform_sphere((min(ra), max(ra)),
(min(dec), max(dec)),
2 * len(ra))
data_R = np.asarray(ra_dec_to_xyz(ra_R, dec_R), order='F').T
if i > 0:
# random sample of the data
ind = np.random.randint(0, data.shape[0], data.shape[0])
data_b = data[ind]
else:
data_b = data
bootstraps.append(two_point(data_b, bins_transform, method=method,
data_R=data_R, random_state=rng))
bootstraps = np.asarray(bootstraps)
corr = np.mean(bootstraps, 0)
corr_err = np.std(bootstraps, 0, ddof=1)
return corr, corr_err, bootstraps
In [ ]:
sklearn_has_two_point
In [ ]:
help(KDTree)
In [ ]:
dataxyz=ra_dec_to_xyz(data['ra'],data['dec'])
In [ ]:
dataxyz=np.asarray(dataxyz)
In [ ]:
dataxyz=dataxyz.transpose()
In [ ]:
dataxyz
In [ ]:
dataxyzR=ra_dec_to_xyz(dataR['ra'],dataR['dec'])
In [ ]:
dataxyzR=np.asarray(dataxyzR)
In [ ]:
dataxyzR=dataxyzR.transpose()
In [ ]:
dataxyzR
In [ ]:
bins=np.arange(0.0,1.05,0.05)
In [ ]:
bins
In [ ]:
#@pickle_results("tpcf_std.pkl")
tpcf=two_point(dataxyz,bins,method='standard',data_R=dataxyzR, random_state=None)
In [ ]:
tpcf
In [ ]:
plt.plot(bins[1:],tpcf,'ro')
In [ ]:
tpcfam=two_point(dataxyz,bins,method='standard',data_R=None, random_state=None)
In [ ]:
tpcfam
In [ ]:
plt.plot(bins[1:],tpcfam,'bo')
In [ ]:
bins2=np.arange(0.2,0.6,0.02)
In [ ]:
tpcfamb2=two_point(dataxyz,bins2,method='standard',data_R=None, random_state=None)
In [ ]:
plt.plot(bins2[1:],tpcfamb2,'go')
The above doesn't show any BAO feature. It used the built-in astroML method to generate the random catalog by shuffling the original data's contents. That way all of the random points fall in the same survey area and adhere to all the filtering criteria, and the ratio of data points to random points is 1, instead of the large number we get if we take an existing random catalog or create one.
In [ ]:
rng = check_random_state(None)
n_samples, n_features = dataxyz.shape
Nbins = len(bins) - 1
# shuffle all but one axis to get background distribution
data_Rxyz = dataxyz.copy()
print data_Rxyz
for i in range(n_features - 1):
rng.shuffle(data_Rxyz[:, i])
print data_Rxyz
Let's see how it looks with a healpix map.
In [ ]:
NSIDE=512
dr72hpix=hu.HealPix("ring",NSIDE)
In [ ]:
import math as m
def cart2sph(x,y,z):
XsqPlusYsq = x**2 + y**2
r = m.sqrt(XsqPlusYsq + z**2) # r
elev = m.atan2(z,m.sqrt(XsqPlusYsq)) # theta
az = m.atan2(y,x) # phi
return r, elev, az
def cart2sphA(pts):
return np.array([cart2sph(x,y,z) for x,y,z in pts])
def appendSpherical(xyz):
    return np.hstack((xyz, cart2sphA(xyz)))
In [ ]:
ang=cart2sphA(data_Rxyz)
In [ ]:
ang
In [ ]:
ang.shape
In [ ]:
#ang.resize((105831, 2))
np.squeeze(ang, axis=None)
In [ ]:
help(ang.squeeze)
In [ ]:
ang2=ang[:,1:]
In [ ]:
ang2
In [ ]:
ang2.shape
In [ ]:
ang2[2,0]
In [ ]:
pixdata = open("./output/pixdatadr72VAGCfullrandam.dat",'w')
pixdata.write("pix \n")
for i in range(0,len(ang2)):
    #pixdata.write("%f\t" %data['z'][i])
    pixdata.write("%d\n" %dr72hpix.ang2pix(ang2[i,0],ang2[i,1]))
pixdata.close()
In [ ]:
pixdata = ascii.read("./output/pixdatadr72VAGCfullrandam.dat")
hpixdata=np.array(np.zeros(hu.nside2npix(NSIDE)))
for j in range(len(pixdata)):
hpixdata[pixdata[j]['pix']]+=1
In [ ]:
hpixdata
In [ ]:
hu.mollview(hpixdata,rot=180)
In [ ]:
hu.orthview(hpixdata)
This method doesn't seem to produce the right random catalogs, so we redo it by shuffling ra and dec as follows.
In [ ]:
data
In [ ]:
data['z'],data['ra'],data['dec']
In [ ]:
datzradec=np.array([data['z'], data['ra'], data['dec']])
In [ ]:
datzradec
In [ ]:
rng = check_random_state(None)
n_features, n_samples = datzradec.shape
# shuffle all but one axis to get background distribution; the rows of the
# (3, N) array are (z, ra, dec), so shuffle the ra and dec rows in place
data_Rzradec = datzradec.copy()
print data_Rzradec
for i in range(1,n_features):
    rng.shuffle(data_Rzradec[i, :])
print data_Rzradec
In [ ]:
min(data_Rzradec[1, :])
In [ ]:
max(data_Rzradec[1, :])
In [ ]:
min(data_Rzradec[2, :])
In [ ]:
max(data_Rzradec[2, :])
In [ ]:
min(datzradec[1, :])
In [ ]:
max(datzradec[1, :])
In [ ]:
min(datzradec[2, :])
In [ ]:
max(datzradec[2, :])
In [ ]:
range(1,3)
In [ ]:
help(rng.shuffle)
In [ ]:
n_samples
In [ ]:
n_features
In [ ]:
data_Rzradec
In [ ]:
data_Rzradec[0][2]
In [ ]:
len(data_Rzradec[0][:])
In [ ]:
data_Rzradec[0][:]
In [ ]:
pixdata = open("./output/pixdatadr72VAGCfullrandamrd.dat",'w')
pixdata.write("z\t pix \n")
for i in range(0,len(data_Rzradec[0][:])):
    pixdata.write("%f\t" %data_Rzradec[0][i])
    pixdata.write("%d\n" %dr72hpix.eq2pix(data_Rzradec[1][i],data_Rzradec[2][i]))
pixdata.close()
In [ ]:
pixdata = ascii.read("./output/pixdatadr72VAGCfullrandamrd.dat")
hpixdata=np.array(np.zeros(hu.nside2npix(NSIDE)))
for j in range(len(pixdata)):
hpixdata[pixdata[j]['pix']]+=1
In [ ]:
hpixdata
In [ ]:
hu.mollview(hpixdata,rot=180)
In [ ]:
hu.orthview(hpixdata)
In [ ]:
dataxyz
In [ ]:
dataxyzR1=ra_dec_to_xyz(data_Rzradec[1][:],data_Rzradec[2][:])
In [ ]:
data_Rzradec[1][:]
In [ ]:
dataxyzR1
In [ ]:
dataxyzR1=np.asarray(dataxyzR1)
In [ ]:
dataxyzR1=dataxyzR1.transpose()
In [ ]:
dataxyzR1
In [ ]:
bins=np.arange(0.025,1.025,0.025)
In [ ]:
bins
In [ ]:
#@pickle_results("tpcf_std.pkl")
tpcf=two_point(dataxyz,bins,method='standard',data_R=dataxyzR1, random_state=None)
In [ ]:
tpcf
In [ ]:
plt.plot(bins[1:],tpcf,'ro')
In [ ]:
bins=np.arange(0.0,1.05,0.05)
In [ ]:
#@pickle_results("tpcf_std.pkl")
tpcf=two_point(dataxyz,bins,method='standard',data_R=dataxyzR1, random_state=None)
In [ ]:
tpcf
In [ ]:
plt.plot(bins[1:],tpcf,'ro')
In [ ]:
btpcf=bootstrap_two_point(dataxyz, bins, Nbootstrap=10,
method='standard', return_bootstraps=False,
random_state=None)
In [ ]:
btpcf
In [ ]:
plt.errorbar(bins[1:],btpcf[0],yerr=btpcf[1],fmt='ro-')
In [ ]:
help(plt.errorbar)
In [ ]:
#@pickle_results("tpcf_ls.pkl")
tpcfls=two_point(dataxyz,bins,method='landy-szalay',
                 data_R=dataxyzR, random_state=None)
In [ ]:
#------------------------------------------------------------
# Set up correlation function computation
# This calculation takes a long time with the bootstrap resampling,
# so we'll save the results.
@pickle_results("correlation_functionsdr72.pkl")
def compute_results(Nbins=16, Nbootstraps=10, method='landy-szalay', rseed=0):
np.random.seed(rseed)
bins = 10 ** np.linspace(np.log10(1. / 60.), np.log10(6), 16)
results = [bins]
for D in [data]:
results += bootstrap_two_point_angular(D['ra'],
D['dec'],
bins=bins,
method=method,
Nbootstraps=Nbootstraps)
return results
(bins, r_corr, r_corr_err, r_bootstraps) = compute_results()
bin_centers = 0.5 * (bins[1:] + bins[:-1])
In [ ]:
bins
In [ ]:
r_corr
In [ ]:
r_corr_err
In [ ]:
r_bootstraps
In [ ]:
#------------------------------------------------------------
# Plot the results
label = '$0.15<z<0.25$\n$N=33813$'
fig = plt.figure(figsize=(6, 6))
plt.xscale('log')
plt.yscale('log')
plt.errorbar(bin_centers, r_corr, r_corr_err,fmt='.k', ecolor='gray', lw=1)
fig.text(0.8, 0.8, label, ha='right', va='top')
plt.xlabel(r'$\theta\ (deg)$')
plt.ylabel(r'$w(\theta)$')
plt.show()
fig.savefig("wth_dr72015025.pdf")
In [ ]:
data=ascii.read('./input/sdssdr72_sorted_z.dat')
In [ ]:
data
In [ ]:
#m_max = 19
# redshift and magnitude cuts
data = data[data['z'] > 0.05]
data = data[data['z'] < 0.15]
#data = data[data['petroMag_r'] < m_max]
# RA/DEC cuts
RAmin, RAmax = 140, 220
DECmin, DECmax = 5, 45
data = data[data['ra'] < RAmax]
data = data[data['ra'] > RAmin]
data = data[data['dec'] < DECmax]
data = data[data['dec'] > DECmin]
#ur = data['modelMag_u'] - data['modelMag_r']
#flag_red = (ur > 2.22)
#flag_blue = ~flag_red
#datag
print "data size:"
print " total gals: ", len(data)
#print " blue gals:", len(data_blue)
In [ ]:
NSIDE=512
dr72hpix=hu.HealPix("ring",NSIDE)
In [ ]:
pixdata = open("./output/pixdatadr72005015.dat",'w')
pixdata.write("z\t pix \n")
for i in range(0,len(data)):
    pixdata.write("%f\t" %data['z'][i])
    pixdata.write("%d\n" %dr72hpix.eq2pix(data['ra'][i],data['dec'][i]))
pixdata.close()
In [ ]:
pixdata = ascii.read("./output/pixdatadr72005015.dat")
hpixdata=np.array(np.zeros(hu.nside2npix(NSIDE)))
for j in range(len(pixdata)):
hpixdata[pixdata[j]['pix']]+=1
In [ ]:
hpixdata
In [ ]:
hu.mollview(hpixdata,rot=180)
In [ ]:
hu.orthview(hpixdata)
In [ ]:
#------------------------------------------------------------
# Set up correlation function computation
# This calculation takes a long time with the bootstrap resampling,
# so we'll save the results.
@pickle_results("correlation_functionsdr720515.pkl")
def compute_results(Nbins=16, Nbootstraps=10, method='landy-szalay', rseed=0):
np.random.seed(rseed)
bins = 10 ** np.linspace(np.log10(1. / 60.), np.log10(6), 16)
results = [bins]
for D in [data]:
results += bootstrap_two_point_angular(D['ra'],
D['dec'],
bins=bins,
method=method,
Nbootstraps=Nbootstraps)
return results
(bins, r_corr, r_corr_err, r_bootstraps) = compute_results()
bin_centers = 0.5 * (bins[1:] + bins[:-1])
In [ ]:
bins
In [ ]:
r_corr
In [ ]:
r_corr_err
In [ ]:
r_bootstraps
In [ ]:
#------------------------------------------------------------
# Plot the results
label = '$0.05<z<0.15$\n$N=138051$'
fig = plt.figure(figsize=(6, 6))
plt.xscale('log')
plt.yscale('log')
plt.errorbar(bin_centers, r_corr, r_corr_err,fmt='.k', ecolor='gray', lw=1)
fig.text(0.8, 0.8, label, ha='right', va='top')
plt.xlabel(r'$\theta\ (deg)$')
plt.ylabel(r'$w(\theta)$')
plt.show()
fig.savefig("wth_dr72005015.pdf")
In [ ]:
plt.errorbar(bins[0:len(bins)-1],r_corr,r_corr_err)
In [ ]:
data=ascii.read('./input/sdssdr72_sorted_z.dat')
In [ ]:
data
In [ ]:
data['z']
In [ ]:
#m_max = 19
# redshift and magnitude cuts
data = data[data['z'] > 0.05]
data = data[data['z'] <= 0.10]
#data = data[data['petroMag_r'] < m_max]
# RA/DEC cuts
RAmin, RAmax = 140, 220
DECmin, DECmax = 5, 45
data = data[data['ra'] < RAmax]
data = data[data['ra'] > RAmin]
data = data[data['dec'] < DECmax]
data = data[data['dec'] > DECmin]
#ur = data['modelMag_u'] - data['modelMag_r']
#flag_red = (ur > 2.22)
#flag_blue = ~flag_red
#datag
print "data size:"
print " total gals: ", len(data)
#print " blue gals:", len(data_blue)
In [ ]:
NSIDE=512
dr72hpix=hu.HealPix("ring",NSIDE)
In [ ]:
pixdata = open("./output/pixdatadr7200501.dat",'w')
pixdata.write("z\t pix \n")
for i in range(0,len(data)):
    pixdata.write("%f\t" %data['z'][i])
    pixdata.write("%d\n" %dr72hpix.eq2pix(data['ra'][i],data['dec'][i]))
pixdata.close()
In [ ]:
pixdata = ascii.read("./output/pixdatadr7200501.dat")
hpixdata=np.array(np.zeros(hu.nside2npix(NSIDE)))
for j in range(len(pixdata)):
hpixdata[pixdata[j]['pix']]+=1
In [ ]:
hpixdata
In [ ]:
hu.mollview(hpixdata,rot=180)
In [ ]:
hu.orthview(hpixdata)
In [ ]:
#------------------------------------------------------------
# Set up correlation function computation
# This calculation takes a long time with the bootstrap resampling,
# so we'll save the results.
@pickle_results("correlation_functionsdr720501.pkl")
def compute_results(Nbins=16, Nbootstraps=10, method='landy-szalay', rseed=0):
np.random.seed(rseed)
bins = 10 ** np.linspace(np.log10(1. / 60.), np.log10(6), 16)
results = [bins]
for D in [data]:
results += bootstrap_two_point_angular(D['ra'],
D['dec'],
bins=bins,
method=method,
Nbootstraps=Nbootstraps)
return results
(bins, r_corr, r_corr_err, r_bootstraps) = compute_results()
bin_centers = 0.5 * (bins[1:] + bins[:-1])
In [ ]:
bins
In [ ]:
r_corr
In [ ]:
r_corr_err
In [ ]:
r_bootstraps
In [ ]:
#------------------------------------------------------------
# Plot the results
label = '$0.05<z<0.10$\n$N=78939$'
fig = plt.figure(figsize=(6, 6))
plt.xscale('log')
plt.yscale('log')
plt.errorbar(bin_centers, r_corr, r_corr_err,fmt='.k', ecolor='gray', lw=1)
fig.text(0.8, 0.8, label, ha='right', va='top')
plt.xlabel(r'$\theta\ (deg)$')
plt.ylabel(r'$w(\theta)$')
plt.show()
fig.savefig("wth_dr720501.pdf")
In [ ]:
plt.errorbar(bins[0:len(bins)-1],r_corr,r_corr_err)
In [ ]:
data=ascii.read('./input/sdssdr72_sorted_z.dat')
In [ ]:
data
In [ ]:
data['z']
In [ ]:
#m_max = 19
# redshift and magnitude cuts
data = data[data['z'] > 0.10]
data = data[data['z'] <= 0.15]
#data = data[data['petroMag_r'] < m_max]
# RA/DEC cuts
RAmin, RAmax = 140, 220
DECmin, DECmax = 5, 45
data = data[data['ra'] < RAmax]
data = data[data['ra'] > RAmin]
data = data[data['dec'] < DECmax]
data = data[data['dec'] > DECmin]
#ur = data['modelMag_u'] - data['modelMag_r']
#flag_red = (ur > 2.22)
#flag_blue = ~flag_red
#datag
print "data size:"
print " total gals: ", len(data)
#print " blue gals:", len(data_blue)
In [ ]:
NSIDE=512
dr72hpix=hu.HealPix("ring",NSIDE)
In [ ]:
pixdata = open("./output/pixdatadr72001015.dat",'w')
pixdata.write("z\t pix \n")
for i in range(0,len(data)):
    pixdata.write("%f\t" %data['z'][i])
    pixdata.write("%d\n" %dr72hpix.eq2pix(data['ra'][i],data['dec'][i]))
pixdata.close()
In [ ]:
pixdata = ascii.read("./output/pixdatadr72001015.dat")
hpixdata=np.array(np.zeros(hu.nside2npix(NSIDE)))
for j in range(len(pixdata)):
hpixdata[pixdata[j]['pix']]+=1
In [ ]:
hpixdata
In [ ]:
hu.mollview(hpixdata,rot=180)
In [ ]:
hu.orthview(hpixdata)
In [ ]:
#------------------------------------------------------------
# Set up correlation function computation
# This calculation takes a long time with the bootstrap resampling,
# so we'll save the results.
@pickle_results("correlation_functionsdr72001015.pkl")
def compute_results(Nbins=16, Nbootstraps=10, method='landy-szalay', rseed=0):
np.random.seed(rseed)
bins = 10 ** np.linspace(np.log10(1. / 60.), np.log10(6), 16)
results = [bins]
for D in [data]:
results += bootstrap_two_point_angular(D['ra'],
D['dec'],
bins=bins,
method=method,
Nbootstraps=Nbootstraps)
return results
(bins, r_corr, r_corr_err, r_bootstraps) = compute_results()
bin_centers = 0.5 * (bins[1:] + bins[:-1])
In [ ]:
bins
In [ ]:
r_corr
In [ ]:
r_corr_err
In [ ]:
r_bootstraps
In [ ]:
#------------------------------------------------------------
# Plot the results
label = '$0.10<z<0.15$\n$N=59112$'
fig = plt.figure(figsize=(6, 6))
plt.xscale('log')
plt.yscale('log')
plt.errorbar(bin_centers, r_corr, r_corr_err,fmt='.k', ecolor='gray', lw=1)
fig.text(0.8, 0.8, label, ha='right', va='top')
plt.xlabel(r'$\theta\ (deg)$')
plt.ylabel(r'$w(\theta)$')
plt.show()
fig.savefig("wth_dr7201015.pdf")
In [ ]:
plt.errorbar(bins[0:len(bins)-1],r_corr,r_corr_err)
In [ ]:
hu.mollview(hpixdata,rot=180)
In [ ]:
hu.orthview(hpixdata)
In [ ]:
help(hu.mollview)
In [ ]:
from astroML.datasets import fetch_sdss_specgals
from astroML.correlation import bootstrap_two_point_angular
In [ ]:
help(bootstrap_two_point_angular)
In [ ]:
help(astroML.correlation)
In [ ]:
import astroML.correlation
In [ ]:
import sklearn.neighbors
In [ ]:
help(sklearn.neighbors)
The sorted, reduced-column data can now be read, lowering the RAM requirements of the table read.
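A sketch of how such a sorted, reduced-column file could have been produced from the full catalog (the source file name and exact column set are assumptions):
In [ ]:
# Hypothetical one-time preparation: keep only z/ra/dec, sort by z, write ascii.
full = ascii.read('./input/sdssdr72.dat')
full.keep_columns(['z', 'ra', 'dec'])
full.sort('z')
ascii.write(full, './input/sdssdr72_sorted_z.dat')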
In [ ]:
sdssdr72=ascii.read('./input/sdssdr72_sorted_z.dat')
Create a healpix map with NSIDE=64 (no. of pixels = 49152, as $NPIX=12\times NSIDE^2$) because the no. of galaxies in the survey is small. For higher resolution (later, for DR12) we will consider NSIDE=512 or even 1024. For now, we create an NSIDE=64 map.
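As a quick check of the pixel count:
In [ ]:
# NPIX = 12 * NSIDE^2 -> 49152 pixels for NSIDE=64.
print hu.nside2npix(64)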
In [ ]:
NSIDE=64
dt72hpix=hu.HealPix("ring",NSIDE)
We have data for galaxies with redshifts between 0 and 0.5 ($0<z<0.5$). To look at a time slice (a certain epoch) we need to choose the galaxies within a redshift window, since the redshift measurement carries an error of about $\pm 0.05$. We therefore bin the data into slices of nearly identical redshift (the loop below uses 16 slices of width 0.03) and save each slice to a separate file.
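A more compact way to do the same slicing (a sketch using boolean masks, which does not rely on the table being sorted; the loop below instead walks the z-sorted table with a running index):
In [ ]:
# Vectorized alternative: select each redshift slice with a boolean mask.
for i in range(1, 17):
    zslice = sdssdr72[(sdssdr72['z'] >= 0.03*(i-1)) & (sdssdr72['z'] < 0.03*i)]
    print i, len(zslice)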
In [ ]:
j=0
for i in range(1,17):
pixdata = open("/home/rohin/Desktop/healpix/binned1/pixdata%d_%d.dat"%(NSIDE,i),'w')
pixdata.write("ra\t dec\t z\t pix \n")
#for j in range(len(sdssdr72)):
try:
while sdssdr72[j]['z']<0.03*i:
pixdata.write("%f\t" %sdssdr72[j]['ra'])
pixdata.write("%f\t" %sdssdr72[j]['dec'])
pixdata.write("%f\t" %sdssdr72[j]['z'])
pixdata.write("%d\n" %dt72hpix.eq2pix(sdssdr72[j]['ra'],sdssdr72[j]['dec']))
#print dt72hpix.eq2pix(sdssdr72[j]['ra'],sdssdr72[j]['dec'])
j=j+1
except:
pass
pixdata.close()
In [ ]:
for i in range(1,17):
pixdata = ascii.read("/home/rohin/Desktop/healpix/binned1/pixdata%d_%d.dat"%(NSIDE,i))
mpixdata = open("/home/rohin/Desktop/healpix/binned1/masked/pixdata%d_%d.dat"%(NSIDE,i),'w')
mpixdata.write("ra\t dec\t z\t pix \n")
for j in range(len(pixdata)):
if 100<pixdata[j]['ra']<250:
mpixdata.write("%f\t" %pixdata[j]['ra'])
mpixdata.write("%f\t" %pixdata[j]['dec'])
mpixdata.write("%f\t" %pixdata[j]['z'])
mpixdata.write("%d\n" %pixdata[j]['pix'])
#pixdata.write("/home/rohin/Desktop/healpix/binned1/masked/pixdata_%d.dat"%i,format='ascii')
#print dt72hpix.eq2pix(sdssdr72[j]['ra'],sdssdr72[j]['dec'])
mpixdata.close()
We now take each databin and assign the total number of galaxies as the value of each pixel. The following routine calculates the number of galaxies by counting the occurrences of each pixel number in the file.
In [ ]:
pixdata = ascii.read("/home/rohin/Desktop/healpix/binned1/masked/pixdata%d_2.dat"%NSIDE)
hpixdata=np.array(np.zeros(hu.nside2npix(NSIDE)))
for j in range(len(pixdata)):
hpixdata[pixdata[j]['pix']]+=1
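Equivalently, the counting loop above can be written as a single np.bincount call (a sketch):
In [ ]:
# Histogram the pixel indices directly instead of looping over rows.
hpixdata_alt = np.bincount(np.asarray(pixdata['pix']),
                           minlength=hu.nside2npix(NSIDE)).astype(float)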
In [ ]:
hpixdata
In [ ]:
hu.orthview(hpixdata,rot=180)
In [ ]:
pixcl=hu.anafast(hpixdata,lmax=300)
ell = np.arange(len(pixcl))
plt.figure()
plt.plot(ell,np.log(pixcl))
plt.show()
In [ ]:
pixcl=hu.anafast(hpixdata,lmax=300)
ell = np.arange(len(pixcl))
plt.figure()
plt.plot(ell,np.sqrt(ell*(ell+1)*pixcl/(4*math.pi)))
plt.show()
In [ ]:
theta=np.arange(0,np.pi,0.001)
In [ ]:
correldat = np.polynomial.legendre.legval(np.cos(theta),(2*ell+1)*np.absolute(pixcl))/(4*math.pi)
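This evaluates the Legendre expansion of the angular correlation function, $w(\theta)=\sum_\ell \frac{2\ell+1}{4\pi}\,C_\ell\,P_\ell(\cos\theta)$; np.absolute is applied to the $C_\ell$, presumably to guard against negative values in the differenced spectra used further below.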
In [ ]:
plt.figure()
plt.plot(theta[0:600]*180/math.pi,correldat[0:600])
plt.show()
In [ ]:
plt.figure()
plt.plot(theta*180/math.pi,correldat)
plt.show()
In [ ]:
randra,randdec=hu.randsphere(2200000)
In [ ]:
randhp=hu.HealPix("RING",NSIDE)
In [ ]:
randhppix=randhp.eq2pix(randra,randdec)
In [ ]:
randpixdat=np.array(np.zeros(hu.nside2npix(NSIDE)))
In [ ]:
for j in range(len(randhppix)):
randpixdat[randhppix[j]]+=1
In [ ]:
randmaphp=hu.mollview(randpixdat)
In [ ]:
randcl=hu.anafast(randpixdat,lmax=300)
ell = np.arange(len(randcl))
plt.figure()
plt.plot(ell,np.sqrt(ell*(ell+1)*randcl/(4*math.pi)))
plt.show()
In [ ]:
correlrand = np.polynomial.legendre.legval(np.cos(theta),(2*ell+1)*np.absolute(randcl))/(4*math.pi)
plt.figure()
plt.plot(theta[0:600]*180/math.pi,correlrand[0:600])
plt.show()
In [ ]:
finalcorrel=correldat-correlrand
plt.figure()
plt.plot(theta[0:600]*180/math.pi,finalcorrel[0:600])
plt.show()
In [ ]:
finalpix=hpixdata-randpixdat
In [ ]:
hu.mollview(finalpix,rot=180)
In [ ]:
cl=hu.anafast(finalpix,lmax=300)
ell = np.arange(len(cl))
plt.figure()
plt.plot(ell,np.sqrt(ell*(ell+1)*cl/(4*math.pi)))
plt.show()
In [ ]:
correlrand = np.polynomial.legendre.legval(np.cos(theta),(2*ell+1)*np.absolute(cl))/(4*math.pi)
plt.figure()
plt.plot(theta[0:600]*180/math.pi,correlrand[0:600])
plt.show()
In [ ]:
finalcl=pixcl-randcl
correlrand = np.polynomial.legendre.legval(np.cos(theta),(2*ell+1)*np.absolute(finalcl))/(4*math.pi)
plt.figure()
plt.plot(theta[0:600]*180/math.pi,correlrand[0:600])
plt.show()
In [ ]:
help(fits)
In [ ]:
data[1].data['z']
In [ ]: