In [1]:
%pylab inline
from __future__ import (division, print_function)
import os
import sys
import copy
import fnmatch
import warnings
import collections
import numpy as np
import scipy
try:
from scipy.stats import scoreatpercentile
except:
scoreatpercentile = False
from scipy.interpolate import interp1d
import cPickle as pickle
# Astropy
from astropy.io import fits
from astropy import units as u
from astropy.stats import sigma_clip
from astropy.table import Table, Column
from astropy.utils.console import ProgressBar
# AstroML
from astroML.plotting import hist
# Matplotlib related
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib.ticker import NullFormatter
from matplotlib.ticker import MaxNLocator
# Matplotlib default settings
# Save the matplotlib defaults so they can be restored later if needed.
rcdef = plt.rcParams.copy()
# Larger default figure and thicker / longer tick marks for publication-style
# plots.  (`pylab` is injected into the namespace by the `%pylab inline` magic.)
pylab.rcParams['figure.figsize'] = 12, 10
pylab.rcParams['xtick.major.size'] = 8.0
pylab.rcParams['xtick.major.width'] = 2.5
pylab.rcParams['xtick.minor.size'] = 4.0
pylab.rcParams['xtick.minor.width'] = 2.5
pylab.rcParams['ytick.major.size'] = 8.0
pylab.rcParams['ytick.major.width'] = 2.5
pylab.rcParams['ytick.minor.size'] = 4.0
pylab.rcParams['ytick.minor.width'] = 2.5
# Personal
import hscUtils as hUtil
import galSBP
import coaddCutoutGalfitSimple as gSimple
import matplotlib.patches as mpatches
from matplotlib.patches import Ellipse
from matplotlib.collections import PatchCollection
import cosmology
# Flat LCDM cosmology used throughout for distance / luminosity conversions.
c=cosmology.Cosmo(H0=70.0, omega_m=0.3, omega_l=0.7, flat=1)
In [2]:
# Absolute magnitude of the Sun in HSC filters
# Actually borrowed from the DES filters
# Values from magsun.data in FSPS
amag_sun_des_g = 5.08
amag_sun_des_r = 4.62
amag_sun_des_i = 4.52
amag_sun_des_z = 4.52
amag_sun_des_y = 4.51
# Based on http://www.baryons.org/ezgal/filters.php
amag_sun_ukiss_y = 4.515
# Extinction correction factor for HSC
## A_lambda = Coeff * E(B-V)
a_hsc_g = 3.233
a_hsc_r = 2.291
a_hsc_i = 1.635
a_hsc_z = 1.261
a_hsc_y = 1.076
# Two-sided tail probabilities of a normal distribution beyond 1/2/3 sigma;
# used as the `alpha` levels for the bootstrap confidence intervals below.
SIGMA1 = 0.3173
SIGMA2 = 0.0455
SIGMA3 = 0.0027
# Common radial grid used when comparing / stacking profiles
# (presumably in R^{1/4} "rsma" units — TODO confirm against galSBP outputs).
RSMA_COMMON = np.arange(0.4, 4.2, 0.02)
In [3]:
# Code for Get Bootstrap mean or median
def _confidence_interval_1d(A, alpha=.05, metric=np.mean, numResamples=10000, interpolate=True):
"""Calculates bootstrap confidence interval along one dimensional array"""
if not isinstance(alpha, collections.Iterable):
alpha = np.array([alpha])
N = len(A)
resampleInds = np.random.randint(0, N, (numResamples,N))
metricOfResampled = metric(A[resampleInds], axis=-1)
confidenceInterval = np.zeros(2*len(alpha),dtype='float')
if interpolate:
for thisAlphaInd, thisAlpha in enumerate(alpha):
confidenceInterval[2*thisAlphaInd] = scoreatpercentile(metricOfResampled,
thisAlpha*100/2.0)
confidenceInterval[2*thisAlphaInd+1] = scoreatpercentile(metricOfResampled,
100-thisAlpha*100/2.0)
else:
sortedMetricOfResampled = np.sort(metricOfResampled)
for thisAlphaInd, thisAlpha in enumerate(alpha):
confidenceInterval[2*thisAlphaInd] = sortedMetricOfResampled[int(round(thisAlpha*numResamples/2.0))]
confidenceInterval[2*thisAlphaInd+1] = sortedMetricOfResampled[int(round(numResamples -
thisAlpha*numResamples/2.0))]
return confidenceInterval
def _ma_confidence_interval_1d(A, alpha=.05, metric=np.mean, numResamples=10000, interpolate=True):
    """Bootstrap confidence interval of a 1-D array, ignoring NaNs and masked values."""
    # Strip masked and non-finite entries before handing off to the
    # plain bootstrap routine.
    cleaned = np.ma.masked_invalid(A, copy=True).compressed()
    return _confidence_interval_1d(cleaned, alpha, metric, numResamples, interpolate)
def confidence_interval(A, axis=None, alpha=.05, metric=np.mean, numResamples=10000, interpolate=True):
    """Bootstrap confidence interval of an array, ignoring NaNs and masked elements.

    Parameters
    ----------
    A : array_like
        Array containing the numbers whose confidence interval is desired.
    axis : int, optional
        Axis along which the interval is computed.  None (default) flattens
        the array first.
    alpha : float or array, optional
        Significance level(s); a 100*(1-alpha)% confidence interval is
        returned for each.  Default is .05.
    metric : numpy function, optional
        Statistic to bootstrap.  Default is numpy.mean.
    numResamples : int, optional
        Number of bootstrap resamples.  Default is 10000.
    interpolate : bool, optional
        Use scipy.stats.scoreatpercentile to interpolate between bootstrap
        samples when alpha*numResamples/2.0 is not an integer.  Default True.

    Returns
    -------
    ndarray
        When ``axis`` is None, a length-2*len(alpha) array; otherwise an
        array where ``axis`` is replaced by that dimension.
    """
    # Fall back to the nearest-order-statistic method if scipy's
    # scoreatpercentile could not be imported at module load time.
    if interpolate is True and scoreatpercentile is False:
        print("need scipy to interpolate between values")
        interpolate = False
    data = A.copy()
    if axis is None:
        return _ma_confidence_interval_1d(data.ravel(), alpha, metric,
                                          numResamples, interpolate)
    return np.apply_along_axis(_ma_confidence_interval_1d, axis, data, alpha,
                               metric, numResamples, interpolate)
def normProf(sma, sbp, minSma, maxSma, divide=False):
    """Normalize a profile by its median level inside [minSma, maxSma].

    Parameters
    ----------
    sma : array_like
        Radius values (same length as ``sbp``).
    sbp : array_like
        Surface brightness profile.
    minSma, maxSma : float
        Radial window used to estimate the reference level.
    divide : bool, optional
        If True, divide by the reference level; otherwise subtract it.
    """
    window = (sma >= minSma) & (sma <= maxSma)
    # NaN-safe median inside the chosen radial window.
    offset = np.nanmedian(sbp[window])
    return (sbp / offset) if divide else (sbp - offset)
def pixKpc(redshift, pix=0.168, show=True, npix=1.0):
    """Physical size (in kpc) spanned by ``npix`` pixels at a given redshift.

    Parameters
    ----------
    redshift : float
        Redshift at which the angular scale is evaluated.
    pix : float, optional
        Pixel scale in arcsec/pixel (HSC default: 0.168).
    show : bool, optional
        If True, print the conversion.
    npix : float, optional
        Number of pixels.
    """
    # Renamed the local (the original shadowed the function name).
    kpcPerPix = pix * npix * hUtil.cosmoScale(redshift)
    if show:
        print("# %d pixel(s) = %6.3f Kpc" % (npix, kpcPerPix))
    return kpcPerPix
def logAdd(para1, para2):
    """Sum two quantities expressed in log10 space (useful for magnitudes)."""
    linearSum = (10.0 ** np.asarray(para1)) + (10.0 ** np.asarray(para2))
    return np.log10(linearSum)
def errAdd(err1, err2):
    """Combine two independent errors in quadrature."""
    quadSum = (err1 ** 2.0) + (err2 ** 2.0)
    return np.sqrt(quadSum)
def toColorArr(data, bottom=None, top=None):
    """
    Convert a data array into a "color array" scaled between 0 and 255.

    Values are optionally clipped to [bottom, top] first, then linearly
    rescaled so the minimum maps to 0 and the maximum to 255 (NaNs are
    ignored when locating the extremes).

    Parameters
    ----------
    data : array_like
        Input values; a copy is made, so the caller's array is not modified.
    bottom, top : float, optional
        Lower / upper clipping limits.

    Returns
    -------
    ndarray
        Rescaled array in the range [0, 255].
    """
    # BUGFIX: work on a copy — the original clipped the caller's array in
    # place, silently corrupting the input data.
    data = np.array(data, copy=True)
    if top is not None:
        data[data >= top] = top
    if bottom is not None:
        data[data <= bottom] = bottom
    return ((data - np.nanmin(data)) /
            (np.nanmax(data) - np.nanmin(data))) * 255.0
def getLuminosity(mag, redshift, extinction=None,
                  amag_sun=None):
    """Absolute magnitude, or log10 luminosity in solar units when
    ``amag_sun`` is provided.

    Parameters
    ----------
    mag : float or array_like
        Apparent magnitude.
    redshift : float
        Redshift used for the distance modulus.
    extinction : float, optional
        Galactic extinction to subtract.
    amag_sun : float, optional
        Solar absolute magnitude in the same band; when given, the result
        is converted to log10(L/L_sun).
    """
    absMag = mag - hUtil.cosmoDistMod(redshift)
    if extinction is not None:
        absMag -= extinction
    if amag_sun is not None:
        # (M_sun - M) / 2.5 == log10(L / L_sun)
        absMag = (amag_sun - absMag) / 2.5
    return absMag
def getStackProfiles(sample, loc, name='GAMA',
                     idCol='ID_USE', tabCol='sum_tab', save=True):
    """Collect the 1-D profile tables for every galaxy in a sample.

    Parameters
    ----------
    sample : astropy.table.Table
        Catalog of galaxies; must provide the ``tabCol`` column holding the
        (relative) path of each galaxy's summary profile table.
    loc : str
        Directory containing the individual profile tables.
    name : str, optional
        Label of the sample, used in messages and the output pickle name.
    idCol : str, optional
        Name of the ID column (kept for interface compatibility; unused here).
    tabCol : str, optional
        Column holding the path of each galaxy's summary table.
    save : bool, optional
        If True, pickle the list of profiles to ``<loc>/<name>_profs.pkl``.

    Returns
    -------
    list of astropy.table.Table
        One profile table per galaxy that could be read successfully.
    """
    print("## Sample %s : Will deal with %d galaxies" % (name, len(sample)))
    profiles = []
    with ProgressBar(len(sample), ipython_widget=True) as bar:
        for g in sample:
            try:
                # BUGFIX: honor the `tabCol` parameter instead of the
                # hard-coded 'sum_tab' column name (default is unchanged).
                gFile = os.path.join(loc, g[tabCol].replace('./', '')).strip()
                gProf = Table.read(gFile, format='fits')
                # Copy the k-corrections (models plain/b/c for each band),
                # mass-to-light ratios, and aperture luminosities from the
                # catalog row into the profile's metadata.
                try:
                    for band in ('I', 'G', 'R', 'Z', 'Y'):
                        gProf.meta['KCORRECT_%s' % band] = g['KCORRECT_%s' % band]
                        gProf.meta['KCORRECT_b_%s' % band] = g['KCORRECT_b_%s' % band]
                        gProf.meta['KCORRECT_c_%s' % band] = g['KCORRECT_c_%s' % band]
                    for model in ('A', 'B', 'C'):
                        gProf.meta['LOGM2LI_%s' % model] = g['logm2lI_%s' % model]
                    gProf.meta['LUM_100'] = g['lum_100']
                    gProf.meta['LUM_120'] = g['lum_120']
                except Exception:
                    # NOTE(review): a galaxy with incomplete metadata is
                    # skipped entirely (same as the original behavior) —
                    # confirm this is intended rather than appending anyway.
                    print("## WARNING: Some metadata may not be available !")
                    continue
            except Exception:
                print("## Missing: %s" % gFile)
                continue
            profiles.append(gProf)
            bar.update()
    if save:
        outPkl = os.path.join(loc, (name + '_profs.pkl'))
        hUtil.saveToPickle(profiles, outPkl)
        print("## Save %s to %s" % (name, outPkl))
    return profiles
def organizeSbp(profiles, col1='muI1', col2='KCORRECT_c_I',
                kind='sbp', norm=False, r1=9.9, r2=10.1, divide=False,
                col3=None, col4=None, justStack=False,
                sun1=amag_sun_des_g, sun2=amag_sun_des_r):
    """Stack individual profiles and, optionally, their median/average.

    Parameters
    ----------
    profiles : list of astropy.table.Table
        Profile tables (as returned by `getStackProfiles`).
    col1, col2 : str
        Main profile column and the metadata keyword of the correction
        applied to it (meaning depends on ``kind``).
    kind : {'sbp', 'mass', 'color', 'lum'}
        Which type of profile to stack.
    norm : bool, optional
        Normalize each profile with `normProf` over [r1, r2] before stacking.
    r1, r2 : float, optional
        Radial range (in kpc) used for the normalization.
    divide : bool, optional
        Passed through to `normProf`: divide instead of subtract.
    col3, col4 : str, optional
        Metadata keywords of the two k-corrections used for color profiles.
    justStack : bool, optional
        If True, return only the 2-D stack without the summary statistics.
    sun1, sun2 : float, optional
        Solar absolute magnitudes of the two bands for color profiles.

    Returns
    -------
    stack : ndarray
        2-D array with one row per profile.
    medProf, avgProf, stdProf : ndarray
        Bootstrap median/mean/std profiles with confidence ranges
        (only when ``justStack`` is False).
    """
    # BUGFIX: np.vstack requires a real sequence; passing a generator is an
    # error on modern NumPy, hence the list comprehensions throughout.
    if kind.strip() == 'sbp':
        if col2 is not None:
            # Surface brightness with the k-correction folded in (mag -> dex).
            if norm:
                stack = np.vstack([normProf(p['rKpc'],
                                            np.asarray(p[col1] + (p.meta[col2] / 2.5)),
                                            r1, r2, divide=divide)
                                   for p in profiles])
            else:
                stack = np.vstack([np.asarray(p[col1] + (p.meta[col2] / 2.5))
                                   for p in profiles])
        else:
            print("## NO KCORRECTION APPLIED !!")
            if norm:
                stack = np.vstack([normProf(p['rKpc'], p[col1],
                                            r1, r2, divide=divide)
                                   for p in profiles])
            else:
                stack = np.vstack([np.asarray(p[col1]) for p in profiles])
    elif kind.strip() == 'mass':
        # col2 is added directly (presumably a log M/L term — TODO confirm).
        if norm:
            stack = np.vstack([normProf(p['rKpc'],
                                        np.asarray(p[col1] + p.meta[col2]),
                                        r1, r2, divide=divide) for p in profiles])
        else:
            stack = np.vstack([np.asarray(p[col1] + p.meta[col2]) for p in profiles])
    elif kind.strip() == 'color':
        cSun = (sun1 - sun2)
        if col3 is None or col4 is None:
            print("## NO KCORRECTION APPLIED !!")
            if norm:
                stack = np.vstack([normProf(p['rKpc'],
                                            np.asarray(cSun - 2.5 * (p[col1] - p[col2])),
                                            r1, r2, divide=divide) for p in profiles])
            else:
                stack = np.vstack([np.asarray(cSun - 2.5 * (p[col1] - p[col2]))
                                   for p in profiles])
        else:
            if norm:
                stack = np.vstack([normProf(p['rKpc'],
                                            np.asarray(cSun - 2.5 * (p[col1] - p[col2]) -
                                                       (p.meta[col3] - p.meta[col4])),
                                            r1, r2, divide=divide) for p in profiles])
            else:
                stack = np.vstack([np.asarray(cSun - 2.5 * (p[col1] - p[col2]) -
                                              (p.meta[col3] - p.meta[col4]))
                                   for p in profiles])
    elif kind.strip() == 'lum':
        if col2 is None:
            stack = np.vstack([np.asarray(p[col1]) for p in profiles])
        else:
            stack = np.vstack([np.asarray(p[col1] - p.meta[col2]) for p in profiles])
    else:
        raise Exception("## WRONG KIND !!")
    if justStack:
        return stack
    # Bootstrap median / mean / std profiles with a 1-sigma confidence range.
    medProf = confidence_interval(stack, axis=0, alpha=np.asarray([SIGMA1, 1.0]),
                                  metric=np.nanmedian, numResamples=1000,
                                  interpolate=True)
    avgProf = confidence_interval(stack, axis=0, alpha=np.asarray([SIGMA1, 1.0]),
                                  metric=np.nanmean, numResamples=1000,
                                  interpolate=True)
    stdProf = confidence_interval(stack, axis=0, alpha=np.asarray([SIGMA1, 1.0]),
                                  metric=np.nanstd, numResamples=1000,
                                  interpolate=True)
    return stack, medProf, avgProf, stdProf
def loadPkl(filename):
    """Load a Python object from a pickle file.

    Parameters
    ----------
    filename : str
        Path of the pickle file.

    Returns
    -------
    object or None
        The unpickled object, or None when the file does not exist.
    """
    try:
        # cPickle is the fast Python 2 implementation; fall back to the
        # plain pickle module on Python 3 where cPickle no longer exists.
        import cPickle as pickle
    except ImportError:
        # BUGFIX: catch ImportError specifically instead of a bare except.
        warnings.warn("## cPickle is not available!!")
        import pickle
    if not os.path.isfile(filename):
        warnings.warn("## Can not find %s, return None" % filename)
        return None
    # Context manager guarantees the handle is closed even if loading fails.
    with open(filename, 'rb') as pklFile:
        return pickle.load(pklFile)
In [37]:
# NOTE(review): hard-coded absolute local path — consider a configurable
# data directory so the notebook runs on other machines.
newDir = '/Users/songhuang/work/hscs/gama_massive/sbp/'
# 1-D profile summary catalogs (version 160211) for the three samples:
# BCGs, cluster members, and the general GAMA sample.
bcgFile = 'redbcg_1d_160211.fits'
memFile = 'redmem_1d_160211.fits'
gamaFile = 'gama_1d_160211.fits'
# Drop stale tables left by a previous run so the reads below start clean.
try:
    bcgTab
except NameError:
    pass
else:
    del bcgTab
try:
    memTab
except NameError:
    pass
else:
    del memTab
try:
    gamaTab
except NameError:
    pass
else:
    del gamaTab
# Two summary catalogs
bcgCat = os.path.join(newDir, bcgFile)
memCat = os.path.join(newDir, memFile)
gamaCat = os.path.join(newDir, gamaFile)
# Read each catalog, failing loudly when a file is missing.
if not os.path.isfile(bcgCat):
    raise Exception("## Can not find catalog for BCGs : %s" % bcgCat)
else:
    bcgTab = Table.read(bcgCat, format='fits')
if not os.path.isfile(memCat):
    raise Exception("## Can not find catalog for cluster members : %s" % memCat)
else:
    memTab = Table.read(memCat, format='fits')
if not os.path.isfile(gamaCat):
    raise Exception("## Can not find catalog for GAMA galaxies : %s" % gamaCat)
else:
    gamaTab = Table.read(gamaCat, format='fits')
print("## Deal with %i galaxies in redBCH sample" % len(bcgTab))
print("## Deal with %i galaxies in redMEM sample" % len(memTab))
print("## Deal with %i galaxies in GAMA sample" % len(gamaTab))
In [38]:
# "Clean" subsamples: concentrated (C82 >= 5) and red in both the
# k-corrected (g-z) and rest-frame SED (u-r) colors.
gamaClean = gamaTab[(gamaTab['c82_120'] >= 5.0) & (gamaTab['gz_kC'] >= 1.5) &
                    (gamaTab['ur_rest_sed'] >= 2.0)]
print(len(gamaClean))
memClean = memTab[(memTab['c82_120'] >= 5.0) & (memTab['gz_kC'] >= 1.5) &
                  (memTab['ur_rest_sed'] >= 2.0)]
print(len(memClean))
In [32]:
# Luminosity bin 1
print("##########################################################################")
print("## 0.2 < z < 0.4 ; 11.2 < logL* < 11.4 using SBP Luminosity")
## Using lum100; model C
# logL* is lum_100 plus the k-correction converted from magnitudes to dex
# (KCORRECT / 2.5); the same bin is selected for all three samples.
gamaL1c = gamaTab[(gamaTab['Z'] >= 0.20) & (gamaTab['Z'] <= 0.40) &
                  (gamaTab['lum_100'] + (gamaTab['KCORRECT_c_I'] / 2.5) >= 11.2) &
                  (gamaTab['lum_100'] + (gamaTab['KCORRECT_c_I'] / 2.5) <= 11.4)]
memL1c = memTab[(memTab['Z'] >= 0.20) & (memTab['Z'] <= 0.40) &
                (memTab['lum_100'] + (memTab['KCORRECT_c_I'] / 2.5) >= 11.2) &
                (memTab['lum_100'] + (memTab['KCORRECT_c_I'] / 2.5) <= 11.4)]
bcgL1c = bcgTab[(bcgTab['Z'] >= 0.20) & (bcgTab['Z'] <= 0.40) &
                (bcgTab['lum_100'] + (bcgTab['KCORRECT_c_I'] / 2.5) >= 11.2) &
                (bcgTab['lum_100'] + (bcgTab['KCORRECT_c_I'] / 2.5) <= 11.4)]
## Using lum100; model A
gamaL1a = gamaTab[(gamaTab['Z'] >= 0.20) & (gamaTab['Z'] <= 0.40) &
                  (gamaTab['lum_100'] + (gamaTab['KCORRECT_I'] / 2.5) >= 11.2) &
                  (gamaTab['lum_100'] + (gamaTab['KCORRECT_I'] / 2.5) <= 11.4)]
memL1a = memTab[(memTab['Z'] >= 0.20) & (memTab['Z'] <= 0.40) &
                (memTab['lum_100'] + (memTab['KCORRECT_I'] / 2.5) >= 11.2) &
                (memTab['lum_100'] + (memTab['KCORRECT_I'] / 2.5) <= 11.4)]
bcgL1a = bcgTab[(bcgTab['Z'] >= 0.20) & (bcgTab['Z'] <= 0.40) &
                (bcgTab['lum_100'] + (bcgTab['KCORRECT_I'] / 2.5) >= 11.2) &
                (bcgTab['lum_100'] + (bcgTab['KCORRECT_I'] / 2.5) <= 11.4)]
# Sample sizes and median properties (model A first, then model C).
print("\n## GAMA L1 : %i, %i galaxies" % (len(gamaL1a), len(gamaL1c)))
print("## BCG L1 : %i, %i galaxies" % (len(bcgL1a), len(bcgL1c)))
print("## MEM L1 : %i, %i galaxies" % (len(memL1a), len(memL1c)))
print("\n## GAMA - Median redshift : ", np.nanmedian(gamaL1a['Z']), np.nanmedian(gamaL1c['Z']))
print("## BCG - Median redshift : ", np.nanmedian(bcgL1a['Z']), np.nanmedian(bcgL1c['Z']))
print("## MEM - Median redshift : ", np.nanmedian(memL1a['Z']), np.nanmedian(memL1c['Z']))
# NOTE(review): each pair of "Median logL_sbp" values below is identical —
# both expressions use the model-C table; the first was probably meant to
# use the model-A sample.  Confirm before trusting these numbers.
print("\n## GAMA - Median logL_sbp : ", (np.nanmedian(gamaL1c['lum_100'] + (gamaL1c['KCORRECT_c_I'] / 2.5))),
      (np.nanmedian(gamaL1c['lum_100'] + (gamaL1c['KCORRECT_c_I'] / 2.5))))
print("## BCG - Median logL_sbp : ", (np.nanmedian(bcgL1c['lum_100'] + (bcgL1c['KCORRECT_c_I'] / 2.5))),
      (np.nanmedian(bcgL1c['lum_100'] + (bcgL1c['KCORRECT_c_I'] / 2.5))))
print("## MEM - Median logL_sbp : ", (np.nanmedian(memL1c['lum_100'] + (memL1c['KCORRECT_c_I'] / 2.5))),
      (np.nanmedian(memL1c['lum_100'] + (memL1c['KCORRECT_c_I'] / 2.5))))
print("##########################################################################\n")
# Keep only BCGs that are very likely the true central (P_CEN_1 >= 0.8).
bcgL1a_pcen = bcgL1a[bcgL1a['P_CEN_1'] >= 0.8]
bcgL1c_pcen = bcgL1c[bcgL1c['P_CEN_1'] >= 0.8]
print("## bcgL1a_pcen : %d out of %d" % (len(bcgL1a_pcen), len(bcgL1a)))
print("## bcgL1c_pcen : %d out of %d" % (len(bcgL1c_pcen), len(bcgL1c)))
print("## BCG/PCEN - Median redshift : ", np.nanmedian(bcgL1a_pcen['Z']), np.nanmedian(bcgL1c_pcen['Z']))
print("## BCG/PCEN - Median logL_sbp : ",
      (np.nanmedian(bcgL1a_pcen['lum_100'] + (bcgL1a_pcen['KCORRECT_I'] / 2.5))),
      (np.nanmedian(bcgL1c_pcen['lum_100'] + (bcgL1c_pcen['KCORRECT_c_I'] / 2.5))))
print("##########################################################################\n")
# "Clean" GAMA subsamples: concentrated and red (same cuts as gamaClean,
# but using the matching k-correction model for each selection).
gamaL1c_use = gamaL1c[(gamaL1c['c82_120'] >=5.0) & (gamaL1c['gz_kC'] >= 1.5)]
gamaL1a_use = gamaL1a[(gamaL1a['c82_120'] >=5.0) & (gamaL1a['gz_kA'] >= 1.5)]
print("## gamaL1a_pcen : %d out of %d" % (len(gamaL1a_use), len(gamaL1a)))
print("## gamaL1c_pcen : %d out of %d" % (len(gamaL1c_use), len(gamaL1c)))
print("## GAMA/USE - Median redshift : ", np.nanmedian(gamaL1a_use['Z']),
      np.nanmedian(gamaL1c_use['Z']))
print("## GAMA/USE - Median logL_sbp : ",
      (np.nanmedian(gamaL1a_use['lum_100'] + (gamaL1a_use['KCORRECT_I'] / 2.5))),
      (np.nanmedian(gamaL1c_use['lum_100'] + (gamaL1c_use['KCORRECT_c_I'] / 2.5))))
print("##########################################################################")
In [35]:
# Luminosity bin 2
# NOTE(review): the printed header below still says "0.2 < z < 0.4" but the
# selections in this cell actually use Z <= 0.50 — confirm which is intended.
print("##########################################################################")
print("## 0.2 < z < 0.4 ; 11.4 < logL* < 11.6 using SBP Luminosity")
## Using lum100; model C
# Same structure as the L1 cell above, shifted to the 11.4 < logL* < 11.6 bin.
gamaL2c = gamaTab[(gamaTab['Z'] >= 0.20) & (gamaTab['Z'] <= 0.50) &
                  (gamaTab['lum_100'] + (gamaTab['KCORRECT_c_I'] / 2.5) >= 11.4) &
                  (gamaTab['lum_100'] + (gamaTab['KCORRECT_c_I'] / 2.5) <= 11.6)]
memL2c = memTab[(memTab['Z'] >= 0.20) & (memTab['Z'] <= 0.50) &
                (memTab['lum_100'] + (memTab['KCORRECT_c_I'] / 2.5) >= 11.4) &
                (memTab['lum_100'] + (memTab['KCORRECT_c_I'] / 2.5) <= 11.6)]
bcgL2c = bcgTab[(bcgTab['Z'] >= 0.20) & (bcgTab['Z'] <= 0.50) &
                (bcgTab['lum_100'] + (bcgTab['KCORRECT_c_I'] / 2.5) >= 11.4) &
                (bcgTab['lum_100'] + (bcgTab['KCORRECT_c_I'] / 2.5) <= 11.6)]
## Using lum100; model A
gamaL2a = gamaTab[(gamaTab['Z'] >= 0.20) & (gamaTab['Z'] <= 0.50) &
                  (gamaTab['lum_100'] + (gamaTab['KCORRECT_I'] / 2.5) >= 11.4) &
                  (gamaTab['lum_100'] + (gamaTab['KCORRECT_I'] / 2.5) <= 11.6)]
memL2a = memTab[(memTab['Z'] >= 0.20) & (memTab['Z'] <= 0.50) &
                (memTab['lum_100'] + (memTab['KCORRECT_I'] / 2.5) >= 11.4) &
                (memTab['lum_100'] + (memTab['KCORRECT_I'] / 2.5) <= 11.6)]
bcgL2a = bcgTab[(bcgTab['Z'] >= 0.20) & (bcgTab['Z'] <= 0.50) &
                (bcgTab['lum_100'] + (bcgTab['KCORRECT_I'] / 2.5) >= 11.4) &
                (bcgTab['lum_100'] + (bcgTab['KCORRECT_I'] / 2.5) <= 11.6)]
# Sample sizes and median properties (model A first, then model C).
print("\n## GAMA L2 : %i, %i galaxies" % (len(gamaL2a), len(gamaL2c)))
print("## BCG L2 : %i, %i galaxies" % (len(bcgL2a), len(bcgL2c)))
print("## MEM L2 : %i, %i galaxies" % (len(memL2a), len(memL2c)))
print("\n## GAMA - Median redshift : ", np.nanmedian(gamaL2a['Z']), np.nanmedian(gamaL2c['Z']))
print("## BCG - Median redshift : ", np.nanmedian(bcgL2a['Z']), np.nanmedian(bcgL2c['Z']))
print("## MEM - Median redshift : ", np.nanmedian(memL2a['Z']), np.nanmedian(memL2c['Z']))
# NOTE(review): each pair of "Median logL_sbp" values below is identical —
# both expressions use the model-C table; the first was probably meant to
# use the model-A sample.  Confirm before trusting these numbers.
print("\n## GAMA - Median logL_sbp : ", (np.nanmedian(gamaL2c['lum_100'] + (gamaL2c['KCORRECT_c_I'] / 2.5))),
      (np.nanmedian(gamaL2c['lum_100'] + (gamaL2c['KCORRECT_c_I'] / 2.5))))
print("## BCG - Median logL_sbp : ", (np.nanmedian(bcgL2c['lum_100'] + (bcgL2c['KCORRECT_c_I'] / 2.5))),
      (np.nanmedian(bcgL2c['lum_100'] + (bcgL2c['KCORRECT_c_I'] / 2.5))))
print("## MEM - Median logL_sbp : ", (np.nanmedian(memL2c['lum_100'] + (memL2c['KCORRECT_c_I'] / 2.5))),
      (np.nanmedian(memL2c['lum_100'] + (memL2c['KCORRECT_c_I'] / 2.5))))
print("##########################################################################\n")
# Keep only BCGs that are very likely the true central (P_CEN_1 >= 0.8).
bcgL2a_pcen = bcgL2a[bcgL2a['P_CEN_1'] >= 0.8]
bcgL2c_pcen = bcgL2c[bcgL2c['P_CEN_1'] >= 0.8]
print("## bcgL2a_pcen : %d out of %d" % (len(bcgL2a_pcen), len(bcgL2a)))
print("## bcgL2c_pcen : %d out of %d" % (len(bcgL2c_pcen), len(bcgL2c)))
print("## BCG/PCEN - Median redshift : ", np.nanmedian(bcgL2a_pcen['Z']), np.nanmedian(bcgL2c_pcen['Z']))
print("## BCG/PCEN - Median logL_sbp : ",
      (np.nanmedian(bcgL2a_pcen['lum_100'] + (bcgL2a_pcen['KCORRECT_I'] / 2.5))),
      (np.nanmedian(bcgL2c_pcen['lum_100'] + (bcgL2c_pcen['KCORRECT_c_I'] / 2.5))))
print("##########################################################################\n")
# "Clean" GAMA subsamples: concentrated and red, using the matching
# k-correction model for each selection.
gamaL2c_use = gamaL2c[(gamaL2c['c82_120'] >= 5.0) & (gamaL2c['gz_kC'] >= 1.5)]
gamaL2a_use = gamaL2a[(gamaL2a['c82_120'] >= 5.0) & (gamaL2a['gz_kA'] >= 1.5)]
print("## gamaL2a_pcen : %d out of %d" % (len(gamaL2a_use), len(gamaL2a)))
print("## gamaL2c_pcen : %d out of %d" % (len(gamaL2c_use), len(gamaL2c)))
print("## GAMA/USE - Median redshift : ", np.nanmedian(gamaL2a_use['Z']),
      np.nanmedian(gamaL2c_use['Z']))
print("## GAMA/USE - Median logL_sbp : ",
      (np.nanmedian(gamaL2a_use['lum_100'] + (gamaL2a_use['KCORRECT_I'] / 2.5))),
      (np.nanmedian(gamaL2c_use['lum_100'] + (gamaL2c_use['KCORRECT_c_I'] / 2.5))))
print("##########################################################################")
In [ ]: