In [15]:
%pylab inline

from __future__ import (division, print_function)

import os
import sys
import copy
import fnmatch
import warnings
import collections

import numpy as np
import scipy
try:
    from scipy.stats import scoreatpercentile
except ImportError:
    scoreatpercentile = False
from scipy.interpolate import interp1d
try:
    import cPickle as pickle
except ImportError:
    # cPickle does not exist under Python 3
    import pickle

# Astropy
from astropy.io import fits
from astropy    import units as u
from astropy.stats import sigma_clip
from astropy.table import Table, Column
from astropy.utils.console import ProgressBar

# AstroML
from astroML.plotting import hist

# Matplotlib related
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from matplotlib.ticker import NullFormatter
from matplotlib.ticker import MaxNLocator
# Matplotlib default settings
rcdef = plt.rcParams.copy()
pylab.rcParams['figure.figsize'] = 12, 10
pylab.rcParams['xtick.major.size'] = 8.0
pylab.rcParams['xtick.major.width'] = 2.5
pylab.rcParams['xtick.minor.size'] = 4.0
pylab.rcParams['xtick.minor.width'] = 2.5
pylab.rcParams['ytick.major.size'] = 8.0
pylab.rcParams['ytick.major.width'] = 2.5
pylab.rcParams['ytick.minor.size'] = 4.0
pylab.rcParams['ytick.minor.width'] = 2.5

# Personal
import hscUtils as hUtil
import galSBP

import matplotlib.patches as mpatches
from matplotlib.patches import Ellipse
from matplotlib.collections import PatchCollection


Populating the interactive namespace from numpy and matplotlib
WARNING: pylab import has clobbered these variables: ['copy', 'hist']
`%matplotlib` prevents importing * from pylab and numpy

In [16]:
# Absolute magnitude of the Sun in HSC filters

# Actually borrowed from the DES filters
# Values from magsun.data in FSPS
amag_sun_des_g = 5.08
amag_sun_des_r = 4.62
amag_sun_des_i = 4.52
amag_sun_des_z = 4.52
amag_sun_des_y = 4.51

# Based on http://www.baryons.org/ezgal/filters.php
amag_sun_ukiss_y = 4.515

# Extinction correction coefficients for HSC filters
## A_lambda = Coeff * E(B-V)

a_hsc_g = 3.233
a_hsc_r = 2.291 
a_hsc_i = 1.635
a_hsc_z = 1.261
a_hsc_y = 1.076

# Two-sided tail probabilities of a Gaussian at the 1, 2, and 3 sigma levels
SIGMA1 = 0.3173
SIGMA2 = 0.0455
SIGMA3 = 0.0027

# Common R^{1/4} grid used for the stacked surface brightness profiles
RSMA_COMMON = np.arange(0.4, 4.2, 0.02)
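
As an aside (not part of the original notebook flow), the SIGMA constants above are just the two-sided tail probabilities of a Gaussian; a minimal sketch to reproduce them, assuming scipy.stats.norm is available:

from scipy.stats import norm

# Two-sided probability of falling outside +/- n sigma for a unit Gaussian
for n in (1, 2, 3):
    print("%d sigma : %.4f" % (n, 2.0 * norm.sf(n)))
# Prints approximately 0.3173, 0.0455, and 0.0027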

In [17]:
# Bootstrap confidence intervals for the mean or median
def _confidence_interval_1d(A, alpha=.05, metric=np.mean, numResamples=10000, interpolate=True):
    """Calculate the bootstrap confidence interval along a one-dimensional array."""
    
    if not isinstance(alpha, collections.Iterable):
        alpha = np.array([alpha])

    N = len(A)
    resampleInds = np.random.randint(0, N, (numResamples,N))
    metricOfResampled = metric(A[resampleInds], axis=-1)

    confidenceInterval = np.zeros(2*len(alpha),dtype='float')
    
    if interpolate:
        for thisAlphaInd, thisAlpha in enumerate(alpha):
            confidenceInterval[2*thisAlphaInd] = scoreatpercentile(metricOfResampled, 
                                                                   thisAlpha*100/2.0)
            confidenceInterval[2*thisAlphaInd+1] = scoreatpercentile(metricOfResampled, 
                                                                     100-thisAlpha*100/2.0)
    else:
        sortedMetricOfResampled = np.sort(metricOfResampled)
        for thisAlphaInd, thisAlpha in enumerate(alpha):
            confidenceInterval[2*thisAlphaInd] = sortedMetricOfResampled[int(round(thisAlpha*numResamples/2.0))]
            confidenceInterval[2*thisAlphaInd+1] = sortedMetricOfResampled[int(round(numResamples - 
                                                                                     thisAlpha*numResamples/2.0))]
    return confidenceInterval
    
def _ma_confidence_interval_1d(A, alpha=.05, metric=np.mean, numResamples=10000, interpolate=True):
    A = np.ma.masked_invalid(A, copy=True)
    A = A.compressed()
    confidenceInterval = _confidence_interval_1d(A, alpha, metric, numResamples, interpolate)
    return confidenceInterval

def confidence_interval(A, axis=None, alpha=.05, metric=np.mean, numResamples=10000, interpolate=True):
    """Return the bootstrap confidence interval of an array or along an axis ignoring NaNs and masked elements.
    
    Parameters
    ----------
    A : array_like
        Array containing numbers whose confidence interval is desired. 
    axis : int, optional
        Axis along which the confidence interval is computed.
        The default is to compute the confidence interval of the flattened array.
    alpha: float or array, optional
        confidence level of confidence interval. 100.0*(1-alpha) percent confidence 
        interval will be returned.
        If length-n array, n confidence intervals will be computed
        The default is .05
    metric : numpy function, optional
        metric to calculate confidence interval for.
        The default is numpy.mean
    numResamples : int, optional
        number of bootstrap samples. The default is 10000.
    interpolate: bool, optional
        uses scipy.stats.scoreatpercentile to interpolate between bootstrap samples
        if alpha*numResamples/2.0 is not an integer.
        The default is True.
        
    Returns
    -------
    confidenceInterval : ndarray
        An array with the same shape as `A`, but with the specified axis replaced by
        one of length 2*len(alpha).
        If `A` is a 0-d array, or if axis is None, a 1-d array of length 2*len(alpha)
        is returned.
    """
    if interpolate is True and scoreatpercentile is False:
        print("need scipy to interpolate between values")
        interpolate = False
    A = A.copy()
    if axis is None:
        A = A.ravel()
        outA = _ma_confidence_interval_1d(A, alpha, metric, numResamples, interpolate)
    else:
        outA = np.apply_along_axis(_ma_confidence_interval_1d, axis, A, alpha, 
                                   metric, numResamples, interpolate)
        
    return outA
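
For reference, a minimal usage sketch of confidence_interval on made-up data (the array and numbers below are purely illustrative):

# Hypothetical example: 68% bootstrap interval on the median of noisy data
fakeData = np.random.normal(10.0, 2.0, size=200)
ci = confidence_interval(fakeData, alpha=SIGMA1, metric=np.nanmedian,
                         numResamples=1000)
print("Median between %.2f and %.2f (1-sigma)" % (ci[0], ci[1]))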

In [18]:
def normProf(sma, sbp, minSma, maxSma, divide=False): 
    """
    Naive method to normalize the profile. 
    
    Parameters: 
        sma    : Radius array 
        sbp    : Surface brightness profile 
        minSma : Minimum SMA of the normalization window
        maxSma : Maximum SMA of the normalization window
        divide : If True, divide by the offset instead of subtracting it
    """
    offset = np.nanmedian(sbp[(sma >= minSma) & 
                              (sma <= maxSma)])
    if divide: 
        return (sbp / offset)
    else:
        return (sbp-offset)
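
A quick toy illustration of normProf (the profile below is made up):

# Hypothetical example: subtract the median value between r = 9 and 11
rFake = np.linspace(1.0, 50.0, 100)
sbpFake = 10.0 - 2.5 * np.log10(rFake)
sbpShifted = normProf(rFake, sbpFake, 9.0, 11.0, divide=False)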

In [19]:
def getStackProfiles(sample, loc, name='GAMA', 
                     idCol='ID_USE', tabCol='sum_tab', save=True):
    """Get the stacks of the profiles."""
    print("## Sample %s : Will deal with %d galaxies" % (name, len(sample)))
    profiles = []
    with ProgressBar(len(sample), ipython_widget=True) as bar:
        for g in sample:
            try:
                gFile = os.path.join(loc, g[tabCol].replace('./', '')).strip()
                gProf = Table.read(gFile, format='fits')
                """ Add extra information """
                try: 
                    gProf.meta['KCORRECT_I'] = g['KCORRECT_I']
                    gProf.meta['KCORRECT_b_I'] = g['KCORRECT_b_I']
                    gProf.meta['KCORRECT_c_I'] = g['KCORRECT_c_I']
                    gProf.meta['KCORRECT_G'] = g['KCORRECT_G']
                    gProf.meta['KCORRECT_b_G'] = g['KCORRECT_b_G']
                    gProf.meta['KCORRECT_c_G'] = g['KCORRECT_c_G']
                    gProf.meta['KCORRECT_R'] = g['KCORRECT_R']
                    gProf.meta['KCORRECT_b_R'] = g['KCORRECT_b_R']
                    gProf.meta['KCORRECT_c_R'] = g['KCORRECT_c_R']
                    gProf.meta['KCORRECT_Z'] = g['KCORRECT_Z']
                    gProf.meta['KCORRECT_b_Z'] = g['KCORRECT_b_Z']
                    gProf.meta['KCORRECT_c_Z'] = g['KCORRECT_c_Z']
                    gProf.meta['KCORRECT_Y'] = g['KCORRECT_Y']
                    gProf.meta['KCORRECT_b_Y'] = g['KCORRECT_b_Y']
                    gProf.meta['KCORRECT_c_Y'] = g['KCORRECT_c_Y']
                    gProf.meta['LOGM2LI_A'] = g['logm2lI_A']
                    gProf.meta['LOGM2LI_B'] = g['logm2lI_B']
                    gProf.meta['LOGM2LI_C'] = g['logm2lI_C']
                    gProf.meta['LUM_100'] = g['lum_100']
                    gProf.meta['LUM_120'] = g['lum_120']
                except Exception:
                    print("## WARNING: Some metadata may not be available !")
                    continue
            except Exception:
                print("## Missing: %s" % gFile)
                continue 
            profiles.append(gProf)
            bar.update()
    
    if save: 
        outPkl = os.path.join(loc, (name + '_profs.pkl'))
        hUtil.saveToPickle(profiles, outPkl)
        print("## Save %s to %s" % (name, outPkl))
        
    return profiles

In [20]:
def organizeSbp(profiles, col1='muI1', col2='KCORRECT_c_I', 
                kind='sbp', norm=False, r1=9.9, r2=10.1, divide=False,
                col3=None, col4=None, justStack=False,
                sun1=amag_sun_des_g, sun2=amag_sun_des_r):
    """ Get the stack of individual profiels, and their med/avg. """
    if kind.strip() == 'sbp':
        if col2 is not None: 
            if norm:
                stack = np.vstack(normProf(p['rKpc'], 
                                           np.asarray(p[col1] + (p.meta[col2] / 2.5)), 
                                           r1, r2, divide=divide) 
                                  for p in profiles)
            else:
                stack = np.vstack(np.asarray(p[col1] + (p.meta[col2] / 2.5)) 
                                  for p in profiles)
        else: 
            print("## NO KCORRECTION APPLIED !!")            
            if norm:
                stack = np.vstack(normProf(p['rKpc'], p[col1], 
                                           r1, r2, divide=divide) 
                                  for p in profiles)
            else:
                stack = np.vstack(np.asarray(p[col1]) for p in profiles)
    elif kind.strip() == 'mass':
        if norm:
            stack = np.vstack(normProf(p['rKpc'], 
                                       np.asarray(p[col1] + p.meta[col2]), 
                                       r1, r2, divide=divide) for p in profiles)
        else: 
            stack = np.vstack(np.asarray(p[col1] + p.meta[col2]) for p in profiles)
    elif kind.strip() == 'color':
        cSun = (sun1 - sun2)
        if col3 is None or col4 is None:
            print("## NO KCORRECTION APPLIED !!")
            if norm:
                stack = np.vstack(normProf(p['rKpc'], 
                                           np.asarray(cSun - 2.5 * (p[col1] - p[col2])), 
                                           r1, r2, divide=divide) for p in profiles)
            else: 
                stack = np.vstack(np.asarray(cSun - 2.5 *(p[col1] - p[col2])) for p in profiles)
        else:
            if norm:
                stack = np.vstack(normProf(p['rKpc'], 
                                           np.asarray(cSun - 2.5 * (p[col1] - p[col2]) -
                                                      (p.meta[col3] - p.meta[col4])), 
                                           r1, r2, divide=divide) for p in profiles)
            else: 
                stack = np.vstack(np.asarray(cSun - 2.5 * (p[col1] - p[col2]) -
                                             (p.meta[col3] - p.meta[col4])) 
                                  for p in profiles)
    elif kind.strip() == 'lum':
        if col2 is None:
            stack = np.vstack(np.asarray(p[col1]) for p in profiles)
        else:
            stack = np.vstack(np.asarray(p[col1] - p.meta[col2]) for p in profiles)
    else: 
        raise Exception("## WRONG KIND !!")
        
    if not justStack:
        """ Get the median and 1-sigma confidence range """
        medProf = confidence_interval(stack, axis=0, alpha=np.asarray([SIGMA1, 1.0]), 
                                      metric=np.nanmedian, numResamples=1000, 
                                      interpolate=True) 
        avgProf = confidence_interval(stack, axis=0, alpha=np.asarray([SIGMA1, 1.0]), 
                                      metric=np.nanmean, numResamples=1000, 
                                      interpolate=True) 
        stdProf = confidence_interval(stack, axis=0, alpha=np.asarray([SIGMA1, 1.0]), 
                                      metric=np.nanstd, numResamples=1000, 
                                      interpolate=True) 
        return stack, medProf, avgProf, stdProf
    else: 
        return stack
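
organizeSbp is exercised on real data later in this notebook; as a quick reminder of its output, here is a toy sketch with fabricated profile tables (column names and metadata mimic the real ones, values are arbitrary):

# Two fake mass profiles sampled on RSMA_COMMON
toyProfs = []
for shift in (0.0, 0.2):
    t = Table([Column(RSMA_COMMON ** 4.0, name='rKpc'),
               Column(8.0 - 2.0 * RSMA_COMMON + shift, name='muI1')])
    t.meta['LOGM2LI_C'] = 0.25
    toyProfs.append(t)

# justStack=True returns only the (Ngal, Nradius) array of profiles
toyStack = organizeSbp(toyProfs, col1='muI1', col2='LOGM2LI_C',
                       kind='mass', justStack=True)
print(toyStack.shape)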

In [21]:
def loadPkl(filename):
    try:
        import cPickle as pickle
    except ImportError:
        warnings.warn("## cPickle is not available!!")
        import pickle
    
    if os.path.isfile(filename):
        pklFile = open(filename, 'rb')
        data = pickle.load(pklFile)    
        pklFile.close()
    
        return data
    else: 
        warnings.warn("## Can not find %s, return None" % filename)
        return None
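
The matching save step in this notebook relies on hUtil.saveToPickle (see getStackProfiles); for completeness, a minimal stand-alone sketch of such a writer, using only the standard pickle module (a hypothetical helper, not the one used above):

def savePkl(data, filename):
    """Dump data to a pickle file; simple counterpart of loadPkl."""
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    with open(filename, 'wb') as pklFile:
        pickle.dump(data, pklFile, protocol=2)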

In [22]:
def getFracRadius(prof, frac=0.5, maxRad=100.0, lum='lumI1', returnRatio=False):
    """Radius that encloses a fraction `frac` of the luminosity within maxRad.

    `lum` should be a curve-of-growth column in log10(luminosity); the ratio of
    enclosed to total luminosity is interpolated to find the corresponding radius.
    """
    rkpc = prof['rKpc']
    use = prof[lum]
        
    if maxRad is None: 
        maxUse = np.nanmax(use)
    else: 
        f = interp1d(rkpc, use)
        maxUse = f(maxRad)
    
    ratio = (10.0 ** use) / (10.0 ** maxUse)
    
    f2 = interp1d(ratio, rkpc)
    radFrac = f2(frac)
    
    if returnRatio:
        return ratio, radFrac
    else:
        return radFrac
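
A toy check of getFracRadius using a fabricated curve of growth (column names mirror the real tables; the numbers are made up):

# Hypothetical curve of growth: log10 enclosed luminosity vs radius in Kpc
rFake = np.linspace(1.0, 200.0, 500)
cogFake = Table([Column(rFake, name='rKpc'),
                 Column(np.log10(1.0 - np.exp(-rFake / 20.0)) + 10.0, name='lumI1')])
r50Fake = getFracRadius(cogFake, frac=0.5, maxRad=160.0, lum='lumI1')
print("Half-light radius of the toy profile: %.1f Kpc" % r50Fake)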

Recent Results:


In [23]:
newDir = '/Users/songhuang/work/hscs/gama_massive/sbp/'

try:
    bcgTab
except NameError:
    pass
else:
    del bcgTab
    
try:
    memTab
except NameError:
    pass
else:
    del memTab    
    
try:
    gamaTab
except NameError:
    pass
else:
    del gamaTab
    
# Folder for 3 datasets
bcgDir = os.path.join(newDir, 'redbcg')
memDir = os.path.join(newDir, 'redmem')
gamaDir = os.path.join(newDir, 'gama')

# Two summary catalogs
#bcgCat = os.path.join(bcgDir, 'redmapper_bcg_hscmatch_mass_use_sbpsum_modA_muI1.fits')
bcgCat = os.path.join(bcgDir, 'redbcg_mass_use_dom.fits')
memCat = os.path.join(memDir, 'redmapper_mem_hscmatch_mass_sbpsum_modA_muI1.fits')
gamaCat = os.path.join(gamaDir, 'gama_massive_160107_sbpsum_modA_muI1.fits')

if not os.path.isfile(bcgCat):
    raise Exception("## Can not find catalog for BCGs : %s" % bcgCat)
else: 
    bcgTab = Table.read(bcgCat, format='fits')

if not os.path.isfile(memCat):
    raise Exception("## Can not find catalog for cluster members : %s" % memCat)
else: 
    memTab = Table.read(memCat, format='fits')
    
if not os.path.isfile(gamaCat):
    raise Exception("## Can not find catalog for GAMA galaxies : %s" % gamaCat)
else: 
    gamaTab = Table.read(gamaCat, format='fits')

Read in the catalogs for each subsample:

M1a


In [43]:
suffixT = 'M1a'
suffixP = 'm1a'
usePcen = True

if usePcen: 
    bcgSample1 = Table.read(os.path.join(bcgDir, 'bcg_' + suffixT + '_pcen.fits'), format='fits')
else: 
    bcgSample1 = Table.read(os.path.join(bcgDir, 'bcg_' + suffixT + '.fits'), format='fits')
    
memSample1 = Table.read(os.path.join(memDir, 'mem_' + suffixT + '.fits'), format='fits')
gamaSample1 = Table.read(os.path.join(gamaDir, 'gama_' + suffixT + '.fits'), format='fits')

In [44]:
if usePcen:
    bcgPkl1 = os.path.join(bcgDir, 'massive_bcg_' + suffixP + '_p_profs.pkl')
else: 
    bcgPkl1 = os.path.join(bcgDir, 'massive_bcg_' + suffixP + '_profs.pkl')

if os.path.isfile(bcgPkl1):
    print(bcgPkl1)
    print("# Read in available stacks of BCG/%s" % suffixT)
    bcgProfs1 = loadPkl(bcgPkl1)
else:
    bcgProfs1 = getStackProfiles(bcgSample1, bcgDir, name=('massive_bcg_' + suffixP))

print("## Dealing with %d profiles" % len(bcgProfs1))


/Users/songhuang/work/hscs/gama_massive/sbp/redbcg/massive_bcg_m1a_p_profs.pkl
# Read in available stacks of BCG/M1a
## Dealing with 39 profiles

In [45]:
memPkl1 = os.path.join(memDir, 'massive_mem_' + suffixP + '_profs.pkl')

if os.path.isfile(memPkl1):
    print(memPkl1)
    print("# Read in available stacks of BCG/%s" % suffixT)
    memProfs1 = loadPkl(memPkl1)
else:
    memProfs1 = getStackProfiles(memSample1, memDir, name=('massive_mem_' + suffixP))

print("## Dealing with %d profiles" % len(memProfs1))


/Users/songhuang/work/hscs/gama_massive/sbp/redmem/massive_mem_m1a_profs.pkl
# Read in available stacks of MEM/M1a
## Dealing with 263 profiles

In [46]:
gamaPkl1 = os.path.join(gamaDir, 'massive_gama_' + suffixP + '_profs.pkl')

if os.path.isfile(gamaPkl1):
    print(gamaPkl1)
    print("# Read in available stacks of BCG/%s" % suffixT)
    gamaProfs1 = loadPkl(gamaPkl1)
else:
    gamaProfs1 = getStackProfiles(gamaSample1, gamaDir, name=('massive_gama_' + suffixP))

print("## Dealing with %d profiles" % len(gamaProfs1))


/Users/songhuang/work/hscs/gama_massive/sbp/gama/massive_gama_m1a_profs.pkl
# Read in available stacks of GAMA/M1a
## Dealing with 1053 profiles

M2a


In [47]:
suffixT = 'M2a'
suffixP = 'm2a'
usePcen = True

if usePcen: 
    bcgSample2 = Table.read(os.path.join(bcgDir, 'bcg_' + suffixT + '_pcen.fits'), format='fits')
else: 
    bcgSample2 = Table.read(os.path.join(bcgDir, 'bcg_' + suffixT + '.fits'), format='fits')
    
memSample2 = Table.read(os.path.join(memDir, 'mem_' + suffixT + '.fits'), format='fits')
gamaSample2 = Table.read(os.path.join(gamaDir, 'gama_' + suffixT + '.fits'), format='fits')

In [48]:
if usePcen:
    bcgPkl2 = os.path.join(bcgDir, 'massive_bcg_' + suffixP + '_p_profs.pkl')
else: 
    bcgPkl2 = os.path.join(bcgDir, 'massive_bcg_' + suffixP + '_profs.pkl')

if os.path.isfile(bcgPkl2):
    print(bcgPkl2)
    print("# Read in available stacks of BCG/%s" % suffixT)
    bcgProfs2 = loadPkl(bcgPkl2)
else:
    bcgProfs2 = getStackProfiles(bcgSample2, bcgDir, name=('massive_bcg_' + suffixP))

print("## Dealing with %d profiles" % len(bcgProfs2))


/Users/songhuang/work/hscs/gama_massive/sbp/redbcg/massive_bcg_m2a_p_profs.pkl
# Read in available stacks of BCG/M2a
## Dealing with 42 profiles

In [49]:
memPkl2 = os.path.join(memDir, 'massive_mem_' + suffixP + '_profs.pkl')

if os.path.isfile(memPkl2):
    print(memPkl2)
    print("# Read in available stacks of BCG/%s" % suffixT)
    memProfs2 = loadPkl(memPkl2)
else:
    memProfs2 = getStackProfiles(memSample2, memDir, name=('massive_mem_' + suffixP))

print("## Dealing with %d profiles" % len(memProfs2))


/Users/songhuang/work/hscs/gama_massive/sbp/redmem/massive_mem_m2a_profs.pkl
# Read in available stacks of MEM/M2a
## Dealing with 81 profiles

In [50]:
gamaPkl2 = os.path.join(gamaDir, 'massive_gama_' + suffixP + '_profs.pkl')

if os.path.isfile(gamaPkl2):
    print(gamaPkl2)
    print("# Read in available stacks of BCG/%s" % suffixT)
    gamaProfs2 = loadPkl(gamaPkl2)
else:
    gamaProfs2 = getStackProfiles(gamaSample2, gamaDir, name=('massive_gama_' + suffixP))

print("## Dealing with %d profiles" % len(gamaProfs2))


/Users/songhuang/work/hscs/gama_massive/sbp/gama/massive_gama_m2a_profs.pkl
# Read in available stacks of GAMA/M2a
## Dealing with 304 profiles

M3a


In [51]:
suffixT = 'M3a'
suffixP = 'm3a'
usePcen = True

if usePcen: 
    bcgSample3 = Table.read(os.path.join(bcgDir, 'bcg_' + suffixT + '_pcen.fits'), format='fits')
else: 
    bcgSample3 = Table.read(os.path.join(bcgDir, 'bcg_' + suffixT + '.fits'), format='fits')
    
memSample3 = Table.read(os.path.join(memDir, 'mem_' + suffixT + '.fits'), format='fits')
gamaSample3 = Table.read(os.path.join(gamaDir, 'gama_' + suffixT + '.fits'), format='fits')

In [52]:
if usePcen:
    bcgPkl3 = os.path.join(bcgDir, 'massive_bcg_' + suffixP + '_p_profs.pkl')
else: 
    bcgPkl3 = os.path.join(bcgDir, 'massive_bcg_' + suffixP + '_profs.pkl')

if os.path.isfile(bcgPkl3):
    print(bcgPkl3)
    print("# Read in available stacks of BCG/%s" % suffixT)
    bcgProfs3 = loadPkl(bcgPkl3)
else:
    bcgProfs3 = getStackProfiles(bcgSample3, bcgDir, name=('massive_bcg_' + suffixP))

print("## Dealing with %d profiles" % len(bcgProfs3))


/Users/songhuang/work/hscs/gama_massive/sbp/redbcg/massive_bcg_m3a_p_profs.pkl
# Read in available stacks of BCG/M3a
## Dealing with 26 profiles

In [53]:
memPkl3 = os.path.join(memDir, 'massive_mem_' + suffixP + '_profs.pkl')

if os.path.isfile(memPkl3):
    print(memPkl3)
    print("# Read in available stacks of BCG/%s" % suffixT)
    memProfs3 = loadPkl(memPkl3)
else:
    memProfs3 = getStackProfiles(memSample3, memDir, name=('massive_mem_' + suffixP))

print("## Dealing with %d profiles" % len(memProfs3))


/Users/songhuang/work/hscs/gama_massive/sbp/redmem/massive_mem_m3a_profs.pkl
# Read in available stacks of MEM/M3a
## Dealing with 33 profiles

In [54]:
gamaPkl3 = os.path.join(gamaDir, 'massive_gama_' + suffixP + '_profs.pkl')

if os.path.isfile(gamaPkl3):
    print(gamaPkl3)
    print("# Read in available stacks of BCG/%s" % suffixT)
    gamaProfs3 = loadPkl(gamaPkl3)
else:
    gamaProfs3 = getStackProfiles(gamaSample3, gamaDir, name=('massive_gama_' + suffixP))

print("## Dealing with %d profiles" % len(gamaProfs3))


/Users/songhuang/work/hscs/gama_massive/sbp/gama/massive_gama_m3a_profs.pkl
# Read in available stacks of GAMA/M3a
## Dealing with 53 profiles

Test the R50, R90 measurements:


In [92]:
gamaR50_1 = np.log10(np.asarray([getFracRadius(pp, frac=0.5, maxRad=160.0,
                                               lum='lumI1', returnRatio=False) for pp in gamaProfs1]))
gamaR90_1 = np.log10(np.asarray([getFracRadius(pp, frac=0.9, maxRad=160.0, 
                                               lum='lumI1', returnRatio=False) for pp in gamaProfs1]))
gamaM100_1 = np.asarray([(pp.meta['LUM_100'] + pp.meta['LOGM2LI_C']) for pp in gamaProfs1])

gamaR50_2 = np.log10(np.asarray([getFracRadius(pp, frac=0.5, maxRad=160.0,
                                               lum='lumI1', returnRatio=False) for pp in gamaProfs2]))
gamaR90_2 = np.log10(np.asarray([getFracRadius(pp, frac=0.9, maxRad=160.0, 
                                               lum='lumI1', returnRatio=False) for pp in gamaProfs2]))
gamaM100_2 = np.asarray([(pp.meta['LUM_100'] + pp.meta['LOGM2LI_C']) for pp in gamaProfs2])

gamaR50_3 = np.log10(np.asarray([getFracRadius(pp, frac=0.5, maxRad=160.0,
                                               lum='lumI1', returnRatio=False) for pp in gamaProfs3]))
gamaR90_3 = np.log10(np.asarray([getFracRadius(pp, frac=0.9, maxRad=160.0, 
                                               lum='lumI1', returnRatio=False) for pp in gamaProfs3]))
gamaM100_3 = np.asarray([(pp.meta['LUM_100'] + pp.meta['LOGM2LI_C']) for pp in gamaProfs3])

In [93]:
bcgR50_1 = np.log10(np.asarray([getFracRadius(pp, frac=0.5, maxRad=160.0, 
                                              lum='lumI1', returnRatio=False) for pp in bcgProfs1]))
bcgR90_1 = np.log10(np.asarray([getFracRadius(pp, frac=0.9, maxRad=160.0, 
                                              lum='lumI1', returnRatio=False) for pp in bcgProfs1]))
bcgM100_1 = np.asarray([(pp.meta['LUM_100'] + pp.meta['LOGM2LI_C']) for pp in bcgProfs1])

bcgR50_2 = np.log10(np.asarray([getFracRadius(pp, frac=0.5, maxRad=160.0, 
                                              lum='lumI1', returnRatio=False) for pp in bcgProfs2]))
bcgR90_2 = np.log10(np.asarray([getFracRadius(pp, frac=0.9, maxRad=160.0, 
                                              lum='lumI1', returnRatio=False) for pp in bcgProfs2]))
bcgM100_2 = np.asarray([(pp.meta['LUM_100'] + pp.meta['LOGM2LI_C']) for pp in bcgProfs2])

bcgR50_3 = np.log10(np.asarray([getFracRadius(pp, frac=0.5, maxRad=160.0, 
                                              lum='lumI1', returnRatio=False) for pp in bcgProfs3]))
bcgR90_3 = np.log10(np.asarray([getFracRadius(pp, frac=0.9, maxRad=160.0, 
                                              lum='lumI1', returnRatio=False) for pp in bcgProfs3]))
bcgM100_3 = np.asarray([(pp.meta['LUM_100'] + pp.meta['LOGM2LI_C']) for pp in bcgProfs3])

In [94]:
memR50_1 = np.log10(np.asarray([getFracRadius(pp, frac=0.5, maxRad=160.0, 
                                              lum='lumI1', returnRatio=False) for pp in memProfs1]))
memR90_1 = np.log10(np.asarray([getFracRadius(pp, frac=0.9, maxRad=160.0, 
                                              lum='lumI1', returnRatio=False) for pp in memProfs1]))
memM100_1 = np.asarray([(pp.meta['LUM_100'] + pp.meta['LOGM2LI_C']) for pp in memProfs1])

memR50_2 = np.log10(np.asarray([getFracRadius(pp, frac=0.5, maxRad=160.0, 
                                              lum='lumI1', returnRatio=False) for pp in memProfs2]))
memR90_2 = np.log10(np.asarray([getFracRadius(pp, frac=0.9, maxRad=160.0, 
                                              lum='lumI1', returnRatio=False) for pp in memProfs2]))
memM100_2 = np.asarray([(pp.meta['LUM_100'] + pp.meta['LOGM2LI_C']) for pp in memProfs2])

memR50_3 = np.log10(np.asarray([getFracRadius(pp, frac=0.5, maxRad=160.0, 
                                              lum='lumI1', returnRatio=False) for pp in memProfs3]))
memR90_3 = np.log10(np.asarray([getFracRadius(pp, frac=0.9, maxRad=160.0, 
                                              lum='lumI1', returnRatio=False) for pp in memProfs3]))
memM100_3 = np.asarray([(pp.meta['LUM_100'] + pp.meta['LOGM2LI_C']) for pp in memProfs3])

In [95]:
print(np.nanmedian(gamaR90_1), np.nanmedian(bcgR90_1))
print(np.nanmedian(gamaR90_2), np.nanmedian(bcgR90_2))
print(np.nanmedian(gamaR90_3), np.nanmedian(bcgR90_3))


1.75641588913 1.90564494332
1.8242390075 1.91646866358
1.96288489334 1.94535662895

In [96]:
logmGama = np.hstack([gamaM100_1, gamaM100_2, gamaM100_3]).ravel()
logr50Gama = np.hstack([gamaR50_1, gamaR50_2, gamaR50_3]).ravel()
logr90Gama = np.hstack([gamaR90_1, gamaR90_2, gamaR90_3]).ravel()

logmBcg = np.hstack([bcgM100_1, bcgM100_2, bcgM100_3]).ravel()
logr50Bcg = np.hstack([bcgR50_1, bcgR50_2, bcgR50_3]).ravel()
logr90Bcg = np.hstack([bcgR90_1, bcgR90_2, bcgR90_3]).ravel()

In [97]:
logmGamaU = logmGama[logmGama >= 11.42]
logr50GamaU = logr50Gama[logmGama >= 11.42]
logr90GamaU = logr90Gama[logmGama >= 11.42]

logmBcgU = logmBcg[logmBcg >= 11.42]
logr50BcgU = logr50Bcg[logmBcg >= 11.42]
logr90BcgU = logr90Bcg[logmBcg >= 11.42]

In [98]:
logmGamaM = logmGama[logmGama >= 11.62]
logr50GamaM = logr50Gama[logmGama >= 11.62]
logr90GamaM = logr90Gama[logmGama >= 11.62]

logmBcgM = logmBcg[logmBcg >= 11.62]
logr50BcgM = logr50Bcg[logmBcg >= 11.62]
logr90BcgM = logr90Bcg[logmBcg >= 11.62]

In [99]:
plt.xlim(10.5, 12.6)
plt.ylim(0.05, 2.59)

plt.xlabel('$\log\ (M_{\star}/M_{\odot})$', fontsize=35)
plt.ylabel('$\log\ (R_{\mathrm{e}}/\mathrm{Kpc})$', fontsize=35)

plt.scatter(logmGamaU, logr50GamaU, c='k', marker='o', alpha=0.2)
plt.scatter(logmBcgU, logr50BcgU, c='r', marker='h', s=70, alpha=0.7)

plt.show()


Fit Mass-Size Relation


In [100]:
import emcee
import corner

# Likelihood function
def lnlike(theta, x, y, yerr):
    a, b, lnf = theta
    # Straight-line model 
    model = a * x + b
    # Total variance includes a fractional intrinsic scatter term (lnf = ln f)
    inv_sigma2 = (1.0 / (yerr**2 + model**2*np.exp(2*lnf)))
    return -0.5*(np.sum((y-model)**2*inv_sigma2 - 
                         np.log(inv_sigma2)))

# Priors
def lnprior(theta):
    a, b, lnf = theta
    if 0.2 < a < 1.4 and -13.0 < b < 0.0 and -3.0 < lnf < 3.0:
        return 0.0
    return -np.inf

def lnprob(theta, x, y, yerr):
    lp = lnprior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp + lnlike(theta, x, y, yerr)

# Linear least-squares solution 
def llsLine(x, y, yerr):
    """ Simple straight-line fitting """
    A = np.vstack((np.ones_like(x), x)).T
    C = np.diag(yerr * yerr)
    cov = np.linalg.inv(np.dot(A.T, np.linalg.solve(C, A)))
    b_ls, a_ls = np.dot(cov, np.dot(A.T, 
                                    np.linalg.solve(C, y)))
    print("LLS: a =%8.5f ; b =%8.5f" % (a_ls, b_ls))
    return a_ls, b_ls
    
# Use Emcee to fit a straight line 
def emceeLine(x, y, yerr, nwalkers=100, ndim=3, nburn=100, 
              nstep=600, show=True): 
    """ Initial guesses from simple LLS fitting """
    #a_ls, b_ls = llsLine(x, y, yerr)
    a_ls, b_ls = 0.9, -10.5
    initial = [a_ls, b_ls, 0.00]
    """ Start the sampler """
    sampler = emcee.EnsembleSampler(nwalkers, ndim, 
                                    lnprob, args=(x, y, yerr))
    """ Initializing the walkers. """
    np.random.seed(0)
    guesses = [initial + (1e-2*np.random.randn(ndim)) 
               for i in range(nwalkers)]
    """ Run MCMC """
    print("Start the MCMC runs")
    %time sampler.run_mcmc(guesses, nstep)
    print("Done")
    """ Flatten the chain so that we have a flat list of samples """
    samples = sampler.chain[:, nburn:, :].reshape(-1, ndim)
    if show:   
        fig = corner.corner(samples, 
                            labels=["$a$", "$b$", "$\ln\,f$"])
    """ Compute the quantiles. """
    samples[:, 2] = np.exp(samples[:, 2])
    a_mcmc, b_mcmc, f_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
                                 zip(*np.percentile(samples, [16, 50, 84],
                                                    axis=0)))
    print("""MCMC result:
             a = {0[0]} +{0[1]} -{0[2]}
             b = {1[0]} +{1[1]} -{1[2]} 
             f = {2[0]} +{2[1]} -{2[2]}
            """.format(a_mcmc, b_mcmc, f_mcmc))

    return a_mcmc, b_mcmc, f_mcmc
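
llsLine is defined above but bypassed inside emceeLine (fixed initial guesses are used instead); a small synthetic check of the helper, with made-up data, could look like this:

# Hypothetical check of the linear least-squares helper on synthetic data
xFake = np.linspace(11.4, 12.4, 50)
yFake = 0.9 * xFake - 10.5 + np.random.normal(0.0, 0.05, size=xFake.size)
yErrFake = np.full_like(xFake, 0.05)
aFit, bFit = llsLine(xFake, yFake, yErrFake)   # should recover a ~ 0.9, b ~ -10.5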

In [101]:
logr50BcgErr = logr50BcgM * 0.0 + 0.02

a_mcmc, b_mcmc, f_mcmc = emceeLine(logmBcgM, logr50BcgM, logr50BcgErr, 
                                   nwalkers=300, ndim=3, nburn=2000, 
                                   nstep=8000, show=True)

# Best parameters
aBcgB, aBcgU, aBcgL = a_mcmc
bBcgB, bBcgU, bBcgL = b_mcmc

plt.show()


Start the MCMC runs
CPU times: user 1min 2s, sys: 519 ms, total: 1min 2s
Wall time: 1min 3s
Done
MCMC result:
             a = 0.31902946998 +0.121622124606 -0.0814234211914
             b = -2.5418545465 +1.00706445718 -1.542711606 
             f = 0.106828204466 +0.0132837901489 -0.00944553259144
            

In [103]:
plt.xlim(11.4, 12.6)
plt.ylim(0.45, 2.59)

plt.xlabel('$\log\ (M_{\star}/M_{\odot})$', fontsize=35)
plt.ylabel('$\log\ (R_{\mathrm{50}}/\mathrm{Kpc})$', fontsize=35)

plt.scatter(logmGamaU, logr50GamaU, c='k', marker='o', alpha=0.2)
plt.scatter(logmBcgU, logr50BcgU, c='r', marker='h', s=70, alpha=0.7)

# Red-sequence
xx = np.linspace(10.0, 13.0, 100)

# MCMC result:
# a = 0.960387107648 +0.0755428582291 -0.0750014260391
# b = -10.0134319999 +0.883141848718 -0.889418975333 
# f = 0.168797744636 +0.0104793857322 -0.00963780589871
plt.plot(xx, (0.96038 * xx - 10.01343),
        linestyle='--', color='r', linewidth=3.5)
#yy1 = (aBcgB + aBcgU) * xx + (bBcgB)
#yy2 = (aBcgB - aBcgL) * xx + (bBcgB)
#plt.fill_between(xx, yy1, yy2, 
#                facecolor='r', interpolate=True, alpha=0.15)

# MCMC result:
# a = 0.935475557486 +0.026570857285 -0.026374388974
# b = -9.81576469101 +0.305357355175 -0.307325596592 
# f = 0.15056796201 +0.00316298933649 -0.00311714344817
plt.plot(xx, (0.93546 * xx - 9.81576),
        linestyle='-.', color='k', linewidth=3.5)

plt.show()



In [104]:
plt.xlim(11.4, 12.6)
plt.ylim(0.45, 2.59)

plt.xlabel('$\log\ (M_{\star}/M_{\odot})$', fontsize=35)
plt.ylabel('$\log\ (R_{\mathrm{90}}/\mathrm{Kpc})$', fontsize=35)

plt.scatter(logmGamaU, logr90GamaU, c='k', marker='o', alpha=0.2)
plt.scatter(logmBcgU, logr90BcgU, c='r', marker='h', s=70, alpha=0.7)

# Red-sequence
xx = np.linspace(10.0, 13.0, 100)

# MCMC result:
# a = 0.960387107648 +0.0755428582291 -0.0750014260391
# b = -10.0134319999 +0.883141848718 -0.889418975333 
# f = 0.168797744636 +0.0104793857322 -0.00963780589871
plt.plot(xx, (0.96038 * xx - 10.01343),
        linestyle='--', color='r', linewidth=3.5)
#yy1 = (aBcgB + aBcgU) * xx + (bBcgB)
#yy2 = (aBcgB - aBcgL) * xx + (bBcgB)
#plt.fill_between(xx, yy1, yy2, 
#                facecolor='r', interpolate=True, alpha=0.15)

# MCMC result:
# a = 0.935475557486 +0.026570857285 -0.026374388974
# b = -9.81576469101 +0.305357355175 -0.307325596592 
# f = 0.15056796201 +0.00316298933649 -0.00311714344817
plt.plot(xx, (0.93546 * xx - 9.81576),
        linestyle='-.', color='k', linewidth=3.5)

plt.show()



In [116]:
plt.xlim(11.4, 12.6)
plt.ylim(-0.19, 1.59)

plt.xlabel('$\log\ (M_{\star}/M_{\odot})$', fontsize=35)
plt.ylabel('$\log\ \gamma $', fontsize=40)

plt.scatter(logmGamaU, (logr50GamaU + 0.94 * (11.0 - logmGamaU)), 
            c='k', alpha=0.15, s=20)

plt.scatter(logmBcgU, (logr50BcgU + 0.94 * (11.0 - logmBcgU)), 
            c='r', alpha=0.8, s=70, marker='h')

plt.show()



In [127]:
plt.xlabel('$\log\ \gamma $', fontsize=40)

gGama = (logr50GamaM + 0.94 * (11.0 - logmGamaM))
gGama = gGama[np.isfinite(gGama)]

gBcg = (logr50BcgM + 0.94 * (11.0 - logmBcgM))
gBcg = gBcg[np.isfinite(gBcg)]
         
plt.hist(gGama, 40, normed=True,
         edgecolor='k', alpha=0.95, histtype='step', linewidth=4.0)

plt.hist(gBcg, 10, normed=True, 
         facecolor='r', alpha=0.4, histtype='stepfilled')

plt.show()


Separate the sample into profiles above/below the median profile

  • For Alexie (16-01-21)

In [101]:
allProfs = copy.deepcopy(bcgProfs)
allProfs += memProfs
allProfs += gamaProfs
print("## Have %d profiles in total" % len(allProfs))


## Have 112 profiles in total

In [102]:
mpStack, mpMed, mpAvg, mpStd = organizeSbp(allProfs, col1='muI1', 
                                           col2='LOGM2LI_C', kind='mass')

In [103]:
mpAbove = []
mpBelow = []

for prof in allProfs: 
    rkpc = prof['rKpc']
    mp = prof['muI1'] + prof.meta['LOGM2LI_C']
    mSep1 = mp[(rkpc >= 40.0) & (rkpc <= 90.0)] - mpMed[1][(rkpc >= 40.0) & (rkpc <= 90.0)]
    mSep2 = mp[(rkpc >= 40.0) & (rkpc <= 90.0)] - mpMed[0][(rkpc >= 40.0) & (rkpc <= 90.0)]
    
    mSep3 = mp[(rkpc >= 10.0) & (rkpc <= 100.0)] - mpMed[1][(rkpc >= 10.0) & (rkpc <= 100.0)]
    mSep4 = mp[(rkpc >= 10.0) & (rkpc <= 100.0)] - mpMed[0][(rkpc >= 10.0) & (rkpc <= 100.0)]
    
    if (np.nanmedian(mSep1) >= 0.03) and (np.nanmax(mSep3) <= 0.4) and (np.nanmin(mSep4) >= -0.45): 
        mpAbove.append(prof)
    if (np.nanmedian(mSep2) <= -0.03) and (np.nanmax(mSep3) <= 0.4) and (np.nanmin(mSep4) >= -0.45):
        mpBelow.append(prof)
        
print("## %d profiles above the median profile" % len(mpAbove))
print("## %d profiles below the median profile" % len(mpBelow))


## 40 profiles above the median profile
## 37 profiles below the median profile

In [106]:
print("# 11.8 < logM < 12.1; Above the median profile")
print("# RA   DEC   Z   DUMMMY")
for pp in mpAbove: 
    # print(pp.meta['PREFIX'], pp.meta['GALID'])
    if pp.meta['PREFIX'] == 'redBCG':
        indexUse = np.where(bcgTab['ID_CLUSTER'] == int(pp.meta['GALID']))[0][0]
        raUse = bcgTab[indexUse]['RA_BCG']
        decUse = bcgTab[indexUse]['DEC_BCG']
        zUse = bcgTab[indexUse]['z_use']
        print("%10.7f  %10.7f  %10.7f  0.001" % (raUse, decUse, zUse))
        del raUse, decUse, zUse
    elif pp.meta['PREFIX'] == 'redMem':
        indexUse = np.where(memTab['ISEDFIT_ID'] == int(pp.meta['GALID']))[0][0]
        raUse = memTab[indexUse]['RA_MEM']
        decUse = memTab[indexUse]['DEC_MEM']
        zUse = memTab[indexUse]['z_use']
        print("%10.7f  %10.7f  %10.7f  0.001" % (raUse, decUse, zUse))
        del raUse, decUse, zUse
    elif pp.meta['PREFIX'] == 'gama':
        indexUse = np.where(gamaTab['ISEDFIT_ID'] == int(pp.meta['GALID']))[0][0]
        raUse = gamaTab[indexUse]['ra_hsc']
        decUse = gamaTab[indexUse]['dec_hsc']
        zUse = gamaTab[indexUse]['z_use']
        print("%10.7f  %10.7f  %10.7f  0.001" % (raUse, decUse, zUse))
        del raUse, decUse, zUse


# 11.8 < logM < 12.1; Above the median profile
# RA   DEC   Z   DUMMY
35.4406376  -3.7719089   0.4329238  0.001
37.4331900  -3.6148702   0.3276046  0.001
216.2429206   1.1536153   0.3041700  0.001
37.6615690  -4.9910022   0.2919367  0.001
180.1294512   0.3241922   0.2470500  0.001
215.1510151   0.8831244   0.3303000  0.001
221.0377519   0.1784809   0.2966200  0.001
132.0423310   1.6472494   0.3505900  0.001
133.9173147  -0.5492296   0.2705300  0.001
218.7652476  -1.0008796   0.2159100  0.001
180.1294512   0.3241922   0.2470500  0.001
180.1288837   0.3235313   0.2485600  0.001
133.9173147  -0.5492296   0.2705300  0.001
37.6615690  -4.9910022   0.2919367  0.001
132.5412022   0.8679244   0.2936500  0.001
221.0377519   0.1784809   0.2966200  0.001
216.2429206   1.1536153   0.3041700  0.001
37.4331900  -3.6148702   0.3276046  0.001
215.1510151   0.8831244   0.3303000  0.001
131.0702070   1.0681188   0.3401200  0.001
132.0423310   1.6472494   0.3505900  0.001
131.6304946   1.8580487   0.3853700  0.001
133.7843291  -0.7493446   0.2690200  0.001
136.5053606  -0.4263111   0.2989800  0.001
129.8249027   1.0250499   0.2675800  0.001
131.3998186   1.2317431   0.2876300  0.001
130.1473257   1.6794311   0.3567200  0.001
133.1722268   1.9600373   0.3337200  0.001
132.8083334   1.8933587   0.3995129  0.001
220.3655083  -0.0705650   0.3634600  0.001
215.1734875  -0.4036602   0.3573300  0.001
215.2737389   0.5127006   0.3854300  0.001
215.1835080   0.9761643   0.3066300  0.001
216.6032970   0.8286543   0.3813900  0.001
178.9478213  -0.3141067   0.2588500  0.001
180.2363122  -1.3431937   0.3263200  0.001
177.9783173   1.3609765   0.3095400  0.001
181.5060946   0.5835640   0.3577000  0.001
180.5279800  -0.2415063   0.2488500  0.001
178.5199483   0.6052125   0.2287700  0.001

In [107]:
aStack, aMed, aAvg, aStd = organizeSbp(mpAbove, col1='muI1', 
                                       col2='LOGM2LI_C', kind='mass')
bStack, bMed, bAvg, bStd = organizeSbp(mpBelow, col1='muI1', 
                                       col2='LOGM2LI_C', kind='mass')

In [108]:
# --------------------------------------------------------------------------------------- #
fig = plt.figure(figsize=(12, 12))
fig.subplots_adjust(left=0.1, right=0.95, bottom=0.15)
ax1 = fig.add_subplot(111)
ax1.minorticks_on()

# 10 Kpc 
ax1.axvline(10.0 ** 0.25, linewidth=4.0, c='k', linestyle='-', zorder=0, alpha=0.2)
# 100 Kpc
ax1.axvline(100.0 ** 0.25, linewidth=4.0, c='k', linestyle='-', zorder=0, alpha=0.2)

# z = 0.2 : 1"=3.3 Kpc
ax1.axvline(3.3 ** 0.25, linewidth=4.0, c='b', linestyle='--', alpha=0.2, zorder=0)
# z = 0.4 : 1"=5.4 Kpc
ax1.axvline(5.4 ** 0.25, linewidth=4.0, c='b', linestyle='-.', alpha=0.2, zorder=0)

for ss in mpStack:
    ax1.plot(RSMA_COMMON, ss, c='k', alpha=0.02, linewidth=0.8)
for aa in aStack:
    ax1.plot(RSMA_COMMON, aa, c='r', alpha=0.2, linewidth=0.8)
for bb in bStack:
    ax1.plot(RSMA_COMMON, bb, c='b', alpha=0.2, linewidth=0.8)   

ax1.fill_between(RSMA_COMMON, mpMed[0], mpMed[1], 
                 facecolor='k', edgecolor='none', alpha=1.0, zorder=1005)

ax1.fill_between(RSMA_COMMON, aMed[0], aMed[1], 
                 facecolor='r', edgecolor='none', alpha=1.0, zorder=1005)
ax1.fill_between(RSMA_COMMON, bMed[0], bMed[1], 
                 facecolor='b', edgecolor='none', alpha=1.0, zorder=1005)

ax1.text(0.40, 0.90, '$11.6 < \log (M_{\star}) < 11.8$', 
         verticalalignment='bottom', horizontalalignment='left',
         fontsize=40.0, transform=ax1.transAxes)

ax1.set_xlabel('$R^{1/4}\ (\mathrm{Kpc})$', size=32)
ax1.set_ylabel('$\log ({\mu}_{\star}/[M_{\odot}\ \mathrm{Kpc}^{-2}])$', size=38)

ax1.set_xlim(0.5, 4.1)
ax1.set_ylim(4.01, 9.79)

for tick in ax1.xaxis.get_major_ticks():
    tick.label.set_fontsize(30) 
for tick in ax1.yaxis.get_major_ticks():
    tick.label.set_fontsize(30) 
    
ax1.spines['top'].set_linewidth(3.5)
ax1.spines['right'].set_linewidth(3.5)
ax1.spines['bottom'].set_linewidth(3.5)
ax1.spines['left'].set_linewidth(3.5)

#fig.savefig('hscMassive_mprof_m2a_1.png', dpi=90)



In [ ]: