In [1]:
#
# LF test with noise
# C = S + N, where noise N = sigma^2 I, where I is the identity matrix
#
# We generate 20 values of parameters x for function logLF(x)
# We chose our parameters around the value of Boltzmann code generated C_3
# CAMB generated C_3 equals 5.88275e-10
#
# 1. Set all C_l = 0 except C_3 DONE
#
# 2. LF is based on a matrix C that only has a P_3 term. DONE
#
# 3. Add to that matrix a white noise term (sigma^2 on the diagonal). DONE
#
# 4. LF now has *exactly two* free parameters, C_3 and sigma^2. DONE
#
# 5. What is LF vs C_3 (at, perhaps, a couple values of sigma^2)?
#
# 6. over-plot |a_3m|^2 values
#
#
In [2]:
%matplotlib inline
import math
import matplotlib.pyplot as plt
import numpy as np
import healpy as hp
import pyfits as pf
import astropy as ap
import os
from scipy.special import eval_legendre ##special scipy function
In [3]:
#
# Review of Likelihood Formalism:
#
# -2 loglikelihood is
# -2 ln L \propto m^T C^-1 m + lnDet C
# where C = S + N
# We are working with noiseless maps, N = 0, so C = S
#
# In real space,
# data: the temperature map,
# parameters: theoretical CAMB generated C_l, C^theory_l
#
# m = array of temperature pixels
# S = S_ij
# N = diagonal noise (but here it is noiseless! N=0)
#
# In spherical harmonic space
# data: the Healpix anafast output array of a_lm (or similarly, \hat{C}_l )
# parameters: the theoretical C_l
#
# m = a_lm coefficients
# S is diagonal (C_2, C_3, etc.)
# N is non-sparse matrix (but here it is noiseless! N=0)
#
#
# NOTE: The correct standard is to systematically remove the monopole, dipole terms l=0,l=1
# Also, we use in the following lmax = 2*nside
In [4]:
# NOTE(review): IPython `cd` automagic with a hardcoded, user-specific
# absolute path -- consider a configurable data directory instead.
cd ~/Desktop/CMBintheLikeHoodz/Likelihood_Comparison
In [5]:
# Input FITS files: CAMB-generated products and Munich Planck-simulated
# 100 GHz products, all at nside=16, lmax=32.
camb1 = "camb_nside16_lmax32_alms.fits"
camb2 = "camb_nside16_lmax32_map.fits"
camb3 = "camb_nside16_lmax32_scalcls.fits"
planck1 = "100GHz_nside16_lmax32_cls.fits"
planck2 = "100GHz_nside16_lmax32_cmb_alm.fits"
planck3 = "100GHz_nside16_lmax32_sky_alm.fits"
planck4 = "100GHz_nside16_lmax32_skymap.fits"
# Healpix resolution parameter; npix = 12 * nside^2
nside = 16
In [6]:
npix = 12*(nside**2) #total number of pixels, npix
LMAX = ((2*nside)) #maximum l of the power spectrum C_l
# Cross-check our hand-computed npix against the Healpix routine.
heal_npix = hp.nside2npix(nside) # Healpix calculated npix
print "The total number of pixels is " + str(npix)
print "The maximum ell of the power spectrum C_l set to lmax = 2*nside " +str(LMAX)
print "Healpix tells me total number of pixels npix is equal to " + str(heal_npix)
In [7]:
#
# Begin with a Munich Planck-simulated map, and CAMB Boltzmann-code generated C_l values
#
In [8]:
# Theoretical scalar C_l array, CAMB
#
# open a FITS file, theoretical C_l values generated by CAMB
# type()=pyfits.hdu.hdulist.HDUList
# Open the CAMB scalar-Cl FITS file; HDU 1 holds the spectrum table.
cl_open = pf.open(camb3)
# recall camb3 = "camb_nside16_lmax32_scalcls.fits"
In [9]:
theoryCls_arr1 = cl_open[1].data
print theoryCls_arr1[:10]
# Recall there are four columns: temp, E pol, B pol, grad-temp cross terms
# first two values are zero, i.e. monopole, dipole
# XXX.field() references columns by 0-index
# field(0) is temperature values
# all Cl scalar temp values put into ndarray
# type()=numpy.ndarray
In [10]:
# Extract the temperature column: cltemps[l] = C_l for l = 0..32.
cltemps = theoryCls_arr1.field(0)
print cltemps
print "The length of the array of theoretical Cl's is " +str(len(cltemps))
print "The array contains [C_0, C_1, C_2,..., C_" +str(len(cltemps)-1) + "]"
#print type(cltemps)=numpy.ndarray
In [11]:
# remove monopole l=0 and dipole l=1
# After slicing, theoryCl[0] = C_2, theoryCl[1] = C_3, etc.
theoryCl = cltemps[2:]
# len(theoryCl) = 31
print theoryCl
# theoryCl is np.ndarray of theoretical [C_2, C_3, C_4, ..., C_32]
In [12]:
# Our input data is Gaerching generated, noiseless full-sky map
# Temperature map: here we use Planck simulated map from Munich, not CAMB map
# http://gavo.mpa-garching.mpg.de/planck/
#
# Read in with Healpy routine/function
#
# Use planck4 = "100GHz_nside16_lmax32_skymap.fits"
# This is a simulated data, 100GHz (where CMB dominates), no foregrounds
#
# NOTE(review): despite the planck4 mentions in the comments below, the
# map actually read here is the CAMB map (camb2), not the Planck one.
mapread_camb2 = hp.read_map(camb2) # Healpix routine, input the sky map
In [13]:
hp.mollview(mapread_camb2) # visualization of full-sky CMB map, nside=16, lmax=32
In [14]:
# The uploaded temperature map is mapread_planck4 = hp.read_map(planck4)
print type(mapread_camb2) # type(mapread_planck4) = np.ndarray
print mapread_camb2.shape # mapread_planck4.shape = (3072, ) = (N_pix, )
#
# rename array for convenience
# tempval is the data vector m used in the likelihood below.
tempval = mapread_camb2
#print tempval
In [15]:
# Next, we use healpy map2alm to transform to alm values
# Our input data is CAMB generated, noiseless full-sky map
# We calculate an array of a_lm from this by using Healpix map2alm, a subroutine of anafast
#
# map2alm only outputs m >=0 values, because m = -l values are equivalent to m = +l values
#
# Using map2alm, the length of the alm array is expected to be:
# mmax * (2 * lmax + 1 - mmax) / 2 + lmax + 1
#
# For mmax = lmax, this is lmax*(lmax+1)/2 + lmax + 1
# i.e.
# l = 0, there is 1
# l = 1, there is 3
# l = 2, there is 6
# l = 3, there is 10
# l = 4, there is 15
# etc.
almarr = hp.map2alm(mapread_camb2) # This is an array of a_lm values
print "The array of spherical harmonic coefficients a_lm is"
#print almarr
print "The arr.shape is " + str(almarr.shape)
print "The length of a_lm array is " + str(len(almarr))
#
print "For l=3, map2alm gives (a_00, a_10, a_11, a_20, a_21, a_22, a_30, a_31, a_32, a_33)"
print "However, this is NOT the order of the output! See below"
# In the Fortran F90 subroutines, complex alm are stored in an array that has
# two dimensions to contain coefficients for positive and negative m values.
# Healpy doesn't do this....I think
print "============================="
print "============================="
print "Check indices with healpy.sphtfunc.Alm.getidx(lmax, l, m)"
print "Default ordering of healpy.map2alm() output is "
print "(0,0), (1,0), ..., (lmax, 0),"
print "(1,1), (2,1), ...., (lmax, 1),"
print "(2,2), .... (lmax, 2),(3,3), ...., (lmax, 3), etc. , .... (lmax, lmax)."
In [16]:
# ==========================
# DEMONSTRATION
# Notice how a_lm is indexed
# ==========================
mmm = np.arange(12) # define a map, i.e. an array of 12 "pixels"
lmaxxx = 4
alm = hp.map2alm(mmm, lmax=lmaxxx) # spherical harmonic transform
lm = hp.map2alm(mmm, lmax=lmaxxx) # spherical harmonic transform
print(alm)
print(alm.shape)
# So alm is actually a 1D vector.
# How is alm indexed?
l, m = hp.Alm.getlm(lmax=lmaxxx)
print(l)
print(m)
print "The l values are "+str(l)
print "The m values are "+str(m)
print " (l,m) is in order " +str(list(zip(l,m)))
#
# l, m = hp.Alm.getlm(lmax=lmax)
# print(l)
# [0 1 2 1 2 2]
# print(m)
# [0 0 0 1 1 2]
#
#
# So, for l = 2, m is [0, 1, 2].
#
# ==========================
# Notice how a_lm is indexed
# ==========================
#
#
#
In [17]:
# Check with healpy.sphtfunc.Alm.getidx(lmax, l, m)
# Returns index corresponding to (l,m) in an array describing alm up to lmax.
#
# getlm is the inverse mapping: for each flat alm index i, it gives the
# (l, m) at that position.  pairs[i] below is used to sanity-check the
# getidx results in later cells.
# NOTE(review): `ell` is reassigned to np.arange(33) in a later cell.
ell, emm = hp.Alm.getlm(lmax=32)
print "len(ell) is " +str(len(ell))
print "len(emm) is "+str(len(emm))
print "l values are "+str(ell[:10])
print "m values are "+str(emm[:10])
pairs = list(zip(ell, emm)) # put values together in pairs, zip()
ellemm = np.vstack((ell,emm)).T # equivalent to list(zip(ell,emm)), but uses numpy throughout
print "Indices for a_lm for lmax (l, m) are:"
print str(pairs[:50]) # The expected output
In [18]:
print ellemm[:10]
In [19]:
#
# For our first test, mode l = 3, we need to access a_lm coefficients a_30, a_31, a_32, a_33
# To find this for lmax = 32, we use
# healpy.sphtfunc.Alm.getidx(lmax, l, m)
# Returns index corresponding to (l,m) in an array describing alm up to lmax.
#
# Find the indices
index_a30 = hp.Alm.getidx(lmax=32, l=3, m=0)
index_a31 = hp.Alm.getidx(lmax=32, l=3, m=1)
index_a32 = hp.Alm.getidx(lmax=32, l=3, m=2)
index_a33 = hp.Alm.getidx(lmax=32, l=3, m=3)
In [20]:
print "Index a_30 is " +str(index_a30)
print "Index a_31 is "+str(index_a31)
print "Index a_32 is "+str(index_a32)
print "Index a_33 is "+str(index_a33)
In [21]:
#
# Create an array with only the values a_3m, i.e. a_30, a_31, a_32, a_33
#
# First convert the array of alm coefficients into a real
#
realalm = almarr.real
#
print realalm[:36]
In [22]:
empty_almlist = []
#
a30 = realalm[3]
a31 = realalm[35]
a32 = realalm[66]
a33 = realalm[96]
#
print "a30 is " + str(a30)
print "a31 is " + str(a31)
print "a32 is " + str(a32)
print "a33 is " + str(a33)
#
print str(pairs[3]) # Check with our output above
print str(pairs[35])
print str(pairs[66])
print str(pairs[96])
#
empty_almlist.append(a30)
empty_almlist.append(a31)
empty_almlist.append(a32)
empty_almlist.append(a33)
#
print empty_almlist
In [23]:
# create array of real-valued alm coefficients, a30 a31 a32 a33
realalm3 = np.asarray(empty_almlist) # np.asarray() converts input into an array
print realalm3
In [24]:
# Repeat the above procedure for mode l = 4, i.e. a40 a41 a42 a43 a44
# Find the indices
index_a40 = hp.Alm.getidx(lmax=32, l=4, m=0)
index_a41 = hp.Alm.getidx(lmax=32, l=4, m=1)
index_a42 = hp.Alm.getidx(lmax=32, l=4, m=2)
index_a43 = hp.Alm.getidx(lmax=32, l=4, m=3)
index_a44 = hp.Alm.getidx(lmax=32, l=4, m=4)
#
print "Index a_40 is " +str(index_a40)
print "Index a_41 is "+str(index_a41)
print "Index a_42 is "+str(index_a42)
print "Index a_43 is "+str(index_a43)
print "Index a_44 is "+str(index_a44)
#
# Check with the above ouput
print str(pairs[4])
print str(pairs[36])
print str(pairs[67])
print str(pairs[97])
print str(pairs[126])
#
emptylistalm2 = []
#
#print realalm
#
a40 = realalm[4]
a41 = realalm[36]
a42 = realalm[67]
a43 = realalm[97]
a44 = realalm[127]
#
print "a40 is " + str(a40)
print "a41 is " + str(a41)
print "a42 is " + str(a42)
print "a43 is " + str(a43)
print "a44 is " + str(a44)
#
emptylistalm2.append(a40)
emptylistalm2.append(a41)
emptylistalm2.append(a42)
emptylistalm2.append(a43)
emptylistalm2.append(a44)
#
#print emptylistalm2
In [25]:
# create array of real-valued alm coefficients, a40 a41 a42 a43 a44
realalm4 = np.asarray(emptylistalm2) # np.asarray() converts input into an array
print realalm4
In [26]:
# Calculate (abs(alm))**2 i.e. |alm|^2
abs_alm3 = np.absolute(realalm3)
abs_alm4 = np.absolute(realalm4)
print abs_alm3
print abs_alm4
# Now calculate the squares element-wise, x**2
alm3_squared = abs_alm3**2
alm4_squared = abs_alm4**2
print alm3_squared
print alm4_squared
In [27]:
# For l = 3 test, we need theoretical value of C_3; ditto for l = 4
print theoryCl
# theoryCl starts at l=2, so index 1 is C_3 and index 2 is C_4.
C3 = theoryCl[1]
print "theory C_3 is " +str(C3)
C4 = theoryCl[2]
print "theory C_4 is "+str(C4)
In [28]:
# For lmax = 32, we must create an array of ell values, i.e. [0 1 2 3....31 32]
# NOTE(review): this reuses (shadows) the name `ell` from the getlm cell above.
ell = np.arange(33)
print ell
#
# Subtract the monopole and dipole, l=0, l=1
ellval = ell[2:]
print ellval
In [29]:
# Calculate an array of (2*l + 1)C_l
# i.e. 5*C_2, 7*C_3, 9*C_4, 11*C_5, 13*C_6, ...
print theoryCl
for i in ellval:
paramsCl = (2*ellval + 1)*theoryCl # define array (2*l + 1)C_l
print paramsCl
In [30]:
# norm[l-2] = (2*l + 1)/(4*pi), the prefactor of each Legendre term in S_ij.
norm = ((2*ellval + 1))/(4*math.pi)
print norm
In [31]:
# \hat{C}_l estimated from the map itself (pseudo-Cl via anafast).
anafastCl = hp.anafast(mapread_camb2, lmax=32)
#len(anafastCl) = 33
# remove monopole and dipole values, l=0, l=1
hatCl = anafastCl[2:] #len() = 31, type() = np.ndarray
hatC3 = hatCl[1] # index 0 = C2, 1 = C3, etc.
hatC4 = hatCl[2]
print hatC3
print hatC4
In [32]:
#
# Add a_lm squared, |a_lm|^2
#
print "The values for |a_lm|^2 are : "
print "For |a_3m|**2 such that a_30, a_31, a_32, a_33: "
print str(alm3_squared)
print "And for |a_4m|**2 such that a_40, a_41, a_42, a_43, a_44: "
print str(alm4_squared)
In [33]:
# =========================================================================
#
# =========================================================================
#
# Data:
# tempval # the array of pixel values, (3072,)
# realalm3 # array of alm values, a30, a31, a32, a33
# realalm4 # array of alm values, a40, a41, a42, a43, a44
# alm3_squared # array of |alm|^2, (abs(a3m))**2
# alm4_squared # array of |alm|^2, (abs(a4m))**2
# hatCl # array of anafast-calculated \hat{C}_l values, l=2 to l=32
# hatC3 # \hat{C}_3 value
# hatC4 # \hat{C}_4 value
#
# Parameters:
# theoryCl # array of Boltzmann code generated C_l, i.e. C^{theory}_l
# paramsCl # array of (2*l + 1)C_l from l=2 to l=lmax
# C3 # array of C_3 value
# C4 # array of C_4 value
#
# Array of ell's:
# ellval # array of l = 2 to l=lmax
# # [2 3 4 ... 31 32]
# norm # array of (2*l+1)/4pi
# # [5/4pi 7/4pi 9/4pi 11/4pi ... 63/4pi 65/4pi]
# =========================================================================
#
# =========================================================================
In [34]:
#
# Next, create the matrix, n_i /cdot n_j
# solely using Healpy routines, i.e. taking the dot product of the vectors
# The result is "dotproductmatrix"
#
# npix = 3072
In [35]:
totalpix = np.arange(npix) # An array indexing the total number of pixels
In [36]:
## healpy.pixelfunc.pix2vec(nside, ipix, nest=False)
##
## will give three arrays
## arrays of all x values, all y values, all z values
## RING scheme default
# len()=3
# type()=tuple
#
#
# Unit vectors pointing at every pixel center on the sphere.
vecval = hp.pix2vec(nside, totalpix) #Nside = 16, type()=tuple
In [37]:
vecvalx = vecval[0] #shape (3072,)
vecvaly = vecval[1]
vecvalz = vecval[2]
In [38]:
# First arrange arrays vertically
# numpy.vstack = Stack arrays in sequence vertically (row wise), input sequence of arrays
totalvecval = np.vstack((vecvalx, vecvaly, vecvalz)) #type()=numpy.ndarray
In [39]:
trans = totalvecval.T #transpose
In [40]:
# Entry (i, j) is n_i . n_j = cos(angle between pixel centers i and j);
# this is the argument fed to the Legendre polynomials P_l below.
dotproductmatrix = trans.dot(totalvecval) #take the dot product
# dotproductmatrix.shape = (npix, npix) = (3072, 3072)
# type(dotproductmatrix) = np.ndarray
In [41]:
# =========================================================
# =========================================================
#
# \Sum_l (2*l + 1)/4pi C^th_l P_l (dotproductmatrix)
# sum from l=2 to l=lmax
#
# arrays l = [2 3 4 .... lmax]
# C_l = [C_2 C_3 .... C_lmax]
#
# The correct way to do the summation:
#
# Step 1: calculate the matrix
# M = dotproductmatrix
#
# Step 2: evaluate the function P_l(x) for each entry of the matrix
# OUTPUT: [P_2(M) P_3(M) P_4(M) .... P_lmax(M) ]
#
# Step 3: (2*l +1)/4pi from l=2 to l=lmax
# [5/4pi 7/4pi 9/4pi 11/4pi .... 65/4pi ]
#
# Step 4: multiply
# [5/4pi*P_2(M) + 7/4pi*P_3(M) +...... + 65/4pi*P_32(M)]
#
#
# Step 5: multiply by theoretical CAMB values, [C_2 C_3 C_31 C_32]
# [5/4pi**C_2* P_2(M) + 7/4pi*C_3* P_3(M) +...... + 65/4pi*C_32* P_32(M)]
#
# Step 6: This is an array of S_ij for each theory C_l, l=2 to l=32
#
#
#
# =========================================================
# =========================================================
In [42]:
# =========================================================
# =========================================================
#
# Now calculate the likelihood
# -2lnL \propto m^T C^-1 m + ln det C + N ln (2pi)
#
# First term, m^T C^-1 m is the "model fit term"
# Second term, lndetC is the "complexity penalty"
# Third term, N ln 2pi, a constant
#
# m = tempval
# C = Sij
#
# STEP 1: do inverse of Sij
# invSij = np.linalg.inv(Sij)
#
# STEP 2: do matrix mulplication, m.T*inv(C)*m
#
# lnL_modelfit_terms = np.array([np.dot(tempval.T , np.dot(invSij[i] , tempval) ) for i in range(invSij.shape[0])])
#
# STEP 3: do logdet Sij
#
# logdetC = np.linalg.slogdet(Sij) #computes sign and log det C
# logdetC[1]
#
# STEP 4: compute N_pix * 2pi
#
# Npix2pi = (npix)*2*math.pi
#
# Step 5: -2loglikelihood = m.T*inv(C)*m + logdetC[1] + Npix2pi
#
# =========================================================
# =========================================================
In [43]:
# CODE BOTTLENECK!
#
# Evaluate Legendre from l=2 to l=lmax for each matrix entry
# [P_2(M) P_3(M) P_4(M) .... P_lmax(M) ]
#
# WITHOUT BROADCASTING, one would do something like
# PlMat = []
# for i in ellval:
# PlMat.append( eval_legendre(i, dotproductmatrix) )
#
#
# With broadcasting, we use
# Broadcast eval_legendre over l (first axis) and the matrix (last two axes).
PlMat = eval_legendre(ellval[:, None, None], dotproductmatrix)
# PlMat = [P_2(M) P_3(M) P_4(M) .... P_lmax(M) ]
# PlMat is an array, len()=31 of 31 3072 by 3072 matrices
# PlMat.shape = (31, 3072, 3072)
# NOTE(review): 31 x 3072 x 3072 float64 is roughly 2.3 GB per array,
# so PlMat and norm_matrix together are memory-heavy.
In [44]:
# multiply PlMat by (2*l+1)/4pi, i.e. norm
# norm_matrix[1] is the l=3 slice, (7/4pi)*P_3(M), used by the LF below.
norm_matrix = norm[:, None, None] * PlMat
# [5/4pi * P_2(M) 7/4pi * P_3(M) .... 65/4pi * P_32(M)]
In [45]:
# JULY 1, 2015
#
#
# Here we define the LF
#
# Next we should write the REAL-space log-likelihood, -2 lnLikefunction
# this is a function of parameters
# plot against parameters, theory C_l
#
# Likelihood function should use -2lnL /propto m^T C^-1 M + log det M
#
In [46]:
# TEST C_3 Likelihood
#
# The covariance matrix is a function of variable "x" where "x" is "C_3", an unknown parameter.
#
# Our covariance matrix is therefore S_ij = 7/4pi * x * P_3(matrix)
# (We set l=3, i.e. l=3, and P_3)
#
# The LF is then a function of x, LF(x). This is the only parameter we vary.
#
# LF = -2loglikelihood /propto T^T inv(S_ij) T + log det (Sij) + N log (2pi)
#
# We then plot LF(x) vs. parameters x.
#
In [47]:
# define pixel-value arrays
# NOTE(review): the likelihood functions below use the plain ndarray
# (tempp = 1e6*tempval), not these np.matrix wrappers; mT and m appear
# unused in the visible cells and np.matrix is deprecated in modern numpy.
mT = np.matrix(tempval) # mT.shape = (1, 3072)
m = np.matrix(tempval).T # m.shape = (3072, 1)
# Constant term of -2 ln L: N_pix * 2pi (from the Gaussian normalization;
# strictly N_pix * ln(2pi), kept as-is here since it only shifts the curve).
Npix2pi = (npix)*2*math.pi # LF constant
In [48]:
print C3 # Boltzmann code CAMB output
In [49]:
# generate a number of samples to plot for x
# Boltzmann code value for C3 is 5.88275e-10
# start at 1e-10, end at 9e-10
# default samples generated is 50
In [50]:
vary_x_samples1 = np.linspace(1e-10, 9e-10, num=20 ) #set default num = 20
In [51]:
print vary_x_samples1
In [52]:
# create Sij array, len()=31, l=2 to l=32
# Sij = norm_matrix * theoryCl[:, None, None]
# [5/4pi*C_2*P_2(M) 7/4pi*C_3*P_3(M) .... 65/4pi*C_32*P_32(M)]
In [53]:
print hatC3 # anafast generated C_l for l = 3 from sky map
print C3 # CAMB generated C_3, Boltzmann code output
In [54]:
print alm3_squared # a_3m extracted from sky map, absolute value, squared
In [55]:
np.set_printoptions(threshold=100000) # Default is threshold=1000
## Use this to print all values, disables corner printing
In [56]:
# print out all matrices and parts of the logLF
print vary_x_samples1 # generated from 1e-10 to 9e-10
In [57]:
print Npix2pi # LogLF constant
In [58]:
sigma2 = np.logspace(-12, -14, num=30 ) #set default num = 30
print sigma2
In [59]:
print 7*hatC3
print hatC3
In [60]:
# Second test with rescaled values
vary_x_samples3 = np.logspace(-8, -12, num=30) #num = 30
sigma3 = np.logspace(-12, -14, num=30)
In [61]:
print vary_x_samples3
print sigma3
In [62]:
# For N matrix, set the identity
id_mat = np.identity(3072)
# print id_mat # This is a (3072, 3072) matrix
In [63]:
# NOTE(review): 30 x 3072 x 3072 float64 is ~2.3 GB, and this array is not
# used again below (the likelihood functions rebuild N internally from
# id_mat) -- candidate for removal.
noiseresult = sigma2[:, None, None] * id_mat[None, :, :]
#print noiseresult
In [64]:
# Now, rescale
#
# Multiply the CMB maps by 1e6 and the C matrix entries by 1e12 and try again.
#
tempp = (1e6)*tempval # multiply CMB maps by 1e6
def LogLikehood_wNoise_1e12(param, sig, scale=1e12):
    # -2 ln L for covariance C = S + N, rescaled by `scale` for numerical
    # conditioning (the map was rescaled by 1e6, so C scales by 1e12).
    #
    # param : 1D array of trial C_3 values (the parameter we vary)
    # sig   : 1D array of trial noise variances sigma^2, paired with param
    # scale : overall covariance rescaling (default 1e12, the original
    #         hard-coded factor; kept as a default for backward compatibility)
    #
    # Returns a 1D array of -2 ln L = m^T C^-1 m + ln det C + Npix2pi,
    # one value per (param[i], sig[i]) pair.
    # Relies on module-level norm_matrix, id_mat, tempp, Npix2pi.
    #
    # S_ij = param * (7/4pi) * P_3(M); norm_matrix[1] is the l=3 slice.
    Sij = scale * (param[:, None, None] * norm_matrix[1][None, :, :])
    # N_ij = sigma^2 * identity (white noise)
    Nij = scale * (sig[:, None, None] * id_mat[None, :, :])
    Cij = Sij + Nij  # shape (len(param), npix, npix)
    # slogdet returns (sign, log|det|) for each matrix in the stack.
    logdetC = np.linalg.slogdet(Cij)
    # m^T C^-1 m via solve() -- cheaper and more stable than forming inv(C).
    model_fit_terms = np.array([np.dot(tempp.T, np.linalg.solve(Cij[i], tempp))
                                for i in range(Cij.shape[0])])
    return model_fit_terms + logdetC[1] + Npix2pi
In [ ]:
In [65]:
print sigma3
print "*************"
print vary_x_samples3
In [66]:
print vary_x_samples3
print sigma3
In [67]:
xaxis3 = vary_x_samples3
yaxis3 = LogLikehood_wNoise_1e12(vary_x_samples3, sigma3)
fig3 = plt.figure()
plt.plot(xaxis3, yaxis3)
plt.xscale("log")
fig3.suptitle('Real-space LF vs C_3 values')
plt.xlabel('C_3 values, num=30, logspace from 1e-8 to 1e-12')
plt.ylabel('-2 ln likelihood L(T|C_3)')
plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
print "vary_x_samples3 = np.logspace(-8, -12, num=30) #num = 30"
print "sigma3 = np.logspace(-12, -14, num=30)"
print "LogLikehood_wNoise_1e12(vary_x_samples3, sigma3)"
print "|a30|^2 is green, |a32|^2 is red, |a33|^2 is magenta, 7*\hat{C}_3 is yellow, \hat{C}_3 is black"
In [68]:
print vary_x_samples3
print LogLikehood_wNoise_1e12(vary_x_samples3, sigma3)
In [69]:
#
# Plot noise sigma from e-20 to e-22
#
tempp = (1e6)*tempval # multiply CMB maps by 1e6
def LogLikehood_wNoise_1e20(param, sig, scale=1e20):
    # -2 ln L for covariance C = S + N, rescaled by `scale`.
    # Same computation as LogLikehood_wNoise_1e12 but with a 1e20 default
    # rescaling (the original comment here wrongly said "1e12").
    #
    # param : 1D array of trial C_3 values
    # sig   : 1D array of trial noise variances sigma^2
    # scale : overall covariance rescaling (default 1e20)
    #
    # Returns -2 ln L = m^T C^-1 m + ln det C + Npix2pi per sample.
    # Relies on module-level norm_matrix, id_mat, tempp, Npix2pi.
    #
    # S_ij = param * (7/4pi) * P_3(M); norm_matrix[1] is the l=3 slice.
    Sij = scale * (param[:, None, None] * norm_matrix[1][None, :, :])
    # N_ij = sigma^2 * identity (white noise)
    Nij = scale * (sig[:, None, None] * id_mat[None, :, :])
    Cij = Sij + Nij  # shape (len(param), npix, npix)
    # slogdet returns (sign, log|det|) for each matrix in the stack.
    logdetC = np.linalg.slogdet(Cij)
    # m^T C^-1 m via solve() -- avoids building the explicit inverse.
    model_fit_terms = np.array([np.dot(tempp.T, np.linalg.solve(Cij[i], tempp))
                                for i in range(Cij.shape[0])])
    return model_fit_terms + logdetC[1] + Npix2pi
#
# Plot noise sigma from e-20 to e-22
#
vary_x_samples12 = np.logspace(-8, -12, num=40) #num = 40
sigma12 = np.logspace(-20, -22, num=40)
xaxis12 = vary_x_samples12
yaxis12 = LogLikehood_wNoise_1e20(vary_x_samples12, sigma12)
fig12 = plt.figure()
plt.plot(xaxis12, yaxis12)
plt.xscale("log")
plt.xlabel('C_3 values, num=40, logspace from 1e-8 to 1e-12')
plt.ylabel('-2 ln likelihood L(T|C_3)')
plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
print "vary_x_samples12 = np.logspace(-8, -12, num=40) #num = 40"
print "sigma12 = np.logspace(-20, -22, num=40)"
print "LogLikehood_wNoise_1e20(vary_x_samples12, sigma12)"
print "|a30|^2 is green, |a32|^2 is red, |a33|^2 is magenta, 7*\hat{C}_3 is yellow, \hat{C}_3 is black"
In [70]:
print vary_x_samples12
print LogLikehood_wNoise_1e20(vary_x_samples12, sigma12)
In [71]:
#
# Plot noise sigma from e-21 to e-23
#
tempp = (1e6)*tempval # multiply CMB maps by 1e6
def LogLikehood_wNoise_1e21(param, sig, scale=1e21):
    # -2 ln L for covariance C = S + N, rescaled by `scale`.
    # Same computation as LogLikehood_wNoise_1e12 but with a 1e21 default
    # rescaling (the original comment here wrongly said "1e12").
    #
    # param : 1D array of trial C_3 values
    # sig   : 1D array of trial noise variances sigma^2
    # scale : overall covariance rescaling (default 1e21)
    #
    # Returns -2 ln L = m^T C^-1 m + ln det C + Npix2pi per sample.
    # Relies on module-level norm_matrix, id_mat, tempp, Npix2pi.
    #
    # S_ij = param * (7/4pi) * P_3(M); norm_matrix[1] is the l=3 slice.
    Sij = scale * (param[:, None, None] * norm_matrix[1][None, :, :])
    # N_ij = sigma^2 * identity (white noise)
    Nij = scale * (sig[:, None, None] * id_mat[None, :, :])
    Cij = Sij + Nij  # shape (len(param), npix, npix)
    # slogdet returns (sign, log|det|) for each matrix in the stack.
    logdetC = np.linalg.slogdet(Cij)
    # m^T C^-1 m via solve() -- avoids building the explicit inverse.
    model_fit_terms = np.array([np.dot(tempp.T, np.linalg.solve(Cij[i], tempp))
                                for i in range(Cij.shape[0])])
    return model_fit_terms + logdetC[1] + Npix2pi
#
# Plot noise sigma from e-21 to e-23
#
vary_x_samples125 = np.logspace(-8, -12, num=40) #num = 40
sigma125 = np.logspace(-21, -23, num=40)
xaxis125 = vary_x_samples125
yaxis125 = LogLikehood_wNoise_1e21(vary_x_samples125, sigma125)
fig125 = plt.figure()
plt.plot(xaxis125, yaxis125)
plt.xscale("log")
plt.xlabel('C_3 values, num=40, logspace from 1e-8 to 1e-12')
plt.ylabel('-2 ln likelihood L(T|C_3)')
plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
print "vary_x_samples12 = np.logspace(-8, -12, num=40) #num = 40"
print "sigma12 = np.logspace(-21, -23, num=40)"
print "LogLikehood_wNoise_1e20(vary_x_samples12, sigma12)"
print "|a30|^2 is green, |a32|^2 is red, |a33|^2 is magenta, 7*\hat{C}_3 is yellow, \hat{C}_3 is black"
In [72]:
print "vary_x_samples125 = np.logspace(-8, -12, num=40) #num = 40"
print "sigma125 = np.logspace(-21, -23, num=40)"
print "LogLikehood_wNoise_1e21(vary_x_samples125, sigma125)"
print vary_x_samples125
print LogLikehood_wNoise_1e21(vary_x_samples125, sigma125)
In [73]:
#
# Plot noise sigma from e-22 to e-24
#
def LogLikehood_wNoise_1e22(param, sig, scale=1e22):
    # -2 ln L for covariance C = S + N, rescaled by `scale`.
    # Same computation as LogLikehood_wNoise_1e12 but with a 1e22 default
    # rescaling (the original comment here wrongly said "1e12").
    # NOTE(review): this function is redefined verbatim in a later cell.
    #
    # param : 1D array of trial C_3 values
    # sig   : 1D array of trial noise variances sigma^2
    # scale : overall covariance rescaling (default 1e22)
    #
    # Returns -2 ln L = m^T C^-1 m + ln det C + Npix2pi per sample.
    # Relies on module-level norm_matrix, id_mat, tempp, Npix2pi.
    #
    # S_ij = param * (7/4pi) * P_3(M); norm_matrix[1] is the l=3 slice.
    Sij = scale * (param[:, None, None] * norm_matrix[1][None, :, :])
    # N_ij = sigma^2 * identity (white noise)
    Nij = scale * (sig[:, None, None] * id_mat[None, :, :])
    Cij = Sij + Nij  # shape (len(param), npix, npix)
    # slogdet returns (sign, log|det|) for each matrix in the stack.
    logdetC = np.linalg.slogdet(Cij)
    # m^T C^-1 m via solve() -- avoids building the explicit inverse.
    model_fit_terms = np.array([np.dot(tempp.T, np.linalg.solve(Cij[i], tempp))
                                for i in range(Cij.shape[0])])
    return model_fit_terms + logdetC[1] + Npix2pi
vary_x_samples123 = np.logspace(-8, -12, num=40) #num = 40
sigma123 = np.logspace(-22, -24, num=40)
xaxis123 = vary_x_samples123
yaxis123 = LogLikehood_wNoise_1e22(vary_x_samples123, sigma123)
fig123 = plt.figure()
plt.plot(xaxis123, yaxis123)
plt.xscale("log")
plt.xlabel('C_3 values, num=40, logspace from 1e-8 to 1e-12')
plt.ylabel('-2 ln likelihood L(T|C_3)')
plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
print "vary_x_samples123 = np.logspace(-8, -12, num=40) #num = 40"
print "sigma123 = np.logspace(-22, -24, num=40)"
print "LogLikehood_wNoise_1e22(vary_x_samples123, sigma123)"
print "|a30|^2 is green, |a32|^2 is red, |a33|^2 is magenta, 7*\hat{C}_3 is yellow, \hat{C}_3 is black"
In [74]:
print vary_x_samples123
print LogLikehood_wNoise_1e22(vary_x_samples123, sigma123)
In [75]:
#
# Plot noise sigma from e-23 to e-25
#
def LogLikehood_wNoise_1e23(param, sig, scale=1e23):
    # -2 ln L for covariance C = S + N, rescaled by `scale`.
    # Same computation as LogLikehood_wNoise_1e12 but with a 1e23 default
    # rescaling (the original comment here wrongly said "1e12").
    #
    # param : 1D array of trial C_3 values
    # sig   : 1D array of trial noise variances sigma^2
    # scale : overall covariance rescaling (default 1e23)
    #
    # Returns -2 ln L = m^T C^-1 m + ln det C + Npix2pi per sample.
    # Relies on module-level norm_matrix, id_mat, tempp, Npix2pi.
    #
    # S_ij = param * (7/4pi) * P_3(M); norm_matrix[1] is the l=3 slice.
    Sij = scale * (param[:, None, None] * norm_matrix[1][None, :, :])
    # N_ij = sigma^2 * identity (white noise)
    Nij = scale * (sig[:, None, None] * id_mat[None, :, :])
    Cij = Sij + Nij  # shape (len(param), npix, npix)
    # slogdet returns (sign, log|det|) for each matrix in the stack.
    logdetC = np.linalg.slogdet(Cij)
    # m^T C^-1 m via solve() -- avoids building the explicit inverse.
    model_fit_terms = np.array([np.dot(tempp.T, np.linalg.solve(Cij[i], tempp))
                                for i in range(Cij.shape[0])])
    return model_fit_terms + logdetC[1] + Npix2pi
vary_x_samples13 = np.logspace(-8, -12, num=40) #num = 40
sigma13 = np.logspace(-23, -25, num=40)
xaxis13 = vary_x_samples13
yaxis13 = LogLikehood_wNoise_1e23(vary_x_samples13, sigma13)
fig13 = plt.figure()
plt.plot(xaxis13, yaxis13)
plt.xscale("log")
plt.xlabel('C_3 values, num=40, logspace from 1e-8 to 1e-12')
plt.ylabel('-2 ln likelihood L(T|C_3)')
plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
print "vary_x_samples13 = np.logspace(-8, -12, num=40) #num = 40"
print "sigma13 = np.logspace(-23, -25, num=40)"
print "LogLikehood_wNoise_1e23(vary_x_samples13, sigma13)"
print "|a30|^2 is green, |a32|^2 is red, |a33|^2 is magenta, 7*\hat{C}_3 is yellow, \hat{C}_3 is black"
In [76]:
print "vary_x_samples13 = np.logspace(-8, -12, num=40) #num = 40"
print "sigma13 = np.logspace(-23, -25, num=40)"
print "LogLikehood_wNoise_1e23(vary_x_samples13, sigma13)"
print vary_x_samples13
print LogLikehood_wNoise_1e23(vary_x_samples13, sigma13)
In [77]:
#
# Plot noise sigma from e-24 to e-26
#
def LogLikehood_wNoise_1e24(param, sig, scale=1e24):
    # -2 ln L for covariance C = S + N, rescaled by `scale`.
    # Same computation as LogLikehood_wNoise_1e12 but with a 1e24 default
    # rescaling (the original comment here wrongly said "1e12").
    #
    # param : 1D array of trial C_3 values
    # sig   : 1D array of trial noise variances sigma^2
    # scale : overall covariance rescaling (default 1e24)
    #
    # Returns -2 ln L = m^T C^-1 m + ln det C + Npix2pi per sample.
    # Relies on module-level norm_matrix, id_mat, tempp, Npix2pi.
    #
    # S_ij = param * (7/4pi) * P_3(M); norm_matrix[1] is the l=3 slice.
    Sij = scale * (param[:, None, None] * norm_matrix[1][None, :, :])
    # N_ij = sigma^2 * identity (white noise)
    Nij = scale * (sig[:, None, None] * id_mat[None, :, :])
    Cij = Sij + Nij  # shape (len(param), npix, npix)
    # slogdet returns (sign, log|det|) for each matrix in the stack.
    logdetC = np.linalg.slogdet(Cij)
    # m^T C^-1 m via solve() -- avoids building the explicit inverse.
    model_fit_terms = np.array([np.dot(tempp.T, np.linalg.solve(Cij[i], tempp))
                                for i in range(Cij.shape[0])])
    return model_fit_terms + logdetC[1] + Npix2pi
vary_x_samples14 = np.logspace(-8, -12, num=40) #num = 40
sigma14 = np.logspace(-24, -26, num=40)
xaxis14 = vary_x_samples14
yaxis14 = LogLikehood_wNoise_1e24(vary_x_samples14, sigma14)
fig14 = plt.figure()
plt.plot(xaxis14, yaxis14)
plt.xscale("log")
plt.xlabel('C_3 values, num=40, logspace from 1e-8 to 1e-12')
plt.ylabel('-2 ln likelihood L(T|C_3)')
plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
print "vary_x_samples14 = np.logspace(-8, -12, num=40) #num = 40"
print "sigma14 = np.logspace(-24, -26, num=40)"
print "LogLikehood_wNoise(vary_x_samples14, sigma14)"
print "|a30|^2 is green, |a32|^2 is red, |a33|^2 is magenta, 7*\hat{C}_3 is yellow, \hat{C}_3 is black"
In [78]:
print vary_x_samples14
print LogLikehood_wNoise_1e24(vary_x_samples14, sigma14)
In [79]:
print hatC3 # anafast generated C_l for l = 3 from sky map
print 7*hatC3
print C3 # CAMB generated C_3, Boltzmann code output
print alm3_squared
In [80]:
print tempval.shape
In [81]:
np.logspace(-8, -10, num=40)
Out[81]:
In [82]:
# Plot C_3 from e-08 to e-10
# Plot noise sigma from e-22 to e-23
#
def LogLikehood_wNoise_1e22(param, sig, scale=1e22):
    # NOTE(review): this redefines (shadows) the identical
    # LogLikehood_wNoise_1e22 from an earlier cell; the two copies should
    # be collapsed into one definition.
    #
    # -2 ln L for covariance C = S + N, rescaled by `scale` (default 1e22).
    # param : 1D array of trial C_3 values
    # sig   : 1D array of trial noise variances sigma^2
    # Returns -2 ln L = m^T C^-1 m + ln det C + Npix2pi per sample.
    # Relies on module-level norm_matrix, id_mat, tempp, Npix2pi.
    #
    # S_ij = param * (7/4pi) * P_3(M); norm_matrix[1] is the l=3 slice.
    Sij = scale * (param[:, None, None] * norm_matrix[1][None, :, :])
    # N_ij = sigma^2 * identity (white noise)
    Nij = scale * (sig[:, None, None] * id_mat[None, :, :])
    Cij = Sij + Nij  # shape (len(param), npix, npix)
    # slogdet returns (sign, log|det|) for each matrix in the stack.
    logdetC = np.linalg.slogdet(Cij)
    # m^T C^-1 m via solve() -- avoids building the explicit inverse.
    model_fit_terms = np.array([np.dot(tempp.T, np.linalg.solve(Cij[i], tempp))
                                for i in range(Cij.shape[0])])
    return model_fit_terms + logdetC[1] + Npix2pi
# Cell 82: likelihood slice vs C_3 with the 1e22-rescaled covariance and
# noise sigma^2 in 1e-22..1e-23; the |a_3m|^2 marker lines are disabled here.
vary_x_samples15 = np.logspace(-8, -10, num=40) #num = 40
sigma15 = np.logspace(-22, -23, num=40)
xaxis15 = vary_x_samples15
yaxis15 = LogLikehood_wNoise_1e22(vary_x_samples15, sigma15)
fig15 = plt.figure()
plt.plot(xaxis15, yaxis15)
plt.xscale("log")
plt.xlabel('C_3 values, num=40, logspace from 1e-8 to 1e-10')
plt.ylabel('-2 ln likelihood L(T|C_3)')
#plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
#plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
#plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
#plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
#plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
print "|a30|^2 is green, |a32|^2 is red, |a33|^2 is magenta, 7*\hat{C}_3 is yellow, \hat{C}_3 is black"
In [83]:
print vary_x_samples15
print LogLikehood_wNoise_1e22(vary_x_samples15, sigma15)
In [84]:
# Scratch: preview a slightly narrower sample grid.
np.logspace(-8.3, -10, num=40)
Out[84]:
In [85]:
# Plot C_3 from 7.07945784e-09 to 1.00000000e-10
# Plot noise sigma from e-22 to e-23
#
def LogLikehood_wNoise_1e22(param, sig):
    """-2 ln L(T | C_3, sigma^2) up to a constant, for paired sample arrays.

    NOTE: this is a redefinition of the function from a previous cell
    (identical behavior); it is re-executed here so this section of the
    notebook is self-contained.

    The covariance is C = 1e22 * (C_3 * norm_matrix[1] + sigma^2 * id_mat),
    and the value returned for each (C_3, sigma^2) pair is
    m^T C^-1 m + ln|det C| + Npix2pi, with m taken from the module-level
    pixel vector `tempp`.
    """
    # Stack of rescaled signal and noise matrices, shape (k, Npix, Npix).
    signal = (1e22) * (param[:, None, None] * norm_matrix[1][None, :, :])
    noise = (1e22) * (sig[:, None, None] * id_mat[None, :, :])
    covariance = signal + noise
    # slogdet gives (sign, log|det|) for every matrix in the stack.
    sign, log_det = np.linalg.slogdet(covariance)
    # Quadratic form m^T C^-1 m for each trial, via a linear solve.
    quad_forms = []
    for k in range(covariance.shape[0]):
        solved = np.linalg.solve(covariance[k], tempp)
        quad_forms.append(np.dot(tempp.T, solved))
    return np.array(quad_forms) + log_det + Npix2pi
vary_x_samples16 = np.logspace(-8.7, -10, num=40) #num = 40
sigma16 = np.logspace(-22, -24, num=40)
xaxis16 = vary_x_samples16
yaxis16 = LogLikehood_wNoise_1e22(vary_x_samples16, sigma16)
fig16 = plt.figure()
plt.plot(xaxis16, yaxis16)
plt.xscale("log")
plt.xlabel('C_3 values, num=40, logspace from 1e-9 to 1e-10')
plt.ylabel('-2 ln likelihood L(T|C_3)')
#plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
#plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
#plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
#plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
#plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
vary_x_samples66 = np.logspace(-8.7, -11, num=40) #num = 40
sigma66 = np.logspace(-22, -23, num=40)
xaxis66 = vary_x_samples66
yaxis66 = LogLikehood_wNoise_1e22(vary_x_samples66, sigma66)
fig66 = plt.figure()
plt.plot(xaxis66, yaxis66)
plt.xscale("log")
plt.xlabel('C_3 values, num=40, logspace from 1e-9 to 1e-10')
plt.ylabel('-2 ln likelihood L(T|C_3)')
#plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
#plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
#plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
#plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
#plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
print "|a30|^2 is green, |a32|^2 is red, |a33|^2 is magenta, 7*\hat{C}_3 is yellow, \hat{C}_3 is black"
In [86]:
# Plot C_3 from 7.07945784e-09 to 1.00000000e-10
# Plot noise sigma from e-22 to e-23
#
def LogLikehood_wNoise_1e22(param, sig):
    """-2 ln L(T | C_3, sigma^2) up to a constant (third, identical
    redefinition of the function from the earlier cells; kept so this cell
    can be run on its own).

    C = 1e22 * (C_3 * norm_matrix[1] + sigma^2 * id_mat); the result per
    (C_3, sigma^2) pair is m^T C^-1 m + ln|det C| + Npix2pi, with m taken
    from the module-level pixel vector `tempp`.
    """
    # Build the stack of rescaled covariance matrices in one expression.
    cov = (1e22) * (param[:, None, None] * norm_matrix[1][None, :, :]) \
        + (1e22) * (sig[:, None, None] * id_mat[None, :, :])
    # (sign, log|det|) per matrix; only index [1] (the log-det) is used.
    sign_and_logdet = np.linalg.slogdet(cov)
    # chi^2-like term m^T C^-1 m for each covariance in the stack.
    chi2 = np.array([tempp.T.dot(np.linalg.solve(C_k, tempp)) for C_k in cov])
    return chi2 + sign_and_logdet[1] + Npix2pi
vary_x_samples17 = np.logspace(-8.1, -10, num=40) #num = 40
sigma17 = np.logspace(-22, -23, num=40)
xaxis17 = vary_x_samples17
yaxis17 = LogLikehood_wNoise_1e22(vary_x_samples17, sigma17)
fig16 = plt.figure()
plt.plot(xaxis17, yaxis17)
plt.xscale("log")
plt.xlabel('C_3 values, num=40, logspace from 1e-9 to 1e-10')
plt.ylabel('-2 ln likelihood L(T|C_3)')
#plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
#plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
#plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
#plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
#plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
vary_x_samples177 = np.logspace(-8.1, -10, num=40) #num = 40
sigma177 = np.logspace(-22, -24, num=40)
xaxis177 = vary_x_samples177
yaxis177 = LogLikehood_wNoise_1e22(vary_x_samples177, sigma177)
fig167 = plt.figure()
plt.plot(xaxis177, yaxis177)
plt.xscale("log")
plt.xlabel('C_3 values, num=40, logspace from 1e-9 to 1e-10')
plt.ylabel('-2 ln likelihood L(T|C_3)')
#plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
#plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
#plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
#plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
#plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
print "|a30|^2 is green, |a32|^2 is red, |a33|^2 is magenta, 7*\hat{C}_3 is yellow, \hat{C}_3 is black"
In [87]:
param = vary_x_samples17
sig = sigma17
Sij = param[:, None, None] * norm_matrix[1][None, :, :]
newSij = (1e22)*Sij # multiply S_ij by 1e12
Nij = sig[:, None, None] * id_mat[None, :, :]
newNij = (1e22)*Nij
# Format 7/4pi * param * P_3(M) where param is the parameter we vary, C_l
# Sij.shape = (20, 3072, 3072)
Cij = newSij + newNij
#invCij = np.linalg.inv(Cij)
logdetC = np.linalg.slogdet(Cij) # returns sign and determinant; use logdetC[1]
# model_fit_terms = m^T C^-1 m
#
# model_fit_terms = np.array([np.dot(tempval.T , np.dot(invCij[i] , tempval) )
# for i in range(invCij.shape[0])])
#
model_fit_terms = np.array([np.dot(tempp.T , np.linalg.solve(Cij[i], tempp) ) for i in range(Cij.shape[0]) ])
np.linalg.solve(Cij[i], tempp)
model_fit_terms + logdetC[1] + Npix2pi
Out[87]:
In [88]:
print param.shape
print sig.shape
print Sij.shape
print Nij.shape
print norm_matrix[1].shape
print id_mat.shape
print model_fit_terms.shape
print np.linalg.solve(Cij[i], tempp).shape
print np.array([np.dot(tempp.T , np.linalg.solve(Cij[i], tempp) ) for i in range(Cij.shape[0]) ])
print np.array([np.dot(tempp.T , np.linalg.solve(Cij[i], tempp) ) for i in range(Cij.shape[0]) ]).shape
In [89]:
invvv = np.array([np.linalg.solve(Cij[i], np.identity(3072)) for i in range(Cij.shape[0]) ])
#print invvv
In [90]:
# test of
# model_fit_terms = np.array([np.dot(tempp.T , np.linalg.solve(Cij[i], tempp) ) for i in range(Cij.shape[0]) ])
#
model_fit_terms22 = np.einsum('j,ijk,k->i',tempp, invvv, tempp)
print model_fit_terms22.shape
print model_fit_terms22
In [91]:
vary_x_samples18 = np.logspace(-8.1, -10, num=5) #num = 5
sigma18 = np.logspace(-22, -23, num=5)
In [92]:
def LogLikehood_wNoise_1e22_contour(param, sig):
    """Scalar-input variant of LogLikehood_wNoise_1e22 for contour grids.

    `param` (a single C_3 value) and `sig` (a single sigma^2 value) are
    scalars / 0-d arrays; broadcasting against norm_matrix[1][None, :, :]
    produces one covariance matrix in a stack of length 1, so the return
    value is a length-1 array of
    m^T C^-1 m + ln|det C| + Npix2pi with C = 1e22 * (S + N).
    Uses module-level norm_matrix, id_mat, tempp and Npix2pi.
    """
    # One rescaled covariance matrix, kept as a (1, Npix, Npix) stack.
    scaled_signal = (1e22) * (param * norm_matrix[1][None, :, :])
    scaled_noise = (1e22) * (sig * id_mat[None, :, :])
    cov_stack = scaled_signal + scaled_noise
    # Log-determinant per matrix in the stack (index [1] of slogdet).
    logdet = np.linalg.slogdet(cov_stack)[1]
    # Quadratic form via a linear solve rather than an explicit inverse.
    fit = np.array([np.dot(tempp.T, np.linalg.solve(cov_stack[j], tempp))
                    for j in range(cov_stack.shape[0])])
    return fit + logdet + Npix2pi
In [93]:
# Evaluate the likelihood on the 5x5 (C_3, sigma^2) grid from cell 91;
# each contour call returns a length-1 array, so arr_plot1 has an extra
# trailing axis until it is reshaped.
arr_plot1 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in vary_x_samples18] for j in sigma18])
In [94]:
print arr_plot1
In [95]:
print arr_plot1.shape
In [96]:
print arr_plot1.reshape(5,5)
In [97]:
# Finer 10x10 grid over the same ranges.
vary_x_samples19 = np.logspace(-8.1, -10, num=10) #num = 10
sigma19 = np.logspace(-22, -23, num=10)
print vary_x_samples19
print sigma19
In [98]:
arr_plot2 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in vary_x_samples19] for j in sigma19])
In [99]:
print arr_plot2.shape
#print arr_plot2
In [100]:
# First contour plot of -2 ln L (x axis: C_3, y axis: sigma^2).
import pylab as pb
import numpy as np
import matplotlib.pyplot as plt
zz = arr_plot2.reshape(10,10)
plt.figure()
CS = plt.contour(vary_x_samples19, sigma19, zz)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
In [101]:
# Zoom: first 5 grid points of each axis.
vary_x_samples19 = np.logspace(-8.1, -10, num=10) #num = 10
sigma19 = np.logspace(-22, -23, num=10)
xxx3 = vary_x_samples19[:5]
yyy3 = sigma19[:5]
zzz3 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in xxx3] for j in yyy3])
In [102]:
zzz3_reshaped = zzz3.reshape(5,5)
plt.figure()
CS = plt.contour(xxx3, yyy3, zzz3_reshaped)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
print xxx3
print yyy3
print zzz3_reshaped
In [103]:
# Zoom further: first 3 grid points of each axis.
vary_x_samples19 = np.logspace(-8.1, -10, num=10) #num = 10
sigma19 = np.logspace(-22, -23, num=10)
xxx3 = vary_x_samples19[:3]
yyy3 = sigma19[:3]
zzz3 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in xxx3] for j in yyy3])
zzz3_reshaped = zzz3.reshape(3,3)
plt.figure()
CS = plt.contour(xxx3, yyy3, zzz3_reshaped)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
print xxx3
print yyy3
print zzz3
In [ ]:
In [104]:
# Wider C_3 range (1e-7 .. 1e-11), keeping the first 5x5 corner of the grid.
vary_x_samples33 = np.logspace(-7, -11, num=10) #num = 10
sigma33 = np.logspace(-22, -23, num=10)
xxx4 = vary_x_samples33[:5]
yyy4 = sigma33[:5]
zzz4 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in xxx4] for j in yyy4])
In [105]:
zzz4_reshaped = zzz4.reshape(5,5)
plt.figure()
CS = plt.contour(xxx4, yyy4, zzz4_reshaped)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
print xxx4
print yyy4
print zzz4_reshaped
In [106]:
# Denser sampling (num=20), first 4x4 corner.
vary_x_samples44 = np.logspace(-7, -11, num=20) #num = 10
sigma44 = np.logspace(-22, -23, num=20)
xxx44 = vary_x_samples44[:4]
yyy44 = sigma44[:4]
zzz44 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in xxx44] for j in yyy44])
zzz44_reshaped = zzz44.reshape(4,4)
plt.figure()
CS = plt.contour(xxx44, yyy44, zzz44_reshaped)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
print xxx44
print yyy44
print zzz44
In [107]:
# Full 10x10 grid over the wide C_3 range.
vary_x_samples55 = np.logspace(-7, -11, num=10) #num = 10
sigma55 = np.logspace(-22, -23, num=10)
xxx55 = vary_x_samples55
yyy55 = sigma55
zzz55 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in xxx55] for j in yyy55])
zzz55_reshaped = zzz55.reshape(10,10)
plt.figure()
CS = plt.contour(xxx55, yyy55, zzz55_reshaped)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
print xxx55
print yyy55
print zzz55_reshaped
In [ ]:
In [108]:
vary_x_samples20 = np.logspace(-7, -10, num=10) #num = 10
sigma20 = np.logspace(-22, -24, num=10)
print vary_x_samples20
print sigma20
In [109]:
arr_plot3 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in vary_x_samples20] for j in sigma20])
In [110]:
reshape_arr_plot3 = arr_plot3.reshape(10,10)
plt.figure()
CS = plt.contour(vary_x_samples19, sigma19, reshape_arr_plot3)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
In [ ]:
In [111]:
# 15x15 grid: C_3 in 1e-7..1e-11, sigma^2 in 1e-22..1e-23.
vary_x_samples555 = np.logspace(-7, -11, num=15) #num = 10
sigma555 = np.logspace(-22, -23, num=15)
xxx555 = vary_x_samples555
yyy555 = sigma555
zzz555 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in xxx555] for j in yyy555])
zzz555_reshaped = zzz555.reshape(15,15)
plt.figure()
CS = plt.contour(xxx555, yyy555, zzz555_reshaped)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
#print xxx555
#print yyy555
#print zzz555_reshaped
In [112]:
# Same grid, first 7x7 corner (largest C_3 / sigma^2 values).
vary_x_samples555 = np.logspace(-7, -11, num=15) #num = 10
sigma555 = np.logspace(-22, -23, num=15)
xxx555 = vary_x_samples555[:7]
yyy555 = sigma555[:7]
zzz555 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in xxx555] for j in yyy555])
zzz555_reshaped = zzz555.reshape(7,7)
plt.figure()
CS = plt.contour(xxx555, yyy555, zzz555_reshaped)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
print xxx555
print yyy555
print zzz555_reshaped
In [113]:
# Shifted C_3 range (1e-6 .. 1e-10), full 15x15 grid.
vary_x_samples88 = np.logspace(-6, -10, num=15) #num = 10
sigma88 = np.logspace(-22, -23, num=15)
xxx88 = vary_x_samples88
yyy88 = sigma88
zzz88 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in xxx88] for j in yyy88])
zzz88_reshaped = zzz88.reshape(15, 15)
plt.figure()
CS = plt.contour(xxx88, yyy88, zzz88_reshaped)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
#print xxx88
#print yyy88
#print zzz88_reshaped
In [114]:
# Remaining 8x8 corner of the 15x15 grid (smallest C_3 / sigma^2 values).
vary_x_samples555 = np.logspace(-7, -11, num=15) #num = 10
sigma555 = np.logspace(-22, -23, num=15)
xxx555 = vary_x_samples555[7:]
yyy555 = sigma555[7:]
zzz555 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in xxx555] for j in yyy555])
zzz555_reshaped = zzz555.reshape(8,8)
plt.figure()
CS = plt.contour(xxx555, yyy555, zzz555_reshaped)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
print xxx555
print yyy555
print zzz555_reshaped
In [115]:
# num=20 version of the same grid, first half of each axis.
vary_x_samples555 = np.logspace(-7, -11, num=20) #num = 10
sigma555 = np.logspace(-22, -23, num=20)
xxx555 = vary_x_samples555[:10]
yyy555 = sigma555[:10]
zzz555 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in xxx555] for j in yyy555])
zzz555_reshaped = zzz555.reshape(10,10)
plt.figure()
CS = plt.contour(xxx555, yyy555, zzz555_reshaped)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
print xxx555
print yyy555
print zzz555_reshaped
In [116]:
# Second half of each axis.
vary_x_samples555 = np.logspace(-7, -11, num=20) #num = 10
sigma555 = np.logspace(-22, -23, num=20)
xxx555 = vary_x_samples555[10:]
yyy555 = sigma555[10:]
zzz555 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in xxx555] for j in yyy555])
zzz555_reshaped = zzz555.reshape(10,10)
plt.figure()
CS = plt.contour(xxx555, yyy555, zzz555_reshaped)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
print xxx555
print yyy555
print zzz555_reshaped
In [117]:
# linear values, based on plot 115
# 3e-10 vs 1-1.5e-23
# 5e-10 vs 1.5-2.5e-23
import matplotlib.pyplot as plt
vary_x_samples555 = np.linspace(3e-10, 5e-10, num=20)
sigma555 = np.linspace(1e-23, 2.5e-23, num=20)
xxx555 = vary_x_samples555
yyy555 = sigma555
zzz555 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in xxx555] for j in yyy555])
zzz555_reshaped = zzz555.reshape(20,20)
plt.figure()
CS = plt.contour(xxx555, yyy555, zzz555_reshaped)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
print xxx555
print yyy555
print zzz555_reshaped
In [118]:
def LogLikehood_wNoise_1e22_line(param, sig):
    """Likelihood slice along sigma^2 at a single fixed C_3.

    `param` is a scalar C_3 value; `sig` is a 1-D array of sigma^2
    samples, so the covariance stack holds one matrix per noise sample
    and the return value has the same length as `sig`. Each entry is
    m^T C^-1 m + ln|det C| + Npix2pi with
    C = 1e22 * (C_3 * norm_matrix[1] + sigma^2 * id_mat).
    Uses module-level norm_matrix, id_mat, tempp and Npix2pi.
    """
    # Signal term broadcasts the scalar C_3 over a (1, Npix, Npix) matrix;
    # the noise term expands sig to a (k, Npix, Npix) stack.
    S_stack = (1e22) * (param * norm_matrix[1][None, :, :])
    N_stack = (1e22) * (sig[:, None, None] * id_mat[None, :, :])
    C_stack = S_stack + N_stack
    # Log-determinants per noise sample (slogdet returns (sign, log|det|)).
    log_determinants = np.linalg.slogdet(C_stack)[1]
    # Quadratic form m^T C^-1 m per sample, via a linear solve.
    quad = np.array([np.dot(tempp.T, np.linalg.solve(C_stack[n], tempp))
                     for n in range(C_stack.shape[0])])
    return quad + log_determinants + Npix2pi
In [119]:
# hold C_3 value, vary sigma^2 parameters
#
# Based on plot 106
# vary_x_samples44 = np.logspace(-7, -11, num=20) #num = 10
# sigma44 = np.logspace(-22, -23, num=20)
#
# xxx44 = vary_x_samples44[:4]
# yyy44 = sigma44[:4]
#
#
# Hold C3 to 4e-08, vary sigma^2 from 1
#
#
C3_sample1 = 4e-8
sigma2samples1 = np.linspace(1e-22, 6e-23, num=40)
#
#
yax1 = LogLikehood_wNoise_1e22_line(C3_sample1, sigma2samples1)
#
fig1234 = plt.figure()
plt.plot(sigma2samples1, yax1)
plt.xlabel('Noise sigma2 linspace from 1e-22 to 2.5e-23')
plt.ylabel('-2 ln likelihood L(T|C_3), C_3 value 4e-8, ')
#plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
#plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
#plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
#plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
#plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
In [120]:
# hold C_3 value, vary sigma^2 parameters
#
# based on plot 112 (or 113)
#vary_x_samples555 = np.logspace(-7, -11, num=15) #num = 10
#sigma555 = np.logspace(-22, -23, num=15)
#
#xxx555 = vary_x_samples555[:7]
#yyy555 = sigma555[:7]
#
#
#
# Hold C_3 at 3e-08; narrow sigma^2 scan.
C3_sample2 = 3e-08
sigma2samples2 = np.linspace(7e-23, 9e-23, num=40)
#
#
yax2 = LogLikehood_wNoise_1e22_line(C3_sample2, sigma2samples2)
#
fig1235 = plt.figure()
plt.plot(sigma2samples2, yax2)
plt.xlabel('Noise sigma2 linspace from 7e-23 to 9e-23')
plt.ylabel('-2 ln likelihood L(T|C_3), C_3 value 3e-08, ')
#plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
#plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
#plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
#plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
#plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
In [121]:
# hold C_3 value, vary sigma^2 parameters
#
# based on plot 112 (or 113)
#vary_x_samples555 = np.logspace(-7, -11, num=15) #num = 10
#sigma555 = np.logspace(-22, -23, num=15)
#
#xxx555 = vary_x_samples555[:7]
#yyy555 = sigma555[:7]
#
#
#
# Same C_3 as above, wider sigma^2 scan.
C3_sample2a = 3e-08
sigma2samples2a = np.linspace(2e-23, 9e-23, num=40)
#
#
yax2a = LogLikehood_wNoise_1e22_line(C3_sample2a, sigma2samples2a)
#
fig1235 = plt.figure()
plt.plot(sigma2samples2a, yax2a)
plt.xlabel('Noise sigma2 linspace from 2e-23 to 9e-23')
plt.ylabel('-2 ln likelihood L(T|C_3), C_3 value 3e-08, ')
#plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
#plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
#plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
#plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
#plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
In [122]:
# hold C_3 value, vary sigma^2 parameters
#
# based on plot 114/115
#vary_x_samples555 = np.logspace(-7, -11, num=15) #num = 10
#sigma555 = np.logspace(-22, -23, num=15)
#
#xxx555 = vary_x_samples555[7:]
#yyy555 = sigma555[7:]
#
#
#
# Hold C_3 at 3e-10; sigma^2 scan over 1e-23..2e-23.
C3_sample3 = 3e-10
sigma2samples3 = np.linspace(1e-23, 2e-23, num=40)
#
#
yax3 = LogLikehood_wNoise_1e22_line(C3_sample3, sigma2samples3)
#
fig1235 = plt.figure()
plt.plot(sigma2samples3, yax3)
plt.xlabel('Noise sigma2 linspace from 1e-23 to 2 e-23')
plt.ylabel('-2 ln likelihood L(T|C_3), C_3 value 3e-10, ')
#plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
#plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
#plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
#plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
#plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
In [123]:
# hold C_3 value, vary sigma^2 parameters
#
# based on plot 114/115
#vary_x_samples555 = np.logspace(-7, -11, num=15) #num = 10
#sigma555 = np.logspace(-22, -23, num=15)
#
#xxx555 = vary_x_samples555[7:]
#yyy555 = sigma555[7:]
#
#
#
C3_sample4 = 4e-10
sigma2samples4 = np.linspace(1e-23, 2.5e-23, num=40)
#
#
yax4 = LogLikehood_wNoise_1e22_line(C3_sample4, sigma2samples4)
#
fig1236 = plt.figure()
plt.plot(sigma2samples4, yax4)
plt.xlabel('Noise sigma2 linspace from 1e-23 to 2.5 e-23')
plt.ylabel('-2 ln likelihood L(T|C_3), C_3 value 5e-10, ')
#plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
#plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
#plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
#plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
#plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
In [124]:
# hold C_3 value, vary sigma^2 parameters
#
# based on plot 114/115
#vary_x_samples555 = np.logspace(-7, -11, num=15) #num = 10
#sigma555 = np.logspace(-22, -23, num=15)
#
#xxx555 = vary_x_samples555[7:]
#yyy555 = sigma555[7:]
#
#
#
# Same scan as the previous cell but with C_3 = 5e-10 (overwrites
# C3_sample4 / sigma2samples4 / yax4 from that cell).
C3_sample4 = 5e-10
sigma2samples4 = np.linspace(1e-23, 2.5e-23, num=40)
#
#
yax4 = LogLikehood_wNoise_1e22_line(C3_sample4, sigma2samples4)
#
fig1236 = plt.figure()
plt.plot(sigma2samples4, yax4)
plt.xlabel('Noise sigma2 linspace from 1e-23 to 2.5 e-23')
plt.ylabel('-2 ln likelihood L(T|C_3), C_3 value 5e-10, ')
#plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
#plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
#plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
#plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
#plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
In [125]:
# linear values, based on plot
# 4e-08 vs 1e-22 vs 6e-23
# 3e-08 vs 4.5-7e-23
# Linear contour grid around the large-C_3 likelihood feature.
import matplotlib.pyplot as plt
vary_x_samples555 = np.linspace(2e-08, 5e-08, num=15) #2e-08 to 5e-08
sigma555 = np.linspace(1e-22, 7e-23, num=15) #1e-22 to 7e-23
xxx555 = vary_x_samples555
yyy555 = sigma555
zzz555 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in xxx555] for j in yyy555])
zzz555_reshaped = zzz555.reshape(15,15)
plt.figure()
CS = plt.contour(xxx555, yyy555, zzz555_reshaped)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
print xxx555
print yyy555
print zzz555_reshaped
In [126]:
# hold C_3 value, vary sigma^2 parameters
#
# based on plot 112 (or 113)
#vary_x_samples555 = np.logspace(-7, -11, num=15) #num = 10
#sigma555 = np.logspace(-22, -23, num=15)
#
#xxx555 = vary_x_samples555[:7]
#yyy555 = sigma555[:7]
#
#
#
C3_sample2a = 7e-08
sigma2samples2a = np.linspace(1e-23, 9e-23, num=40)
#
#
yax2a = LogLikehood_wNoise_1e22_line(C3_sample2a, sigma2samples2a)
#
fig1235 = plt.figure()
plt.plot(sigma2samples2a, yax2a)
plt.xlabel('Noise sigma2 linspace from 2e-23 to 9e-23')
plt.ylabel('-2 ln likelihood L(T|C_3), C_3 value 3e-08, ')
#plt.axvline(x =1.25873313e-09, color = 'g') # a30 = 1.25873313e-09
# plt.plt.axvline(x = 4.79243265e-17 ) # a31 = 4.79243265e-17
#plt.axvline(x =5.86460794e-13, color = 'r') # a32 = 5.86460794e-13
#plt.axvline(x =7.27453196e-10, color = 'm') # a33 = 7.27453196e-10
#plt.axvline(x =4.5454678088e-09, color = 'y') # 7*\hat{C}_3 = 4.5454678088e-09
#plt.axvline(x =6.49352544114e-10, color = 'k') # \hat{C}_3 = 6.49352544114e-10
plt.grid()
In [127]:
# linear values, based on plot 115
# 3e-10 vs 1-1.5e-23
# 5e-10 vs 1.5-2.5e-23
# Linear contour grid around the small-C_3 likelihood feature.
import matplotlib.pyplot as plt
vary_x_samples555 = np.linspace(2e-10, 6e-10, num=20) # use 2e-10 to 6e-10
sigma555 = np.linspace(1e-23, 3e-23, num=20)
xxx555 = vary_x_samples555
yyy555 = sigma555
zzz555 = np.array([[LogLikehood_wNoise_1e22_contour(np.asarray(i), np.asarray(j)) for i in xxx555] for j in yyy555])
zzz555_reshaped = zzz555.reshape(20,20)
plt.figure()
CS = plt.contour(xxx555, yyy555, zzz555_reshaped)
plt.clabel(CS, inline=1, fontsize=10)
pb.show()
print xxx555
print yyy555
print zzz555_reshaped
In [ ]:
In [ ]:
In [ ]: