``````

In [1]:

%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from bigmali.grid import Grid
from bigmali.likelihood import BiasedLikelihood
from bigmali.prior import TinkerPrior
from bigmali.hyperparameter import get
from scipy.stats import lognorm
from time import time
from math import sqrt

def p1(lobs, lum, sigma):
    """Lognormal density of the observed luminosity lobs centred on the true luminosity lum.

    Equivalent to evaluating LogNormal(median=lum, shape=sigma) at lobs.
    """
    density = fast_lognormal(lum, sigma, lobs)
    return density

def p2(lum, mass, a1, a2, a3, a4, S, z):
    """Lognormal density of the true luminosity lum given halo mass.

    The median luminosity follows the mass-luminosity relation
    exp(a1) * (mass / a3)**a2 * (1 + z)**a4, with shape parameter S.
    """
    median_lum = np.exp(a1) * (mass / a3) ** a2 * (1 + z) ** a4
    return fast_lognormal(median_lum, S, lum)

def p3(mass, z, prior):
    """Prior density of halo mass at redshift z, looked up from the supplied prior object."""
    mass_prior = prior.fetch(z)
    return mass_prior.pdf(mass)

def q1(lum, lobs, sigma):
    """Importance-sampling proposal density for the true luminosity lum, centred on the observation lobs."""
    proposal = fast_lognormal(lobs, sigma, lum)
    return proposal

def q2(mass, lum, a1, a2, a3, a4, S, z):
    """Importance-sampling proposal density for halo mass given a luminosity sample.

    Centres the lognormal on the mass obtained by inverting the
    mass-luminosity relation used in p2.
    """
    median_mass = a3 * (lum / (np.exp(a1) * (1 + z) ** a4)) ** (1 / a2)
    return fast_lognormal(median_mass, S, mass)

def fast_lognormal(mu, sigma, x):
    """Lognormal pdf at x, parameterized by median mu and shape sigma.

    Vectorized over numpy arrays; equivalent to
    scipy.stats.lognorm(sigma, scale=mu).pdf(x) without the scipy call
    overhead.
    """
    z = (np.log(x) - np.log(mu)) / sigma
    normalization = x * sigma * np.sqrt(2 * np.pi)
    return np.exp(-0.5 * z ** 2) / normalization

def log_multi_importance_sampling(lum_obs, zs, a1, a2, a3, a4, S, prior, sigma=0.05, nsamples=10**4):
    """Estimate the total log-likelihood of the observations via importance sampling.

    For each observation i, draws ``nsamples`` luminosities from a lognormal
    proposal centred on lum_obs[i] (q1), one mass per luminosity from a
    lognormal proposal centred on the inverted mass-luminosity relation (q2),
    and averages the importance weights p1*p2*p3 / (q1*q2).  The per-object
    log averages are summed.

    Parameters
    ----------
    lum_obs : sequence of observed luminosities.
    zs : sequence of redshifts, same length as lum_obs (expected to be
        snapped to the prior's grid, since p3 calls prior.fetch(z)).
    a1, a2, a3, a4, S : hyperparameters of the mass-luminosity relation.
    prior : object providing fetch(z).pdf(mass).
    sigma : shape of the luminosity-observation noise / proposal.
    nsamples : number of importance samples per observation.

    Returns
    -------
    float: sum of per-observation log marginal-likelihood estimates.

    Raises
    ------
    ValueError: if lum_obs and zs differ in length.
    """
    n = len(lum_obs)
    if n != len(zs):
        # Bug fix: the original raised `Error`, which is not a builtin and
        # would itself crash with NameError; ValueError is the right type.
        raise ValueError('lum_obs and zs must be same length!')
    # Broadened shape parameter for the mass proposal q2; the 5.6578...
    # factor presumably widens the proposal to cover the target's tails
    # -- TODO confirm where this constant is derived.
    rev_S = 5.6578015811698101 * S
    result = 0
    for i in xrange(n):
        # Proposal q1: luminosity samples around the observed value.
        lums = lognorm(sigma, scale=lum_obs[i]).rvs(size=nsamples)
        # Invert the mass-luminosity relation to centre the mass proposal.
        mu_mass = a3 * (lums / (np.exp(a1) * (1 + zs[i]) ** a4)) ** (1 / a2)
        # Proposal q2: one mass draw per luminosity sample (rvs() without a
        # size argument returns an array matching mu_mass's shape).
        masses = lognorm(rev_S, scale=mu_mass).rvs()
        weights = (p1(lum_obs[i], lums, sigma) *
                   p2(lums, masses, a1, a2, a3, a4, S, zs[i]) *
                   p3(masses, zs[i], prior)) / \
                  (q1(lums, lum_obs[i], sigma) *
                   q2(masses, lums, a1, a2, a3, a4, rev_S, zs[i]))
        result += np.log(np.sum(weights) / nsamples)
    return result

``````
``````

In [2]:

# Load the fiducial hyperparameters and build the redshift grid / mass prior.
a1, a2, a3, a4, S = get()
g = Grid()
z = g.redshifts[3]  # one grid redshift, used for the spot checks below
prior = TinkerPrior(g)

# Spot-check each density function; expected values are the recorded outputs
# in the next cell of this notebook.
print fast_lognormal(20,10,30)
print p1(20,19,2)
print p2(1e4, 1e11, a1, a2, a3, a4, S, z)
print p3(1e11, z, prior)
print q1(20,19,2)
print q2(1e11, 1e4, a1, a2, a3, a4, S, z)

``````
``````

0.00132871493565
0.00997027749323
1.65626971034e-09
1.40762668993e-12
0.00997027749323
9.18141342255e-51

``````
``````

In [3]:

# NOTE(review): `data` is never defined in this notebook -- it is hidden
# kernel state from a deleted/earlier cell. Load it explicitly (and the
# grid `g`) before running on a fresh kernel.
lum_obs = data.lum_obs.as_matrix()[:3]
zs =  data.z.as_matrix()[:3]
# Snap each redshift onto the grid so that prior.fetch(z) lookups succeed.
for i,z in enumerate(zs):
    zs[i] = g.snap(z)
nsamples = 10000

print lum_obs
print zs
print log_multi_importance_sampling(lum_obs, zs, a1, a2, a3, a4, S, prior, nsamples=nsamples)

``````
``````

[ 13748.7935573    8259.73850042  18382.3049543 ]
[ 2.21052632  2.02631579  2.02631579]
-33.1821909033

``````
``````

In [4]:

# Time one full likelihood evaluation (3 observations x 10^4 samples; ~30 ms
# per the recorded output below).
%time log_multi_importance_sampling(lum_obs, zs, a1, a2, a3, a4, S, prior, nsamples=nsamples)

``````
``````

CPU times: user 27.5 ms, sys: 1.92 ms, total: 29.4 ms
Wall time: 29.2 ms

Out[4]:

-33.184572883160939

``````

## Making a New Mock Dataset

``````

In [16]:

# Draw one mass per redshift from the mass prior.
# NOTE(review): depends on hidden kernel state -- `data` is not defined in
# any visible cell; load it before running.
mass = []
zs = data.z.as_matrix()
n = len(zs)
for z in zs:
    mass.append(*prior.fetch(z).rvs(1))
mass = np.array(mass)

# Forward-model luminosities: median from the mass-luminosity relation,
# lognormal intrinsic scatter S, then 0.05 lognormal observational noise.
mean_L = np.exp(a1)*(mass / a3)**a2 * (1 + zs) ** a4
lum = lognorm(S, scale=mean_L).rvs()
lum_obs = lognorm(0.05, scale=lum).rvs()

``````
``````

In [18]:

# Persist the mock catalog as whitespace-separated "<z> <lum_obs>" rows.
# Idiom fix: use a with-statement so the file is closed even if a write fails.
with open('mock_data.txt', 'w') as f:
    for i in xrange(len(mass)):
        f.write('{} {}\n'.format(zs[i], lum_obs[i]))

``````
``````

In [20]:

a3  # mass normalization of the mass-luminosity relation (2.35e14 per the output below)

``````
``````

Out[20]:

235000000000000.0

``````
``````

In [21]:

a1  # log-amplitude of the mass-luminosity relation (used as exp(a1) in p2/q2)

``````
``````

Out[21]:

10.747809151611289

``````
``````

In [ ]:

``````