In [1]:
import numpy as np
from astropy.io import fits, ascii
from astropy.coordinates import SkyCoord, Angle
from astropy import units as u
from astropy import constants as const
from astropy import wcs
from astropy.convolution import Gaussian2DKernel, convolve
from astropy.table import Table, Column, join, hstack
import json
import scipy.stats, scipy
import matplotlib.pyplot as plt
import pymultinest
from pymultinest.solve import solve
import math, os
import scipy
import scipy.interpolate
import scipy.ndimage
import time
%matplotlib inline
#%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
plt.rc('figure', figsize=(10, 6))
In [2]:
#data_folder = "/Volumes/Raph500/GEP"
# Input working directory (notebook-relative).
DAT_FOL = './' #"/Users/rs548/GitHub/GEP"
# Output directory for saved fake images / results; must exist before saving.
OUT_FOL='./data/fakes/'
In [3]:
def make_fake_image(n_gaussians,
                    psf_size=2.,             # pixels (PSF sigma)
                    image_size=10,           # pixels (central square region)
                    buffer=5,                # pixels (border on each side)
                    background=0.,           # uJy
                    flux_range=[10., 100.],  # uJy (list default is never mutated)
                    depth=0.,                # uJy
                    save=False
                    ):
    """
    Make a fake image with a given number of Gaussian point sources.

    Randomly chooses n fluxes and places n point sources with those fluxes at
    random integer pixel positions in the central region, then convolves with
    a Gaussian PSF. Used to test algorithms for estimating the number of
    objects in an image.

    Inputs
    ------
    n_gaussians: int
        Number of gaussian objects to add to image.
    psf_size: float
        Point Spread Function (PSF) standard deviation in pixel scale.
    image_size: int
        The number of pixels (per side) of the central square region.
    buffer: int
        The number of pixels to border the central region on each side.
    background: float
        Background to add to every pixel.
    flux_range: list of two floats
        Lower and upper flux to uniformly sample from in uJy.
    depth: float
        Standard deviation of Gaussian noise to add to image.
    save: bool
        If True, save the image as a .npy file under OUT_FOL. Note the
        filename records only the LAST source placed plus a timestamp,
        not all sources.

    Returns
    -------
    image: np.array
        2D array of shape (image_size + 2*buffer, image_size + 2*buffer)
        with n gaussians randomly placed around central pixel space.
    """
    psf = Gaussian2DKernel(psf_size, x_size=101, y_size=101)
    side = image_size + 2 * buffer
    image = np.full([side, side], 0.)
    # BUG FIX: keep the last-source properties bound even when n_gaussians == 0,
    # otherwise the save branch below raised NameError on gal_x/gal_y/gal_flux.
    gal_x = gal_y = gal_flux = None
    for gal in np.arange(n_gaussians):
        gal_x = np.random.choice(np.arange(image_size))
        gal_y = np.random.choice(np.arange(image_size))
        gal_flux = np.random.uniform(flux_range[0], flux_range[1])
        # Point source: all flux in a single pixel; the PSF smears it below.
        image[gal_y + buffer, gal_x + buffer] = gal_flux
    image = convolve(image, psf)
    # Add background
    image = image + background
    # Add noise
    noise = depth * np.random.randn(image.shape[0], image.shape[1])
    image = image + noise
    # Save image with key info (last source + timestamp) for later inspection.
    if save:
        np.save(OUT_FOL + 'fake_x{}_y{}_f{}_t{}.npy'.format(gal_x, gal_y, gal_flux, time.time()), image)
    return image
# Quick visual check: 3 random sources, sigma=0.1 uJy noise; also writes the
# image to OUT_FOL (the directory must exist).
test_image = make_fake_image(3, save=True, depth=0.1)
plt.imshow(test_image)
Out[3]:
In [4]:
def make_model_image(n_gaussians,
                     positions,
                     fluxes,            # uJy
                     psf_size=2.,       # pixels (PSF sigma)
                     image_size=10,     # pixels (central square region)
                     buffer=5,          # pixels (border on each side)
                     noise=None,
                     background=0.,     # uJy
                     save=False
                     ):
    """
    Make a model image with point sources at the GIVEN positions and fluxes
    (unlike make_fake_image, nothing is randomized except optional noise).

    Used as the forward model when fitting for the number of objects.

    Inputs
    ------
    n_gaussians: int
        Number of gaussian objects in the model. NOTE: placement actually
        iterates over `positions`; this argument is kept for interface
        compatibility with callers.
    positions: list of [x, y] integer pairs
        Pixel positions of the sources within the central region.
    fluxes: list of floats
        Flux of each source in uJy, matched by index to `positions`.
    psf_size: float
        Point Spread Function (PSF) standard deviation in pixel scale.
    image_size: int
        The number of pixels (per side) of the central square region.
    buffer: int
        The number of pixels to border the central region on each side.
    noise: float or None
        If not None, standard deviation of Gaussian noise to add.
    background: float
        Background to add to every pixel.
    save: bool
        If True, save the image as a .npy file under OUT_FOL; the filename
        records only the last source placed plus a timestamp.

    Returns
    -------
    model_image: np.array
        2D array with the model image.
    """
    psf = Gaussian2DKernel(psf_size, x_size=101, y_size=101)
    side = image_size + 2 * buffer
    model_image = np.full([side, side], 0.)
    # Keep last-source values bound so the save branch is safe for empty input.
    gal_x = gal_y = gal_flux = None
    for n, gal in enumerate(positions):
        gal_x = gal[0]
        gal_y = gal[1]
        gal_flux = fluxes[n]
        model_image[gal_y + buffer, gal_x + buffer] = gal_flux
    model_image = convolve(model_image, psf)
    # Add background
    model_image = model_image + background
    # Add noise (use a separate name so the `noise` parameter isn't clobbered;
    # `is not None` avoids elementwise comparison pitfalls of `!= None`).
    if noise is not None:
        noise_map = noise * np.random.randn(model_image.shape[0], model_image.shape[1])
        model_image = model_image + noise_map
    # Save image with key info. BUG FIX: previously saved the undefined name
    # `image` (NameError whenever save=True); save the model image instead.
    if save:
        np.save(OUT_FOL + 'fake_x{}_y{}_f{}_t{}.npy'.format(gal_x, gal_y, gal_flux, time.time()), model_image)
    return model_image
# Reference model: 3 sources at known positions/fluxes with sigma=0.1 noise.
# Reused as the target image for the fitting experiments below.
test_model = make_model_image(3, [[9,0], [8,9], [4,8]], [75., 29., 89.], noise = 0.1)
plt.imshow(test_model)
Out[4]:
In [6]:
# probability function, taken from the eggbox problem.
def myprior(cube):
    """Prior transform: stretch each unit-hypercube axis to [0, 10*pi]."""
    scale = 10 * np.pi
    return cube * scale
def myloglike(cube):
    """Eggbox-style log-likelihood: (2 + prod_i cos(cube_i / 2))**5."""
    cosine_product = np.prod(np.cos(cube / 2.))
    return (2. + cosine_product) ** 5
# Sanity-check pymultinest on the eggbox toy problem before fitting images.
# number of dimensions our problem has
parameters = ["x", "y"]
#parameters = ["x", "y", "z", "j"]
n_params = len(parameters)
# name of the output files (the chains/ directory must already exist)
#prefix = "chains/3-"
prefix = "chains/{}-".format(n_params + 1)
# run MultiNest
result = solve(LogLikelihood=myloglike, Prior=myprior,
               n_dims=n_params, outputfiles_basename=prefix, verbose=True)
print()
print('evidence: %(logZ).1f +- %(logZerr).1f' % result)
print()
print('parameter values:')
# Posterior mean +/- std for each parameter, one column per dimension.
for name, col in zip(parameters, result['samples'].transpose()):
    print('%15s : %.3f +- %.3f' % (name, col.mean(), col.std()))
# make marginal plots by running:
# $ python multinest_marginals.py chains/3-
# For that, we need to store the parameter names:
with open('{}params.json'.format(prefix), 'w') as f:
    json.dump(parameters, f, indent=2)
In [9]:
def general_prior(n_gaussians, x_l, x_u, y_l, y_u, f_l, f_u):
    """
    Build a prior transform for an n-source model.

    The returned function maps a point sampled uniformly from the
    D-dimensional unit hypercube (D = 3 * n_gaussians) to parameter space,
    so that sampling the hypercube and applying it yields samples from the
    (uniform) prior.

    Inputs
    ------
    n_gaussians: int
        Number of sources; each occupies three consecutive cube entries.
    x_l, x_u: float
        Lower/upper bounds of the x pixel position.
    y_l, y_u: float
        Lower/upper bounds of the y pixel position.
    f_l, f_u: float
        Lower/upper bounds of the flux.

    Returns
    -------
    prior: function object
        Maps hypercube coordinates to parameter-space coordinates.
        NOTE: it writes the transformed values back into its input
        (MultiNest convention) and also returns that same object.
    """
    def prior(cube):
        """Map unit-hypercube coordinates to (x, y, flux) triples, in place."""
        for src in np.arange(n_gaussians):
            base = 3 * src
            cube[base] = x_l + cube[base] * (x_u - x_l)            # x pixel
            cube[base + 1] = y_l + cube[base + 1] * (y_u - y_l)    # y pixel
            cube[base + 2] = f_l + cube[base + 2] * (f_u - f_l)    # flux
        return cube
    return prior
# Spot-check the prior transform: feed it a hypercube point near the truth
# and confirm the mapped parameters land close to `truth`.
n_gaussians = 3
x_l = 0
x_u = 10
y_l = 0
y_u = 10
f_l = 10.
f_u = 100.
truth = [0, 9, 75., 8, 9, 29., 4, 8, 89.]
truth_cube_close = np.array([0.,0.9,0.7222, 0.8, 0.9, 0.211, 0.4, 0.8, 0.878])
general_prior(n_gaussians, x_l, x_u, y_l, y_u, f_l, f_u)(truth_cube_close)
Out[9]:
In [10]:
def chi_squared(image, model, error):
    """Return the Gaussian log-likelihood of `model` given `image`.

    NOTE: despite the name, this returns -0.5 * chi-squared (i.e. a
    log-likelihood up to the constant normalisation term, which is ignored)
    — the quantity MultiNest's LogLikelihood callback expects.

    Inputs
    ------
    image: np.array
        The image to fit.
    model: np.array
        The model image (same shape as `image`).
    error: float or np.array
        Per-pixel 1-sigma uncertainty.

    Returns
    -------
    float
        -0.5 * sum(((image - model) / error)**2).
    """
    # Use a distinct local name: the original shadowed the function name.
    residual = image - model
    return -0.5 * np.sum(residual**2 / error**2)
In [11]:
def general_loglike(n_gaussians, x_l, x_u, y_l, y_u, f_l, f_u, image, error=0.1):
    """Build a log-likelihood function for an n-source model.

    The returned function evaluates -0.5 * chi-squared (natural units; the
    original docstring's "Log10" was wrong) of the model against `image`.
    It expects its input to have ALREADY been mapped to parameter space by
    the prior transform (as pymultinest.solve does).

    Inputs
    ------
    n_gaussians: int
        Number of sources, passed through to make_model_image.
    x_l, x_u, y_l, y_u, f_l, f_u: float
        Parameter bounds. Unused here; kept to mirror general_prior's
        signature so both factories can be called with the same arguments.
    image: np.array
        The observed image the model is compared against.
    error: float
        Per-pixel 1-sigma uncertainty forwarded to chi_squared.

    Returns
    -------
    loglike: function object
        Maps a flat parameter vector [x0, y0, f0, x1, y1, f1, ...] to a
        Gaussian log-likelihood (up to a constant).
    """
    def loglike(cube):
        # Copy so we never mutate the sampler's parameter vector
        # (works for both lists and numpy arrays).
        parameters = cube.copy()
        positions = []
        fluxes = []
        # One (x, y, flux) triple per source. Positions are truncated to
        # integer pixel indices because make_model_image places point
        # sources on the pixel grid.
        for gal in np.arange(int(len(parameters) / 3)):
            positions += [[int(parameters[0 + 3 * gal]), int(parameters[1 + 3 * gal])]]
            fluxes += [parameters[2 + 3 * gal]]
        # Generate the noiseless model image, then score it.
        model = make_model_image(n_gaussians, positions, fluxes)
        return chi_squared(image, model, error)
    return loglike
# Check the likelihood prefers parameters near the truth used for test_model:
#[[9,0], [8,9], [4,8]], [75., 29., 89.]
n_gaussians = 3
x_l = 0
x_u = 10
y_l = 0
y_u = 10
f_l = 10.
f_u = 100.
image = test_model
truth = [0, 9, 75., 8, 9, 29., 4, 8, 89.]
truth_cube_far = np.array([0.1,0.1,0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
# NOTE(review): the list below immediately overwrites the array above —
# presumably a leftover; only the list version is used.
truth_cube_far = [0.1,0.1,0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
truth_cube_close = [0.,0.9,0.66, 0.8, 0.9, 0.21, 0.4, 0.8, 0.66]
# Expect: "close" log-likelihood much higher (less negative) than "far".
print("close: {}".format(general_loglike(n_gaussians, x_l, x_u, y_l, y_u, f_l, f_u, image)(truth_cube_close)))
print("far: {}".format(general_loglike(n_gaussians, x_l, x_u, y_l, y_u, f_l, f_u, image)(truth_cube_far)))
In [12]:
# run MultiNest for simple test
# NOTE(review): the model has 2 sources but the image is built with only 1 —
# looks deliberate (testing over-fitting behaviour); confirm intent.
n_gaussians = 2
x_l = 0
x_u = 10
y_l = 0
y_u = 10
f_l = 10.
f_u = 100.
# Timestamped prefix keeps repeated runs from clobbering each other's output.
prefix = "TEST_{}_{}".format(n_gaussians, time.time())
parameters = [ "x", "y", "flux", "x2", "y2", "flux2"]
n_params = len(parameters)
#image = make_model_image(3, [[9,0], [8,9], [4,8]], [75., 29., 89.], noise = 0.1)
image = make_model_image(1, [[9,0]], [75.], noise = 0.1)
prior = general_prior(n_gaussians, x_l, x_u, y_l, y_u, f_l, f_u)
loglike = general_loglike(n_gaussians, x_l, x_u, y_l, y_u, f_l, f_u, image)
In [13]:
# Run the nested sampling; writes MultiNest output files under `prefix`.
result = solve(LogLikelihood=loglike,
               Prior=prior,
               n_dims=n_params, outputfiles_basename=prefix, verbose=True)
In [14]:
# Display the solve() output (contains logZ, logZerr and posterior samples).
result
Out[14]:
In [ ]:
# Scan over model complexity: fit test_model with n = 1..5 sources and save
# each MultiNest result so the evidences can be compared afterwards.
x_l = 0
x_u = 10
y_l = 0
y_u = 10
f_l = 10.
f_u = 100.
image = test_model
for n in np.arange(1, 6):
    # Timestamped per-run prefix; OUT_FOL + 'chains/' must already exist.
    prefix = OUT_FOL + "chains/TEST_{}d_{}".format(n, time.time())
    prior = general_prior(n, x_l, x_u, y_l, y_u, f_l, f_u)
    loglike = general_loglike(n, x_l, x_u, y_l, y_u, f_l, f_u, image)
    parameters = []
    for ob in np.arange(n):
        # BUG FIX: "x_".format(ob) had no {} placeholder, so every source got
        # identical names ("x_", "y_", "f_"); include the source index.
        parameters += ["x_{}".format(ob), "y_{}".format(ob), "f_{}".format(ob)]
    # Store parameter names for multinest_marginals.py plotting.
    with open('{}params.json'.format(prefix), 'w') as f:
        json.dump(parameters, f, indent=2)
    result = solve(LogLikelihood=loglike,
                   Prior=prior,
                   n_dims=n*3, outputfiles_basename=prefix, verbose=True)
    np.save(OUT_FOL + "result_{}d_{}".format(n, time.time()), result)
    print(result)