This example demonstrates how to use ellipsoidal nested rejection sampling [1] to sample from the posterior distribution for a logistic model fitted to model-simulated data.
[1] "A nested sampling algorithm for cosmological model selection", Pia Mukherjee, David Parkinson and Andrew R. Liddle, arXiv:astro-ph/0508461v2.
First create fake data.
In [1]:
import pints
import pints.toy as toy
import numpy as np
import matplotlib.pyplot as plt

# Instantiate the toy logistic growth forward model
model = toy.LogisticModel()

# True parameter values used to generate the synthetic data
r = 0.015  # growth rate
k = 500    # carrying capacity
real_parameters = [r, k]

# Simulate the noise-free signal on a regular time grid
times = np.linspace(0, 1000, 100)
signal_values = model.simulate(real_parameters, times)

# Corrupt the signal with independent Gaussian noise of scale sigma
sigma = 10
observed_values = signal_values + pints.noise.independent(sigma, signal_values.shape)

# Show the clean signal alongside the noisy observations
plt.plot(times, signal_values, label='signal')
plt.plot(times, observed_values, label='observed')
plt.xlabel('Time')
plt.ylabel('Values')
plt.legend()
plt.show()
Create the nested sampler that will be used to sample from the posterior.
In [2]:
# Create an object linking the forward model to the observed time series
problem = pints.SingleOutputProblem(model, times, observed_values)
# Create a log-likelihood function (adds the noise scale as an extra
# parameter, inferred alongside the two model parameters)
log_likelihood = pints.GaussianLogLikelihood(problem)
# Create a uniform prior over both the parameters and the new noise variable
log_prior = pints.UniformLogPrior(
[0.01, 400, sigma * 0.5],
[0.02, 600, sigma * 1.5])
# Create a nested ellipsoidal rejection sampler
sampler = pints.NestedController(log_likelihood, log_prior, method=pints.NestedEllipsoidSampler)
# Set number of iterations
sampler.set_iterations(8000)
# Set the number of posterior samples to generate
sampler.set_n_posterior_samples(1600)
# Do proposals in parallel
sampler.set_parallel(True)
# Use dynamic enlargement factor
# NOTE(review): this reaches into the controller's private `_sampler`
# attribute — confirm whether pints exposes a public accessor for this
sampler._sampler.set_dynamic_enlargement_factor(1)
Run the sampler!
In [3]:
# Run the nested sampler and collect the returned posterior samples
# (8000 iterations — this may take a while)
samples = sampler.run()
print('Done!')
In [4]:
# Plot output
# Plot output
import pints.plot
# Marginal histograms of the posterior samples, with the true
# parameter values marked for reference
pints.plot.histogram([samples], ref_parameters=[r, k, sigma])
plt.show()
In [5]:
# Plot predicted time series for the first 100 posterior samples
# over the observed data (first 100 rather than a random subset —
# presumably adequate here since nested-sampling output is unordered)
pints.plot.series(samples[:100], problem)
plt.show()
In [6]:
# Report the marginal log-likelihood estimate and its standard deviation
evidence = sampler.marginal_log_likelihood()
evidence_sd = sampler.marginal_log_likelihood_standard_deviation()
print(f'marginal log-likelihood = {evidence} ± {evidence_sd}')
In [7]:
# Report the effective sample size of the nested-sampling run
ess = sampler.effective_sample_size()
print(f'effective sample size = {ess}')