In [1]:
from lnl import lnl

In [2]:
from scipy.optimize import minimize

In [3]:
x0 = [.13, 70,
        0, -1]

def fun(x):
    # x = [omega_m, H0, log10(m_chi/H0), log10(chi0)]
    cosmo = {'omega_m': x[0],
             'H0': x[1],
             'm_chi_over_H0': 10**x[2],
             'chi0': 10**x[3],
             'chidot0': 0,
             'r_s': 147.5}
    return lnl(cosmo)
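
The last two entries of x0 are log10 values: fun maps x[2] and x[3] through 10** before handing them to lnl. Before launching the optimizer it is cheap to confirm the likelihood is finite at the starting point; a minimal sketch, reusing the fun and x0 defined above:

import numpy as np

# Sketch: one evaluation at the starting point. If lnl returns NaN or inf
# here, COBYLA's initial trust region is meaningless.
start_val = fun(x0)
assert np.isfinite(start_val), 'lnl is not finite at x0'
print 'lnl at x0:', start_val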

In [4]:
bounds = [(.1, .15), (60,80), (-2,2), (-4,0)]
bestfit_axion = minimize(fun, x0, 
                         method = 'COBYLA', 
                         #bounds = bounds,
                         options = {'tol' : 1.0e-3,
                                    'rhobeg' : [.005, 1, .1, .1],
                                    'catol' : 1e-6
                                    }).x


lnl is 13.944579
lnl is 8.555346
lnl is 8.549932
lnl is 8.546220
lnl is 8.540107
lnl is 6.905148
lnl is 8.915406
lnl is 7.460592
lnl is 6.903872
lnl is 6.928300
lnl is 6.903071
lnl is 7.192704
lnl is 7.007133
lnl is 6.911078
lnl is 6.935188
/Users/follin/projects/anaconda/lib/python2.7/site-packages/scipy/optimize/_minimize.py:375: RuntimeWarning: Method COBYLA cannot handle bounds.
  RuntimeWarning)
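
The warning above is scipy noting that COBYLA simply ignores the bounds argument. If the box constraints in bounds matter, one workaround is to express them as the inequality constraints COBYLA does accept; a hedged sketch, reusing fun, x0, and bounds from the cells above:

# Sketch: each (lower, upper) pair becomes two 'ineq' constraints,
# which COBYLA requires to be >= 0 at feasible points.
cons = []
for i, (lo, hi) in enumerate(bounds):
    cons.append({'type': 'ineq', 'fun': lambda x, i=i, lo=lo: x[i] - lo})
    cons.append({'type': 'ineq', 'fun': lambda x, i=i, hi=hi: hi - x[i]})

bestfit_axion_bounded = minimize(fun, x0,
                                 method='COBYLA',
                                 constraints=cons,
                                 options={'tol': 1.0e-3, 'catol': 1e-6}).x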

In [5]:
chi2 = 2 * fun(bestfit_axion)
print 'chi2 is ', chi2
print 'bestfit is', bestfit_axion
x = bestfit_axion
from get_background import get_hubble_rate
da, h = get_hubble_rate(**{'omega_m': x[0],
                           'H0': x[1],
                           'M_chi_over_H0': 10**x[2],   # undo the log10 parametrization used in fun
                           'chi0': 10**x[3],
                           'chidot0': 0,
                           'r_s': 147.5})
z = linspace(0,20, 10000)
plot(z, da(z)/147.5)
scatter(array([0.35, 0.57, 2.4]), array([6.875, 9.191, 10.8]))
figure()
plot(z, h(z)*3e5)
y = array([12895, 14231, 3e5/9])/147.5
scatter(array([0.35, 0.57, 2.4]), y)


lnl is 6.935188
chi2 is  13.8703752249
bestfit is [  1.39004198e-01   7.00062618e+01   6.25695053e-03  -9.95143201e-01]
Out[5]:
<matplotlib.collections.PathCollection at 0x106e4e410>

In [6]:
x0 = [  1.39245792e-01,  7.22385516e+01]
def fun(x):
    # LCDM reference: the scalar field is switched off (chi0 = 0, negligible mass)
    cosmo = {'omega_m': x[0],
             'H0': x[1],
             'M_chi_over_H0': .0001,
             'chi0': 0,
             'chidot0': 0,
             'r_s': 147.5}
    return lnl(cosmo)

In [7]:
bounds = [(.1, .15), (60,80)]

bestfit_lcdm = minimize(fun, x0, 
                        method = 'COBYLA', 
                        #bounds = bounds,
                        options = {'tol' : 1.0e-3,
                                    'rhobeg' : [.005, 1],
                                    'catol' : 1e-6
                                    }).x


  File "<ipython-input-7-153de5674d06>", line 6
    options = {'tol' : 1.0e-3,
          ^
SyntaxError: invalid syntax

In [ ]:
chi2 = 2 * fun(bestfit_lcdm)
print 'chi2 is ', chi2
print 'bestfit is', bestfit_lcdm
x = bestfit_lcdm
from get_background import get_hubble_rate
da_lcdm, h_lcdm = get_hubble_rate(**{'omega_m': x[0],
                                     'H0': x[1],
                                     'M_chi_over_H0': .0001,
                                     'chi0': 0,
                                     'chidot0': 0,
                                     'r_s': 147.5})
z = linspace(0,20, 10000)
plot(z, da_lcdm(z)/147.5)
scatter(array([0.35, 0.57, 2.4]), array([6.875, 9.191, 10.8]))
figure()
plot(z, h_lcdm(z)*3e5)
y = array([12895, 14231, 3e5/9])/147.5
scatter(array([0.35, 0.57, 2.4]), y)
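
Once the LCDM cell above actually runs, the natural comparison is the difference in chi2 between the two best fits. A sketch, assuming the axion value is the 13.87 printed by In [5] (chi2_axion and chi2_lcdm are illustrative names; the cells above reuse the single name chi2 for both):

chi2_axion = 13.8703752249   # chi2 printed by In [5] for the 4-parameter axion fit
chi2_lcdm = chi2             # chi2 computed in the cell above for the 2-parameter LCDM fit
print 'delta chi2 (LCDM - axion):', chi2_lcdm - chi2_axion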

In [ ]:
z = linspace(0,5,1000)
plot(z, h(z)/h_lcdm(z))

In [ ]:
plot(z, da(z)/da_lcdm(z))

In [3]:
from scipy.optimize import show_options
show_options(solver='minimize')#, method = 'COBYLA')


**Minimize options**

*BFGS* options:

    gtol : float
        Gradient norm must be less than `gtol` before successful
        termination.
    norm : float
        Order of norm (Inf is max, -Inf is min).
    eps : float or ndarray
        If `jac` is approximated, use this value for the step size.

*Nelder-Mead* options:

    xtol : float
        Relative error in solution `xopt` acceptable for convergence.
    ftol : float
        Relative error in ``fun(xopt)`` acceptable for convergence.
    maxfev : int
        Maximum number of function evaluations to make.

*Newton-CG* options:

    xtol : float
        Average relative error in solution `xopt` acceptable for
        convergence.
    eps : float or ndarray
        If `jac` is approximated, use this value for the step size.

*CG* options:

    gtol : float
        Gradient norm must be less than `gtol` before successful
        termination.
    norm : float
        Order of norm (Inf is max, -Inf is min).
    eps : float or ndarray
        If `jac` is approximated, use this value for the step size.

*Powell* options:

    xtol : float
        Relative error in solution `xopt` acceptable for convergence.
    ftol : float
        Relative error in ``fun(xopt)`` acceptable for convergence.
    maxfev : int
        Maximum number of function evaluations to make.
    direc : ndarray
        Initial set of direction vectors for the Powell method.

*Anneal* options:

    ftol : float
        Relative error in ``fun(x)`` acceptable for convergence.
    schedule : str
        Annealing schedule to use. One of: 'fast', 'cauchy' or
        'boltzmann'.
    T0 : float
        Initial Temperature (estimated as 1.2 times the largest
        cost-function deviation over random points in the range).
    Tf : float
        Final goal temperature.
    maxfev : int
        Maximum number of function evaluations to make.
    maxaccept : int
        Maximum changes to accept.
    boltzmann : float
        Boltzmann constant in acceptance test (increase for less
        stringent test at each temperature).
    learn_rate : float
        Scale constant for adjusting guesses.
    quench, m, n : float
        Parameters to alter fast_sa schedule.
    lower, upper : float or ndarray
        Lower and upper bounds on `x`.
    dwell : int
        The number of times to search the space at each temperature.

*L-BFGS-B* options:

    ftol : float
        The iteration stops when ``(f^k -
        f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
    gtol : float
        The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
        <= gtol`` where ``pg_i`` is the i-th component of the
        projected gradient.
    maxcor : int
        The maximum number of variable metric corrections used to
        define the limited memory matrix. (The limited memory BFGS
        method does not store the full hessian but uses this many terms
        in an approximation to it.)
    maxiter : int
        Maximum number of function evaluations.

*TNC* options:

    ftol : float
        Precision goal for the value of f in the stopping criterion.
        If ftol < 0.0, ftol is set to 0.0.  Defaults to -1.
    xtol : float
        Precision goal for the value of x in the stopping
        criterion (after applying x scaling factors).  If xtol <
        0.0, xtol is set to sqrt(machine_precision).  Defaults to
        -1.
    gtol : float
        Precision goal for the value of the projected gradient in
        the stopping criterion (after applying x scaling factors).
        If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
        Setting it to 0.0 is not recommended.  Defaults to -1.
    scale : list of floats
        Scaling factors to apply to each variable.  If None, the
        factors are up-low for interval bounded variables and
        1+|x| for the others.  Defaults to None.
    offset : float
        Value to subtract from each variable.  If None, the
        offsets are (up+low)/2 for interval bounded variables
        and x for the others.
    maxCGit : int
        Maximum number of hessian*vector evaluations per main
        iteration.  If maxCGit == 0, the direction chosen is
        -gradient.  If maxCGit < 0, maxCGit is set to
        max(1,min(50,n/2)).  Defaults to -1.
    maxiter : int
        Maximum number of function evaluations.  If None, `maxiter` is
        set to max(100, 10*len(x0)).  Defaults to None.
    eta : float
        Severity of the line search.  If < 0 or > 1, set to 0.25.
        Defaults to -1.
    stepmx : float
        Maximum step for the line search.  May be increased during
        call.  If too small, it will be set to 10.0.  Defaults to 0.
    accuracy : float
        Relative precision for finite difference calculations.  If
        <= machine_precision, set to sqrt(machine_precision).
        Defaults to 0.
    minfev : float
        Minimum function value estimate.  Defaults to 0.
    rescale : float
        Scaling factor (in log10) used to trigger f value
        rescaling.  If 0, rescale at each iteration.  If a large
        value, never rescale.  If < 0, rescale is set to 1.3.

*COBYLA* options:

    tol : float
        Final accuracy in the optimization (not precisely guaranteed).
        This is a lower bound on the size of the trust region.
    rhobeg : float
        Reasonable initial changes to the variables.
    maxfev : int
        Maximum number of function evaluations.
    catol : float
        Absolute tolerance for constraint violations (default: 1e-6).

*SLSQP* options:

    ftol : float
        Precision goal for the value of f in the stopping criterion.
    eps : float
        Step size used for numerical approximation of the jacobian.
    maxiter : int
        Maximum number of iterations.

*dogleg* options:

    initial_trust_radius : float
        Initial trust-region radius.
    max_trust_radius : float
        Maximum value of the trust-region radius. No steps that are longer
        than this value will be proposed.
    eta : float
        Trust region related acceptance stringency for proposed steps.
    gtol : float
        Gradient norm must be less than `gtol` before successful
        termination.

*trust-ncg* options:

    See dogleg options.
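
Per the COBYLA entry above, tol is a lower bound on the trust-region size and rhobeg is documented as a single float rather than a per-parameter list. A hedged variant of the calls above that sticks to the documented scalar form (0.1 is an arbitrary illustrative scale; rescaling the parameters to comparable magnitudes is the usual way to get per-parameter step sizes):

# Sketch: COBYLA with a scalar rhobeg, matching the documented signature.
# Reuses whichever fun and x0 are currently defined.
bestfit_scalar_rhobeg = minimize(fun, x0,
                                 method='COBYLA',
                                 options={'tol': 1.0e-3,
                                          'rhobeg': 0.1,
                                          'catol': 1e-6}).x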

In [ ]: