In [1]:
#Load a specific lightcurve file for testing purposes
import json
import sys
import numpy as np
import emcee
import george
import matplotlib.pyplot as plt
sys.path.append('../classification')
import os
import iminuit
filenames = ['../gen_lightcurves/gp_smoothed/SN2005el_gpsmoothed.json','../gen_lightcurves/gp_smoothed/SDSS-II_SN_18165_gpsmoothed.json']

file_data = {}

for filename in filenames:
    with open(filename, 'r') as f:
         temp_file_data = json.load(f)
    print(temp_file_data.keys())
    for key in temp_file_data:
        file_data[key] = temp_file_data[key]


dict_keys(['r', 'Y', 'u', 'g', 'i'])
dict_keys(['r', 'g', 'i'])

In [2]:
for filt in file_data:
    mjd = np.array(file_data[filt]['mjd'])
    mag = np.array(file_data[filt]['mag'])
    magerr = np.array(file_data[filt]['dmag'])
    modelmjd = file_data[filt]['modeldate']
    modelmag = file_data[filt]['modelmag']
    
    plt.errorbar(mjd,mag,yerr=magerr)
    plt.plot(modelmjd, modelmag)
    plt.title(filt)
    plt.show()



In [3]:
from george import kernels
#kernel = kernels.ConstantKernel(10) * kernels.ExpSquaredKernel(100.) * kernels.DotProductKernel()
#kernel = kernels.ExpSquaredKernel(10.) * kernels.DotProductKernel()
#kernel =  kernels.Matern32Kernel(100.)*kernels.ConstantKernel(100.) * kernels.ExpSquaredKernel(100.)
#kernel =  kernels.Matern32Kernel(50.) * kernels.ConstantKernel(50.) * kernels.ExpSquaredKernel(100) * kernels.DotProductKernel()
#kernel = kernels.Matern32Kernel(50.) * kernels.ExpSquaredKernel(500)
#kernel = kernels.ConstantKernel(50.) * kernels.Matern52Kernel(50.) * kernels.DotProductKernel() + kernels.WhiteKernel(0.5)
kernel = kernels.ConstantKernel(50.) * kernels.Matern52Kernel(50.)
#kernel = kernels.ExpSquaredKernel(10.) + kernels.DotProductKernel()
#k1 = 6.0**2 * kernels.Matern52Kernel(100.)
#k2 = 0.66**2 * kernels.RationalQuadraticKernel(0.78, 1.2**2)
#k3 = 0.18**2 * kernels.ExpSquaredKernel(1.) + kernels.WhiteKernel(0.5)
#kernel = k1 + k3
#print(len(kernel))

In [27]:
import scipy.optimize as op

output_lcurves = {}

for filter_name in file_data:
    if filter_name == 'B__CSP':
        continue
    t = np.array(file_data[filter_name]['mjd'])
    y = np.array(file_data[filter_name]['mag'])
    err = np.array(file_data[filter_name]['dmag'])

    #gp = george.GP(kernel, mean=np.mean(y))
    gp = george.GP(kernel)

    # Define the objective function (negative log-likelihood in this case).
    
    # Define the objective function (negative log-likelihood in this case).
    def nll(p):
        gp.set_parameter_vector(p)
        ll = gp.log_likelihood(y, quiet=True)
        return -ll if np.isfinite(ll) else 1e25

    # And the gradient of the objective function.
    def grad_nll(p):
        gp.set_parameter_vector(p)
        return -gp.grad_log_likelihood(y, quiet=True)

    # You need to compute the GP once before starting the optimization.
    gp.compute(t, err)

    # Print the initial ln-likelihood.
    #print(gp.lnlikelihood(y))

    # Run the optimization routine.
    p0 = gp.get_parameter_vector()    #results = op.minimize(nll, p0, jac=grad_nll, bounds=[(np.log(25.),np.log(5000))])
    min_val = np.log(10**-4.)
    max_val = np.log(10**5.)
    min_val_exp = np.log(10**-3.)
    max_val_exp = np.log(10**4.)
    
    best_lnlikelihood = 1e25
    p = [p0*10**-i for i in range(-2,3)]

    for p0 in p:
        results = op.minimize(nll, p0, jac=grad_nll, bounds=[(min_val, max_val)]*len(kernel))
        print(type(results.x))
        if results.fun < best_lnlikelihood:
            final_results = results
    # Update the kernel and print the final log-likelihood.
    print(type(final_results.x))
    gp.kernel.set_parameter_vector(final_results.x)
    #print(filter_name,final_results.success, final_results.message, final_results.fun, final_results.x)
    #print(gp.lnlikelihood(y))
    #print(gp.kernel.pars)
    
    x = np.linspace(min(t), max(t), 100)
    mu, cov = gp.predict(y,x)
    std = np.sqrt(np.diag(cov))
    #plt.plot(x,mu, color='r')
    #plt.errorbar(t,y,yerr=err)
    #plt.show()
    output_lcurves[filter_name] = {
        't': t,
        'y': y,
        'err': err,
        'out_t': x,
        'out_y': mu,
        'out_err': std,
        'pars': gp.kernel.parameter_vector
    }


[ 6.20789476  8.82427353]
[ 6.20789476  8.82427353]
[ 6.20789387  8.82427266]
[ 6.20789478  8.82427349]
[ 6.20789387  8.82427266]
[ 6.20789387  8.82427266]
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-27-aa35240c61f2> in <module>()
     49     # Update the kernel and print the final log-likelihood.
     50     print(final_results.x)
---> 51     gp.kernel.set_parameter_vector(final_results.x)
     52     #print(filter_name,final_results.success, final_results.message, final_results.fun, final_results.x)
     53     #print(gp.lnlikelihood(y))

TypeError: 'numpy.ndarray' object is not callable

In [22]:
#print(output_lcurves)

In [24]:
for filt in output_lcurves:
    #if not 'rpri' in filt:
    #    continue
    mjd = np.array(output_lcurves[filt]['t'])
    mag = np.array(output_lcurves[filt]['y'])
    magerr = np.array(output_lcurves[filt]['err'])
    modelmjd = output_lcurves[filt]['out_t']
    modelmag = output_lcurves[filt]['out_y']
    pars = output_lcurves[filt]['pars']
    print(10**pars)
    
    plt.errorbar(mjd,mag,yerr=magerr, alpha = 0.6, linewidth=4)
    plt.plot(modelmjd, modelmag, color='black')
    plt.title(filt)
    plt.show()


[  1.61396694e+06   6.67226751e+08]
[  1.93780674e+05   3.25780785e+11]
[  3.03183651e+05   1.76473845e+08]
[  2.16336881e+05   3.25780785e+11]
[  2.12548578e+05   3.25780785e+11]

In [ ]:


In [ ]: