Probablistic model for spot latitude distribution

Load spots properties from friedrich:


In [1]:
%matplotlib inline
# Import dev version of friedrich:
import sys, os 
sys.path.insert(0, '../')

import numpy as np
import matplotlib.pyplot as plt
from friedrich.analysis import Transit, Spot, Measurement, MCMCResults
from friedrich.lightcurve import hat11_params_morris_experiment
from glob import glob

archive_paths = sorted(glob('/local/tmp/friedrich/hat11_flip_lambda/chains???.hdf5'))
# archive_paths = sorted(glob('/Users/bmmorris/data/hat11_flip_lambda/chains???.hdf5'))

print('load results')
transits = []
all_times = []
for archive_path in archive_paths:
    m = MCMCResults(archive_path, hat11_params_morris_experiment())
    all_times.extend(m.lc.times.jd)
    spots = m.get_spots()
    transits.append(Transit(spots))


load results

In [2]:
from friedrich.orientation import times_to_occulted_lat_lon
from friedrich.lightcurve import hat11_params_morris_experiment

transit_params = hat11_params_morris_experiment()
all_lats = []
all_lons = []
all_amps = []
all_lats_errors = []
all_spot_times = []
all_BICs = []
for transit in transits:
    for spot in transit.spots: 
        latitude, longitude = times_to_occulted_lat_lon(np.array([spot.t0.value]), 
                                                        transit_params)
        all_lats.append(latitude)
        all_lons.append(longitude)
        all_amps.append(spot.amplitude.value)
        all_spot_times.append(spot.t0.value)
        all_BICs.append(spot.delta_BIC)
        all_lats_errors.append(np.mean([spot.amplitude.upper, spot.amplitude.lower]))
all_lats = np.array(all_lats)
all_lats_errors = np.array(all_lats_errors)
all_lons = np.array(all_lons)
all_amps = np.array(all_amps)
all_spot_times = np.array(all_spot_times)
all_BICs = np.array(all_BICs)

In [16]:
#all_amps
# plt.plot_date(Time(all_spot_times[all_BICs > 10], format='jd').plot_date, 
#               np.degrees(all_lats[all_BICs > 10]), '.')
from astropy.time import Time

import seaborn as sns
sns.set(style='whitegrid')

n_panels = 4
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=(14, 8))
gs = gridspec.GridSpec(2, 4)
ax0 = plt.subplot(gs[0, :])
ax1 = [plt.subplot(gs[1, i]) for i in range(n_panels)]

#fig, ax = plt.subplots(1, n_panels, figsize=(14, 3))
first_spot = all_spot_times[all_BICs > 10].min()
last_spot = all_spot_times[all_BICs > 10].max()
first_to_last = last_spot - first_spot
fraction_gt_median = np.zeros(n_panels)

ax0.plot_date(Time(all_spot_times[all_BICs > 10], format='jd').plot_date, 
              np.degrees(all_lats[all_BICs > 10]), '.')

for i in range(n_panels):
    extent = [-60, 60]
    within_time_bin = ((all_spot_times[all_BICs > 10] - first_spot > 
                        i/n_panels*first_to_last) & 
                       (all_spot_times[all_BICs > 10] - first_spot < 
                        (i+1)/n_panels*first_to_last))
    lats_in_time_bin = np.degrees(all_lats[all_BICs > 10][within_time_bin])
    ax1[i].hist(lats_in_time_bin, 30, range=extent)

    ax1[i].hist(np.degrees(all_lats[all_BICs > 10]), 30,
                range=extent, alpha=0.2, color='k', zorder=10)

    ax1[i].set_xlim(extent)
    
    ax1[i].set(title='Segment {0}'.format(i))
    
    if i == 0:
        segment_zero_mean = np.median(lats_in_time_bin)
    fraction_gt_median[i] = np.count_nonzero(lats_in_time_bin > segment_zero_mean)/len(lats_in_time_bin)

ax1[0].set_ylabel('Frequency')
ax0.set_ylabel('Latitude [deg]')

fig.text(0.5, 0.05, 'Latitude [deg]', ha='center', fontsize=14)
fig.savefig('plots/latitude_distribution.pdf', bbox_inches='tight')
#fig.subplots_adjust()


Out[16]:
<matplotlib.text.Text at 0x7f2814a92e80>

In [4]:
ignore_high_latitudes = ((all_lats > np.radians(-40)) & (all_lats < np.radians(50)))
significance_cutoff = np.atleast_2d(all_BICs > 10).T
significant_latitudes = np.degrees(all_lats[significance_cutoff & ignore_high_latitudes])
significant_times = np.atleast_2d(all_spot_times).T[significance_cutoff & ignore_high_latitudes]
significant_amps = np.atleast_2d(all_amps).T[significance_cutoff & ignore_high_latitudes]
significant_latitudes_errors = np.ones_like(significant_latitudes) * 5

Gaussian mixture model with scikit-learn

Note that it is not well physically motivated to model the distribution as three gaussians, because while it may be a good approximation to model the active latitudes with Gaussians, the perhaps uniform background of spots distributed isotropically on the surface of the star would be modeled with a cosine function.


In [5]:
from sklearn.mixture import GMM

X = np.atleast_2d(significant_latitudes).T

fig, ax = plt.subplots()
M_best = GMM(2).fit(X)#models[np.argmin(AIC)]

x = np.atleast_2d(np.linspace(-60, 60, 1000)).T
# logprob, responsibilities = M_best.eval(x)
logprob = M_best.score(x)
responsibilities = M_best.predict_proba(x)

pdf = np.exp(logprob)
pdf_individual = responsibilities * pdf[:, np.newaxis]

ax.hist(X, 30, normed=True, histtype='stepfilled', alpha=0.4)
ax.plot(x, pdf, '-k')
ax.plot(x, pdf_individual, '--k')
ax.text(0.04, 0.96, "Best-fit Mixture",
        ha='left', va='top', transform=ax.transAxes)
ax.set_xlabel('$x$')
ax.set_ylabel('$p(x)$')


Out[5]:
<matplotlib.text.Text at 0x7f2817870470>

Modified Gaussian Mixture Model with emcee

...with a cosine-bias centered on the stellar equator.


In [6]:
import emcee
from scipy.misc import logsumexp

def gaussian(x, mean, lnvar, amp):
    var = np.exp(lnvar)
    return amp/np.sqrt(2*np.pi*var) * np.exp(-0.5 * (x - mean)**2 / var)

def lnlikelihood_gaussian(x, yerr, mean, lnvar, amp):
    var = np.exp(lnvar) + yerr**2
#     return  -0.5 * (x - mean)**2 / var - 0.5 * np.log(2*np.pi*var) + np.log(amp)
    return  -0.5 * ((x - mean)**2 / var + np.log(var)) + np.log(amp)

def lnlikelihood_sum_gaussians(parameters, x, yerr):
    a1, mean_latitude, lnv1, lnv2, new_i_s = parameters
    a2 = 1 - a1
    delta_i_s = transit_params.inc_stellar - new_i_s
    l1 = -mean_latitude - delta_i_s
    l2 = mean_latitude - delta_i_s

    ln_likes = (lnlikelihood_gaussian(x, yerr, l1, lnv1, a1),
                lnlikelihood_gaussian(x, yerr, l2, lnv2, a2))
    return np.sum(np.logaddexp.reduce(ln_likes)), ln_likes

def minimize_this(parameters, x, yerr):
    return -1*lnlikelihood_sum_gaussians(parameters, x, yerr)

def model(parameters, x):
    a1, mean_latitude, lnv1, lnv2, new_i_s = parameters
    a2 = 1 - a1
    delta_i_s = transit_params.inc_stellar - new_i_s
    l1 = -mean_latitude - delta_i_s
    l2 = mean_latitude - delta_i_s
    return (gaussian(x, l1, lnv1, a1) + gaussian(x, l2, lnv2, a2))

def model_components(parameters, x):
    a1, mean_latitude, lnv1, lnv2, new_i_s = parameters
    a2 = 1 - a1
    delta_i_s = transit_params.inc_stellar - new_i_s
    l1 = -mean_latitude - delta_i_s
    l2 = mean_latitude - delta_i_s
    return np.array([gaussian(x, l1, lnv1, a1), gaussian(x, l2, lnv2, a2)]).T

def lnprior(parameters):
    a1, mean_latitude, lnv1, lnv2, new_i_s = parameters
    v1, v2 = np.exp([lnv1, lnv2])
    if (mean_latitude > 3 and 0 > new_i_s > -90 and 1 < v1 < 20**2 and 1 < v2 < 20**2 and 0 < a1 < 1):
        return 0.0
    return -np.inf

def lnprob(parameters, x, yerr):
    lp = lnprior(parameters)
    if not np.isfinite(lp):
        return -np.inf, None
    lnlike, blobs = lnlikelihood_sum_gaussians(parameters, x, yerr)
    return lp + lnlike, blobs

def run_emcee(initp, y, error, n_steps, burnin, threads=4):
    
    ndim, nwalkers = len(initp), 4*len(initp)
    p0 = [np.array(initp) + 1e-3*np.random.randn(ndim) for i in range(nwalkers)]
    
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, 
                                    args=(y, error), threads=threads)

    #pos = sampler.run_mcmc(p0, 500)[0]
    samples = sampler.run_mcmc(p0, n_steps) 
    samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
    bestp = np.median(samples, axis=0)
    return samples, bestp

In [7]:
initp = [0.6, 19, np.log(8**2), np.log(8**2), -80]

plt.hist(significant_latitudes, 30, normed=True, alpha=0.2)
test_lats = np.linspace(-60, 60, 500)
plt.plot(test_lats, model(initp, test_lats), label='Initial parameters')
plt.legend(loc='upper left')
plt.xlabel('Latitude $\ell$ [deg]', fontsize=15)
plt.ylabel('$p(\ell)$', fontsize=18)


Out[7]:
<matplotlib.text.Text at 0x7f281711c6d8>

In [8]:
# ndim, nwalkers = len(initp), 4*len(initp)
# p0 = [np.array(initp) + 1e-3*np.random.randn(ndim) for i in range(nwalkers)]
# sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, 
#                                 args=(significant_latitudes, significant_latitudes_errors),
#                                 threads=4)

# pos = sampler.run_mcmc(p0, 500)[0]
# samples = sampler.run_mcmc(pos, 2500)
# bestp = np.median(samples, axis=0)

samples, bestp = run_emcee(initp, significant_latitudes, 
                           significant_latitudes_errors, 
                           n_steps=10000, burnin=2000)

bestp_std = np.std(samples, axis=0)

In [9]:
plt.hist(significant_latitudes, 30, normed=True, alpha=0.2)
#test_lats = np.linspace(-60, 60)
plt.plot(test_lats, model(initp, test_lats), label='Initial parameters')
plt.plot(test_lats, model(bestp, test_lats), lw=2, label='Best-fit model')
plt.legend(loc='upper left')
plt.xlabel('Latitude $\ell$ [deg]', fontsize=15)
plt.ylabel('$p(\ell)$', fontsize=18)
print(bestp)


[  2.33174843e-03   3.09460525e+02   2.92810654e+00   5.81273560e+00
  -4.42974769e+01]

In [10]:
import corner 

# a1, m1, v1, a2, m2, v2
labels = "a1, mean_latitude, lnv1, lnv2, new_i_s".split(', ')
fig = corner.corner(samples, labels=labels)



In [11]:
mcmc_solutions = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]), 
                     zip(*np.percentile(samples, [16, 50, 84], axis=0)))
a1_mcmc, mean_latitude_mcmc, lnv1_mcmc, lnv2_mcmc, new_i_s_mcmc = mcmc_solutions
print(new_i_s_mcmc)


(-44.29747686342462, 30.686895046200924, 31.116904562239611)

In [12]:
delta_i_s = transit_params.inc_stellar - bestp[4]
l1 = -bestp[1] - delta_i_s
l2 = bestp[1] - delta_i_s
components = np.array([gaussian(test_lats, l1, bestp[2], bestp[0]), 
                       gaussian(test_lats, l2, bestp[3], 1-bestp[0])]).T

plt.hist(significant_latitudes, 30, normed=True, 
         alpha=0.5, histtype='stepfilled', color='k')

plt.title('Model components')
plt.plot(test_lats, components, lw=2)
plt.plot(test_lats, model(bestp, test_lats), ls='--', lw=2)
plt.legend(loc='upper left')
plt.xlabel('Latitude $\ell$ [deg]', fontsize=15)
plt.ylabel('$p(\ell)$', fontsize=18)
plt.show()


/astro/apps6/anaconda2.0/envs/py3/lib/python3.5/site-packages/matplotlib/axes/_axes.py:519: UserWarning: No labelled objects found. Use label='...' kwarg on individual plots.
  warnings.warn("No labelled objects found. "

In [13]:
print("mean latitudes: {0}".format(bestp[1]))
print("i_s: {0}".format(bestp[4]))


mean latitudes: 309.46052451362914
i_s: -44.29747686342462

Draw from this distribution:


In [14]:
cdf = np.cumsum(model(bestp, test_lats))
cdf /= np.max(cdf)

pred_lat = lambda x: np.interp(x, cdf, test_lats)

plt.plot(test_lats, cdf)


Out[14]:
[<matplotlib.lines.Line2D at 0x7f2815378cc0>]

In [15]:
plt.hist(pred_lat(np.random.rand(1000)), 30);



calculate probability of being in either latitude:


In [15]:
norm = 0.0
post_prob_g1 = np.zeros(len(significant_latitudes))
post_prob_g2 = np.zeros(len(significant_latitudes))
for i in range(sampler.chain.shape[1]):
    for j in range(sampler.chain.shape[2]):
        like_g1, like_g2 = sampler.blobs[i][j]
        post_prob_g1 += np.exp(like_g1 - np.logaddexp(like_g1, like_g2))
        post_prob_g2 += np.exp(like_g2 - np.logaddexp(like_g1, like_g2))
        norm += 1
post_prob_g1 /= norm
post_prob_g2 /= norm


---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-15-ee9518293629> in <module>()
      2 post_prob_g1 = np.zeros(len(significant_latitudes))
      3 post_prob_g2 = np.zeros(len(significant_latitudes))
----> 4 for i in range(sampler.chain.shape[1]):
      5     for j in range(sampler.chain.shape[2]):
      6         like_g1, like_g2 = sampler.blobs[i][j]

NameError: name 'sampler' is not defined

In [16]:
fig, ax = plt.subplots(1, 2, figsize=(14, 6))
ax[0].plot_date(Time(significant_times, format='jd').plot_date, 
             significant_latitudes, 'ko', alpha=0.4)


likely_g1 = post_prob_g1 > 0.95
likely_g2 = post_prob_g2 > 0.95

ax[0].plot_date(Time(significant_times[likely_g1], format='jd').plot_date, 
             significant_latitudes[likely_g1], 'ro')
ax[0].plot_date(Time(significant_times[likely_g2], format='jd').plot_date, 
             significant_latitudes[likely_g2], 'bo')
ax[0].grid()
for l in ax[0].get_xticklabels():
    l.set_rotation(45)
    l.set_ha('right')

ax[1].hist(significant_amps[likely_g2], 20, histtype='stepfilled', 
           color='b', alpha=0.5, label='South')

ax[1].hist(significant_amps[likely_g1], 20, histtype='stepfilled', 
           color='r', alpha=0.5, label='North')
ax[1].legend()

ax[0].set_ylabel('Latitude [deg]')
ax[1].set(ylabel='Frequency', xlabel='Spot amplitude $\Delta F/F$')

plt.show()



Repeat the above analysis for the Sun:


In [17]:
paths = glob('/local/tmp/Mt_Wilson_Tilt/*/sspot??.dat')
# paths = glob('/Users/bmmorris/data/Mt_Wilson_Tilt/*/sspot??.dat')

from astropy.time import Time
import astropy.units as u
from astropy.table import Table

def split_interval(string, n, cast_to_type=float):
    return [cast_to_type(string[i:i+n]) for i in range(0, len(string), n)]

all_years_array = []

header = ("jd n_spots_leading n_spots_following n_spots_day_1 n_spots_day_2 "
          "rotation_rate latitude_drift latitude_day_1 latitude_day_2 longitude_day_1 "
          "longitude_day_2 area_day_1 area_day_2 group_latitude_day_1 group_longitude_day_1 "
          "group_area_day_1 group_area_day_2 polarity_day_1 polarity_change tilt_day_1 tilt_day_2 "
          "group_rotation_rate group_latitude_drift").split()

for path in paths:
    f = open(path).read().splitlines()

    n_rows = len(f) // 3
    n_columns = 23#18
    yearly_array = np.zeros((n_rows, n_columns))

    for i in range(n_rows):
        # First five ints specify time, afterwards specify sunspot data
        int_list = split_interval(f[0+i*3][:18], 2, int)
        month, day, year_minus_1900, hour, minute = int_list[:5]
        year = year_minus_1900 + 1900
        jd = Time("{year:d}-{month:02d}-{day:02d} {hour:02d}:{minute:02d}"
                  .format(**locals())).jd
        row = [jd] + int_list[5:] + split_interval(f[1+i*3], 7) + split_interval(f[2+i*3][1:], 7)
        yearly_array[i, :] = row

    all_years_array.append(yearly_array)

table = Table(np.vstack(all_years_array), names=header)


WARNING: ErfaWarning: ERFA function "dtf2d" yielded 1 of "dubious year (Note 6)" [astropy._erfa.core]

In [18]:
# from sklearn.mixture import GMM

# X = np.atleast_2d(table['latitude_day_1']).T

# fig, ax = plt.subplots()
# M_best = GMM(2).fit(X)#models[np.argmin(AIC)]

# x = np.atleast_2d(np.linspace(-60, 60, 1000)).T
# # logprob, responsibilities = M_best.eval(x)
# logprob = M_best.score(x)
# responsibilities = M_best.predict_proba(x)

# pdf = np.exp(logprob)
# pdf_individual = responsibilities * pdf[:, np.newaxis]

# ax.hist(X, 30, normed=True, histtype='stepfilled', alpha=0.4)
# ax.plot(x, pdf, '-k')
# ax.plot(x, pdf_individual, '--k')
# ax.text(0.04, 0.96, "Best-fit Mixture",
#         ha='left', va='top', transform=ax.transAxes)
# ax.set_xlabel('$x$')
# ax.set_ylabel('$p(x)$')

In [19]:
initp = [0.5, 15, np.log(4**2), np.log(4**2), transit_params.inc_stellar]

ndim, nwalkers = len(initp), 4*len(initp)
#skip_interval = 10
import datetime
start_time = Time('1965-01-01') # solar cycle 20
end_time = Time('1969-01-01')

times_in_range = (table['jd'] > start_time.jd) & (table['jd'] < end_time.jd)
solar_lats = table['latitude_day_1'][times_in_range]
solar_lats_error = np.ones_like(table['latitude_day_1'][times_in_range])
# p0 = [np.array(initp) + 1e-3*np.random.randn(ndim) for i in range(nwalkers)]
# sampler_sun = emcee.EnsembleSampler(nwalkers, ndim, lnprob, 
#                                 args=(solar_lats, solar_lats_error),
#                                 threads=4)

# #pos = sampler_sun.run_mcmc(p0, 500)[0]
# #sampler_sun.reset()
# p0 = sampler_sun.run_mcmc(p0, 1000)

samples_sun, bestp_sun = run_emcee(initp, solar_lats, 
                                   solar_lats_error, 
                                   n_steps=1000, burnin=100)

In [20]:
import corner 

# a1, m1, v1, a2, m2, v2
labels = "a1, mean_latitude, lnv1, lnv2, new_i_s".split(', ')
fig = corner.corner(samples_sun, labels=labels)



In [21]:
bestp_sun


Out[21]:
array([  0.34689556,  18.16487928,   3.36121079,   3.68944543,  79.25564418])

In [22]:
# bestp_sun = np.median(samples_sun, axis=0)

delta_i_s = transit_params.inc_stellar - bestp_sun[4]
l1 = -bestp_sun[1] - delta_i_s
l2 = bestp_sun[1] - delta_i_s
components = np.array([gaussian(test_lats, l1, bestp_sun[2], bestp_sun[0]), 
                       gaussian(test_lats, l2, bestp_sun[3], 1-bestp_sun[0])]).T

plt.hist(solar_lats, 30, normed=True, 
         alpha=0.5, histtype='stepfilled', color='k')

plt.title('Model components')
plt.plot(test_lats, components, lw=2)
plt.plot(test_lats, model(bestp_sun, test_lats), ls='--', lw=2)
plt.legend(loc='upper left')
plt.xlabel('Latitude $\ell$ [deg]', fontsize=15)
plt.ylabel('$p(\ell)$', fontsize=18)
plt.show()


/astro/apps6/anaconda2.0/envs/py3/lib/python3.5/site-packages/matplotlib/axes/_axes.py:519: UserWarning: No labelled objects found. Use label='...' kwarg on individual plots.
  warnings.warn("No labelled objects found. "

In [23]:
#dropbox_path = '/Users/bmmorris/Dropbox/Apps/ShareLaTeX/STSP_HAT-P-11/'
dropbox_path = '/astro/users/bmmorris/Dropbox/Apps/ShareLaTeX/STSP_HAT-P-11/'

fig, ax = plt.subplots(1, 2, figsize=(8,4), sharey=True)

n_bins = 30
hist_kwargs = dict(normed=True, histtype='stepfilled', 
                   alpha=0.4, color='k')

ax[0].hist(significant_latitudes, n_bins, **hist_kwargs)
ax[0].plot(test_lats, model_components(bestp, test_lats), '--k')
ax[0].plot(test_lats, model(bestp, test_lats), '-k', lw=2)
ax[0].set_xlabel('Latitude $\ell$ [degrees]', fontsize=15)
ax[0].set_ylabel('$p(\ell)$', fontsize=18)
ax[0].set_title('HAT-P-11')

ax[1].hist(solar_lats, n_bins, **hist_kwargs)
ax[1].plot(test_lats, model_components(bestp_sun, test_lats), '--k')
ax[1].plot(test_lats, model(bestp_sun, test_lats), '-k', lw=2)
ax[1].set_xlabel('Latitude $\ell$ [degrees]', fontsize=15)
#ax[1].set_ylabel('$p(\ell)$', fontsize=18)
ax[1].set_title('Sun')
plt.draw()
newticks = [l.get_text() for l in ax[1].get_xticklabels()]
ax[1].set_xticklabels([''] + newticks[1:])

ax[0].annotate('2009-2013', (-52, 0.038), va='bottom', fontsize=14)
ax[1].annotate('{0}-{1}'.format(start_time.datetime.year, end_time.datetime.year), 
               (-52, 0.038), va='bottom', fontsize=14)

fig.suptitle('Spot Latitude Distribution', fontsize=15, va='bottom')
fig.subplots_adjust(wspace=0.0)
fig.savefig(os.path.join(dropbox_path, 'figures', 'latitude_distribution.png'), 
            bbox_inches='tight', dpi=600)


Find asymmetric four-year spans:


In [24]:
year_bins = np.arange(1919, 1983)

asymmetry = []
for i in range(len(year_bins)):
    start = Time(datetime.datetime(year_bins[i]-2, 1, 1))
    end = start + 2*u.year
    in_time_bin = (table['jd'] > start.jd) & (table['jd'] < end.jd)
    north_count = np.count_nonzero(table['latitude_day_1'][in_time_bin] > 0)
    south_count = np.count_nonzero(table['latitude_day_1'][in_time_bin] < 0)

    asymmetry.append((north_count - south_count)/(north_count + south_count))
asymmetry = np.array(asymmetry)


WARNING: ErfaWarning: ERFA function "dtf2d" yielded 1 of "dubious year (Note 6)" [astropy._erfa.core]
WARNING: ErfaWarning: ERFA function "utctai" yielded 1 of "dubious year (Note 3)" [astropy._erfa.core]
WARNING: ErfaWarning: ERFA function "taiutc" yielded 1 of "dubious year (Note 4)" [astropy._erfa.core]

In [25]:
plt.plot(year_bins, asymmetry)
plt.xlabel('Year')
plt.ylabel('N-S / N+S')
plt.show()



In [103]:
bestp_sun


Out[103]:
array([  0.34689556,  18.16487928,   3.36121079,   3.68944543,  79.25564418])

In [110]:
year_bins = np.arange(1917, 1986, 2)

mt_wilson_bestps = np.zeros((len(bestp), len(year_bins)))
mt_wilson_stds = np.zeros((len(bestp), len(year_bins)))
labels = "a1, mean_latitude, lnv1, lnv2, new_i_s".split(', ')

initp_sun = np.array([0.5, 15, 5, 5, 80])

for i in range(len(year_bins)):
    start = Time(datetime.datetime(year_bins[i], 1, 1))
    end = start + 2*u.year
    in_time_bin = (table['jd'] > start.jd) & (table['jd'] < end.jd)
    
    solar_lats = table['latitude_day_1'][in_time_bin]
    solar_lats_error = np.ones_like(table['latitude_day_1'][in_time_bin])
    
    samples_i, bestp_i = run_emcee(bestp_sun, solar_lats, 
                                   solar_lats_error, 
                                   n_steps=1000, burnin=800)
    
    fig = corner.corner(samples_i, labels=labels)
    
    plt.figure()
    plt.hist(solar_lats, 30, normed=True, 
         alpha=0.5, histtype='stepfilled', color='k')
    plt.plot(test_lats, model(bestp_i, test_lats), ls='--', lw=2)
    plt.legend(loc='upper left')
    plt.xlabel('Latitude $\ell$ [deg]', fontsize=15)
    plt.ylabel('$p(\ell)$', fontsize=18)
    plt.show()
    
    mt_wilson_bestps[:, i] = np.median(samples_i, axis=0)
    mt_wilson_stds[:, i] = np.std(samples_i, axis=0)


WARNING: ErfaWarning: ERFA function "dtf2d" yielded 1 of "dubious year (Note 6)" [astropy._erfa.core]
WARNING: ErfaWarning: ERFA function "utctai" yielded 1 of "dubious year (Note 3)" [astropy._erfa.core]
WARNING: ErfaWarning: ERFA function "taiutc" yielded 1 of "dubious year (Note 4)" [astropy._erfa.core]
/astro/apps6/anaconda2.0/envs/py3/lib/python3.5/site-packages/matplotlib/axes/_axes.py:519: UserWarning: No labelled objects found. Use label='...' kwarg on individual plots.
  warnings.warn("No labelled objects found. "
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-110-1122a4fadcdc> in <module>()
     17     samples_i, bestp_i = run_emcee(bestp_sun, solar_lats, 
     18                                    solar_lats_error,
---> 19                                    n_steps=1000, burnin=800)
     20 
     21     fig = corner.corner(samples_i, labels=labels)

<ipython-input-109-832cdb7e1845> in run_emcee(initp, y, error, n_steps, burnin, threads)
     64 
     65     #pos = sampler.run_mcmc(p0, 500)[0]
---> 66     samples = sampler.run_mcmc(p0, n_steps)
     67     samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
     68     bestp = np.median(samples, axis=0)

/astro/apps6/anaconda2.0/envs/py3/lib/python3.5/site-packages/emcee/sampler.py in run_mcmc(self, pos0, N, rstate0, lnprob0, **kwargs)
    155         """
    156         for results in self.sample(pos0, lnprob0, rstate0, iterations=N,
--> 157                                    **kwargs):
    158             pass
    159         return results

/astro/apps6/anaconda2.0/envs/py3/lib/python3.5/site-packages/emcee/ensemble.py in sample(self, p0, lnprob0, rstate0, blobs0, iterations, thin, storechain, mh_proposal)
    257                 for S0, S1 in [(first, second), (second, first)]:
    258                     q, newlnp, acc, blob = self._propose_stretch(p[S0], p[S1],
--> 259                                                                  lnprob[S0])
    260                     if np.any(acc):
    261                         # Update the positions, log probabilities and

/astro/apps6/anaconda2.0/envs/py3/lib/python3.5/site-packages/emcee/ensemble.py in _propose_stretch(self, p0, p1, lnprob0)
    330         # Calculate the proposed positions and the log-probability there.
    331         q = c[rint] - zz[:, np.newaxis] * (c[rint] - s)
--> 332         newlnprob, blob = self._get_lnprob(q)
    333 
    334         # Decide whether or not the proposals should be accepted.

/astro/apps6/anaconda2.0/envs/py3/lib/python3.5/site-packages/emcee/ensemble.py in _get_lnprob(self, pos)
    380 
    381         # Run the log-probability calculations (optionally in parallel).
--> 382         results = list(M(self.lnprobfn, [p[i] for i in range(len(p))]))
    383 
    384         try:

/astro/apps6/anaconda2.0/envs/py3/lib/python3.5/site-packages/emcee/interruptible_pool.py in map(self, func, iterable, chunksize)
     92         while True:
     93             try:
---> 94                 return r.get(self.wait_timeout)
     95             except TimeoutError:
     96                 pass

/astro/apps6/anaconda2.0/envs/py3/lib/python3.5/multiprocessing/pool.py in get(self, timeout)
    600 
    601     def get(self, timeout=None):
--> 602         self.wait(timeout)
    603         if not self.ready():
    604             raise TimeoutError

/astro/apps6/anaconda2.0/envs/py3/lib/python3.5/multiprocessing/pool.py in wait(self, timeout)
    597 
    598     def wait(self, timeout=None):
--> 599         self._event.wait(timeout)
    600 
    601     def get(self, timeout=None):

/astro/apps6/anaconda2.0/envs/py3/lib/python3.5/threading.py in wait(self, timeout)
    547             signaled = self._flag
    548             if not signaled:
--> 549                 signaled = self._cond.wait(timeout)
    550             return signaled
    551 

/astro/apps6/anaconda2.0/envs/py3/lib/python3.5/threading.py in wait(self, timeout)
    295             else:
    296                 if timeout > 0:
--> 297                     gotit = waiter.acquire(True, timeout)
    298                 else:
    299                     gotit = waiter.acquire(False)

KeyboardInterrupt: 

In [94]:
mt_wilson_bestps


Out[94]:
array([[  0.51038316,   0.41294006,   0.49903831,   0.44653147,
          0.51408992,   0.5317357 ,   0.44862293,   0.53307352,
          0.46781833,   0.49364815,   0.42230505,   0.28234095,
          0.34504214,   0.47755881,   0.52482294,   0.44676182,
          0.55526631,   0.60618787],
       [ 13.0239991 ,  10.77728027,  15.98377109,   9.63382758,
         20.64072803,  13.77271168,   9.6119796 ,  16.90770227,
         11.383151  ,  21.43536272,  15.72810917,  10.07248157,
         18.20293701,  12.34323431,   9.91853135,  17.52623404,
         12.56821654,   7.67434875],
       [  3.39159643,   4.5656577 ,   3.38902998,   3.25906219,
          3.78933978,   3.50655382,   3.99066525,   3.775451  ,
          3.63082249,   3.48549633,   3.82073317,   2.86679574,
          3.35880711,   3.42857494,   3.10160372,   3.86545659,
          3.64335508,   1.8670005 ],
       [  3.33426278,   4.59475074,   3.57679223,   3.02116803,
          3.84132236,   3.73199864,   2.96761558,   3.65779716,
          3.33357061,   3.81164041,   4.21208341,   3.6759998 ,
          3.68795065,   3.39903709,   3.31472635,   3.92393241,
          3.22916358,   2.51237693],
       [ 79.2142842 ,  82.7103288 ,  80.58159339,  80.84726698,
         78.71503192,  81.18309935,  81.52890786,  80.95614388,
         81.03213175,  83.00159706,  80.56317283,  80.13157803,
         79.24215977,  80.56124758,  78.77273899,  79.62410666,
         80.57914979,  76.8816044 ]])

In [95]:
np.min(mt_wilson_bestps[1:3, :], axis=0)


Out[95]:
array([ 3.39159643,  4.5656577 ,  3.38902998,  3.25906219,  3.78933978,
        3.50655382,  3.99066525,  3.775451  ,  3.63082249,  3.48549633,
        3.82073317,  2.86679574,  3.35880711,  3.42857494,  3.10160372,
        3.86545659,  3.64335508,  1.8670005 ])

In [102]:
#a1, mean_latitude, lnv1, lnv2, new_i_s = parameters
#x = np.mean(mt_wilson_bestps[2:4, :], axis=0)
x = np.min(mt_wilson_bestps[2:4, :], axis=0)
xerr = mt_wilson_stds[2, :]

x = np.exp(x)**0.5
xerr = 0.5 * np.exp(x)**0.5 * xerr

y = mt_wilson_bestps[1, :]
yerr = mt_wilson_stds[1, :]

plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt='.')

plt.plot(np.exp(bestp[2])**0.5, bestp[1], 'rs', markersize=10)#, y, xerr=xerr, yerr=yerr, fmt='.')
#plt.xlim([3, 4])
#plt.xlim([3, 4])
plt.xlabel('$\log \sigma^2$')
plt.ylabel('Mean Latitude')


Out[102]:
<matplotlib.text.Text at 0x7ff1890c8a58>

In [ ]:


In [ ]: