In [1]:
# PyABC imports
from pyabc import (ABCSMC, Distribution, RV,
                   History, MedianEpsilon)
from pyabc.populationstrategy import AdaptivePopulationSize
from pyabc.sampler import MulticoreEvalParallelSampler
In [2]:
# Custom imports
from ionchannelABC import (ion_channel_sum_stats_calculator,
                           IonChannelAcceptor,
                           IonChannelDistance,
                           EfficientMultivariateNormalTransition,
                           plot_parameter_sensitivity,
                           plot_parameters_kde)
In [3]:
# Other necessary imports
import numpy as np
import subprocess
import pandas as pd
import io
import os
import tempfile
In [4]:
# Plotting imports
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
In [6]:
from channels.ikur import ikur as model
# model.sample({})  # uncomment to smoke-test the model
In [7]:
# Unpack experimental data: observed values, experiment index, and error bars
measurements = model.get_experiment_data()
meas_dict = measurements.to_dict()
obs = meas_dict['y']
exp = meas_dict['exp']
errs = meas_dict['errs']
In [8]:
# Uniform prior bounds for each model parameter
limits = dict(g_Kur=(0, 1),
              k_ass1=(0, 100),
              k_ass2=(0, 100),
              k_atau1=(0, 100),
              k_atau2=(0, 100),
              k_atau3=(0, 10),
              k_iss1=(0, 100),
              k_iss2=(0, 100),
              k_itau1=(0, 10),
              k_itau2=(0, 100))
prior = Distribution(**{key: RV("uniform", a, b - a)
                        for key, (a, b) in limits.items()})
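Note that scipy's "uniform" distribution is parameterised by loc and scale, so RV("uniform", a, b - a) spans [a, b]. A quick sanity check along these lines (an optional addition; it assumes Distribution.rvs returns a dict-like Parameter, as in current pyabc):

# Draw one sample from the prior and confirm each marginal lies in its limits
draw = prior.rvs()
assert all(limits[k][0] <= v <= limits[k][1] for k, v in draw.items())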
In [9]:
# Fully qualified parameter names as used by the model
parameters = ['ikur.' + k for k in limits.keys()]
In [10]:
distance_fn = IonChannelDistance(
    obs=obs,
    exp_map=exp,
    err_bars=errs,
    err_th=0.1)
In [11]:
from ionchannelABC import plot_distance_weights
sns.set_context('talk')
grid = plot_distance_weights(model, distance_fn)
grid.savefig('results/ikur/dist_weights.pdf')
In [14]:
grid1, grid2 = plot_parameter_sensitivity(
    model,
    parameters,
    distance_fn,
    sigma=0.1,
    n_samples=500,
    plot_cutoff=0.05)
In [15]:
grid1.savefig('results/ikur/sensitivity.pdf')
grid2.savefig('results/ikur/sensitivity_fit.pdf')
In [16]:
db_path = ('sqlite:///' +
os.path.join(tempfile.gettempdir(), "hl-1_ikur.db"))
print(db_path)
In [17]:
# Enable debug logging for the ABC run and the epsilon schedule
import logging
logging.basicConfig()
abc_logger = logging.getLogger('ABC')
abc_logger.setLevel(logging.DEBUG)
eps_logger = logging.getLogger('Epsilon')
eps_logger.setLevel(logging.DEBUG)
In [18]:
abc = ABCSMC(models=model,
             parameter_priors=prior,
             distance_function=IonChannelDistance(
                 obs=obs,
                 exp_map=exp,
                 err_bars=errs,
                 err_th=0.1),
             population_size=AdaptivePopulationSize(
                 start_nr_particles=2000,
                 mean_cv=0.4,
                 max_population_size=5000,
                 min_population_size=1000),
             summary_statistics=ion_channel_sum_stats_calculator,
             transitions=EfficientMultivariateNormalTransition(),
             eps=MedianEpsilon(),
             sampler=MulticoreEvalParallelSampler(n_procs=12),
             acceptor=IonChannelAcceptor())
In [19]:
abc_id = abc.new(db_path, obs)
In [35]:
# Run until the first stopping criterion is met: target epsilon,
# maximum number of populations, or minimum acceptance rate
history = abc.run(minimum_epsilon=0.05, max_nr_populations=10, min_acceptance_rate=0.01)
In [8]:
# Reload a completed run from the saved results database
db_path = 'sqlite:///results/ikur/hl-1_ikur.db'
history = History(db_path)
history.all_runs()
Out[8]:
In [9]:
history.id = 2  # select the run of interest from all_runs()
In [36]:
# Epsilon schedule across populations
sns.set_context('talk')
evolution = history.get_all_populations()
grid = sns.relplot(x='t', y='epsilon', size='samples',
                   data=evolution[evolution.t >= 0])
grid.savefig('results/ikur/eps_evolution.pdf')
In [37]:
df, w = history.get_distribution(m=0)
In [38]:
g = plot_parameters_kde(df, w, limits, aspect=5, height=1.1)
In [39]:
g.savefig('results/ikur/parameters_kde.pdf')
In [40]:
# Resample parameter sets from the weighted posterior
n_samples = 100
df, w = history.get_distribution(m=0)
th_samples = df.sample(n=n_samples, weights=w, replace=True).to_dict(orient='records')
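The weights returned by get_distribution are normalised to sum to one, so the effective sample size gives a quick check that the posterior is not dominated by a handful of particles (a standard diagnostic, not part of the original analysis):

# Effective sample size of the normalised importance weights
ess = 1.0 / np.sum(np.asarray(w)**2)
print("ESS: {:.1f} of {} particles".format(ess, len(w)))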
In [41]:
# Simulate the model once per posterior parameter sample
outputs = []
for i, th in enumerate(th_samples):
    output = model.sample(pars=th, n_x=50)
    output['sample'] = i
    output['distribution'] = 'post'
    outputs.append(output)
samples = pd.concat(outputs, ignore_index=True)
In [43]:
from ionchannelABC import plot_sim_results
sns.set_context('talk')
g = plot_sim_results(samples, obs=measurements)
# Set axis labels
xlabels = ["voltage, mV", "voltage, mV", "voltage, mV", "time, ms"]
ylabels = ["current density, pA/pF", "activation time constant, ms",
           "steady-state inactivation", "normalised recovery"]
for ax, xl in zip(g.axes.flatten(), xlabels):
    ax.set_xlabel(xl)
for ax, yl in zip(g.axes.flatten(), ylabels):
    ax.set_ylabel(yl)
In [44]:
g.savefig('results/ikur/ikur_sim_results.pdf')
In [45]:
# Treat infinite values as missing, report them, then drop them
pd.options.mode.use_inf_as_na = True
print(samples.isna().sum())
samples = samples.dropna()
In [46]:
# Activation kinetics: single-exponential fit to compare with measurements from Xu
from scipy.optimize import curve_fit
grouped = samples[samples['exp'] == 1].groupby('sample')
def fit_single_exp(group):
    def single_exp(V, a, b, c):
        return a + b * np.exp(-V / c)
    guess = (10, 5, 10)
    popt, _ = curve_fit(single_exp, group.x, group.y, guess)
    return popt
output = grouped.apply(fit_single_exp).apply(pd.Series)
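curve_fit raises a RuntimeError when a fit fails to converge, which would abort the whole groupby apply. A defensive variant (a hypothetical wrapper, not part of the original workflow) marks failed fits as NaN so they can be dropped afterwards:

def fit_single_exp_safe(group):
    def single_exp(V, a, b, c):
        return a + b * np.exp(-V / c)
    try:
        popt, _ = curve_fit(single_exp, group.x, group.y, p0=(10, 5, 10))
        return popt
    except RuntimeError:
        return np.full(3, np.nan)  # fit did not converge for this sample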
In [47]:
print(output.mean())
print(output.std())
In [48]:
import scipy.stats as st
# Empirical median and 95% interval of the fitted parameter across samples
a = output[0].tolist()
rv = st.rv_discrete(values=(a, [1/len(a)]*len(a)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [49]:
b = output[1].tolist()
rv = st.rv_discrete(values=(b, [1/len(b),]*len(b)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [50]:
c = output[2].tolist()
rv = st.rv_discrete(values=(c, [1/len(c),]*len(c)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [51]:
# Parameters of Boltzmann fit to inactivation
grouped = samples[samples.exp == 2].groupby('sample')
def fit_boltzmann(group):
    def boltzmann(V, Vhalf, Shalf):
        return 1 / (1 + np.exp((V - Vhalf) / Shalf))
    guess = (50, 10)
    popt, _ = curve_fit(boltzmann, group.x, group.y, guess)
    return popt
output = grouped.apply(fit_boltzmann).apply(pd.Series)
In [52]:
print(output.mean())
print(output.std())
In [53]:
Vhalf = output[0].tolist()
rv = st.rv_discrete(values=(Vhalf, [1/len(Vhalf),]*len(Vhalf)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [54]:
slope = output[1].tolist()
rv = st.rv_discrete(values=(slope, [1/len(slope),]*len(slope)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [55]:
# Recovery dynamics from Brouillette: single-exponential fit over time
grouped = samples[samples.exp == 3].groupby('sample')
def fit_recovery_exp(group):
    def single_exp(t, a, b, c):
        return a + b * np.exp(-t / c)
    guess = (1, -1, 300)
    popt, _ = curve_fit(single_exp, group.x, group.y, guess)
    return popt
output = grouped.apply(fit_recovery_exp).apply(pd.Series)
In [56]:
print(output.mean())
print(output.std())
In [57]:
tau = output[2].tolist()
rv = st.rv_discrete(values=(tau, [1/len(tau),]*len(tau)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))