In [67]:
from channels.icat_markov import (protocols,
                                  observations,
                                  simulations,
                                  times,
                                  summary_statistics)
In [68]:
import warnings
from functools import wraps

def simulate_model(**pars):
    """Wrapper function around simulations; returns None if a parameter
    cannot be set or a simulation fails."""
    data = []
    for sim, time in zip(simulations, times):
        for p, v in pars.items():
            try:
                sim.set_constant(p, v)
            except Exception:
                warnings.warn('Could not set value of {}'.format(p))
                return None
        sim.reset()
        try:
            data.append(sim.run(time, log=['environment.time', 'icat.i_CaT',
                                           'icat.g', 'icat.a', 'icat.r']))
        except Exception:
            # Failed simulation
            return None
    return data

def log_transform(f):
    """Decorator: convert keyword arguments prefixed with 'log' back to linear scale."""
    @wraps(f)
    def log_transformed(**log_kwargs):
        kwargs = dict([(key[4:], 10**value) if key.startswith("log")
                       else (key, value)
                       for key, value in log_kwargs.items()])
        return f(**kwargs)
    return log_transformed

def log_model(x):
    return log_transform(simulate_model)(**x)
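To make the transform concrete, a throwaway call (illustrative only, not from the original run) shows how log-prefixed keys are mapped:
In [ ]:
# Keys prefixed with "log_" are renamed and exponentiated in base 10;
# all other keys pass through unchanged.
log_transform(lambda **kw: kw)(**{'log_icat.p_1': 2.0, 'icat.p_2': 0.5})
# -> {'icat.p_1': 100.0, 'icat.p_2': 0.5}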
In [69]:
test = simulate_model()
In [70]:
ss = summary_statistics(test)
In [71]:
assert len(ss) == len(observations)
In [79]:
from pyabc import Distribution, RV
limits = {'icat.g_CaT': (0., 2.),
          'icat.E_Ca_offset': (0., 200.),  # accounts for reversal potential difference from Nernst
          'log_icat.p_1': (-50., 50.),
          'icat.p_2': (0., 1.),
          'log_icat.p_3': (-50., 50.),
          'icat.p_4': (0., 2.),  # upper bound increased as samples clustered at the boundary
          'log_icat.p_5': (-50., 50.),
          'icat.p_6': (0., 1.),
          'log_icat.p_7': (-50., 50.),
          'icat.p_8': (0., 1.)}
prior = Distribution(**{key: RV("uniform", a, b - a)
                        for key, (a, b) in limits.items()})
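pyabc's RV uses the scipy.stats parameterisation, so RV("uniform", a, b - a) is uniform on [a, b]. A quick sanity check (illustrative only):
In [ ]:
# Every draw from the prior should fall inside the stated limits.
draw = prior.rvs()
assert all(limits[k][0] <= v <= limits[k][1] for k, v in draw.items())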
In [80]:
test = log_model(prior.rvs())
In [81]:
import os, tempfile
db_path = ("sqlite:///" +
           os.path.join(tempfile.gettempdir(), "hl-1_icat_markov.db"))
print(db_path)
In [82]:
# Enable verbose logging for the ABC run
import logging
logging.basicConfig()
abc_logger = logging.getLogger('ABC')
abc_logger.setLevel(logging.DEBUG)
eps_logger = logging.getLogger('Epsilon')
eps_logger.setLevel(logging.DEBUG)
cv_logger = logging.getLogger('CV Estimation')
cv_logger.setLevel(logging.DEBUG)
In [83]:
from pyabc.populationstrategy import AdaptivePopulationSize, ConstantPopulationSize
from ionchannelABC import theoretical_population_size
pop_size = theoretical_population_size(2, len(limits))
print("Theoretical minimum population size is {} particles".format(pop_size))
In [84]:
from pyabc import ABCSMC
from pyabc.epsilon import MedianEpsilon
from pyabc.sampler import MulticoreEvalParallelSampler, SingleCoreSampler
from ionchannelABC import (IonChannelDistance,
                           EfficientMultivariateNormalTransition,
                           IonChannelAcceptor)
abc = ABCSMC(models=log_model,
             parameter_priors=prior,
             distance_function=IonChannelDistance(
                 exp_id=list(observations.exp_id),
                 variance=list(observations.variance),
                 delta=0.01),
             population_size=ConstantPopulationSize(5000),
             #population_size=AdaptivePopulationSize(
             #    start_nr_particles=10000,
             #    mean_cv=0.4,
             #    max_population_size=30000,
             #    min_population_size=pop_size),
             summary_statistics=summary_statistics,
             transitions=EfficientMultivariateNormalTransition(),
             eps=MedianEpsilon(initial_epsilon=20),
             sampler=MulticoreEvalParallelSampler(n_procs=12),
             acceptor=IonChannelAcceptor())
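A note on the configuration: MedianEpsilon sets each generation's acceptance threshold to the median distance within the previously accepted population (starting from 20 here), while MulticoreEvalParallelSampler farms particle evaluations out to 12 worker processes. The distance, transition and acceptor are the ionchannelABC replacements for pyabc's defaults.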
In [85]:
obs = observations.to_dict()['y']
obs = {str(k): v for k, v in obs.items()}
In [86]:
abc_id = abc.new(db_path, obs)
In [ ]:
history = abc.run(minimum_epsilon=0.0, max_nr_populations=100, min_acceptance_rate=0.01)
In [ ]:
# Continue sampling with a lower acceptance-rate cutoff
history = abc.run(minimum_epsilon=0.0, max_nr_populations=100, min_acceptance_rate=0.001)
In [88]:
from pyabc import History
In [92]:
history = History('sqlite:////scratch/cph211/tmp/hl-1_icat_markov.db')
history.all_runs()
Out[92]:
In [93]:
history.id = 3
In [94]:
df, w = history.get_distribution(m=0)
In [95]:
df.describe()
Out[95]:
In [96]:
from ionchannelABC import plot_parameters_kde
g = plot_parameters_kde(df, w, limits, aspect=12, height=0.6)
In [97]:
# Generate parameter samples
n_samples = 100
df, w = history.get_distribution(m=0)
th_samples = df.sample(n=n_samples, weights=w, replace=True).to_dict(orient='records')
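Since w holds the importance weights of the final population, drawing rows with weights=w and replace=True yields an approximately unweighted sample from the posterior, so the fits and plots below can treat the samples uniformly.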
In [98]:
plotting_obs = observations.copy()
In [99]:
plotting_obs.rename({'exp_id': 'exp', 'variance': 'errs'}, axis=1, inplace=True)
In [100]:
import numpy as np
plotting_obs['errs'] = np.sqrt(plotting_obs['errs'])
In [101]:
# Generate sim results samples
import pandas as pd
samples = pd.DataFrame({})
for i, th in enumerate(th_samples):
    results = summary_statistics(log_model(th))
    output = pd.DataFrame({'x': plotting_obs.x,
                           'y': list(results.values()),
                           'exp': plotting_obs.exp})
    #output = model.sample(pars=th, n_x=50)
    output['sample'] = i
    output['distribution'] = 'post'
    samples = pd.concat([samples, output], ignore_index=True)
In [102]:
from ionchannelABC import plot_sim_results
import seaborn as sns
sns.set_context('talk')
g = plot_sim_results(samples, obs=plotting_obs)
# Set axis labels
xlabels = ["voltage, mV", "voltage, mV", "voltage, mV", "time, ms"]#, "time, ms","voltage, mV"]
ylabels = ["normalised current density, pA/pF", "activation", "inactivation", "recovery"]#, "normalised current","current density, pA/pF"]
for ax, xl in zip(g.axes.flatten(), xlabels):
    ax.set_xlabel(xl)
for ax, yl in zip(g.axes.flatten(), ylabels):
    ax.set_ylabel(yl)
In [31]:
#g.savefig('results/icat-generic/icat_sim_results.pdf')
In [103]:
def plot_sim_results_all(samples: pd.DataFrame):
    with sns.color_palette("gray"):
        grid = sns.relplot(x='x', y='y',
                           col='exp',
                           units='sample',
                           kind='line',
                           data=samples,
                           estimator=None, lw=0.5,
                           alpha=0.5,
                           #estimator=np.median,
                           facet_kws={'sharex': 'col',
                                      'sharey': 'col'})
    return grid
In [104]:
grid2 = plot_sim_results_all(samples)
In [33]:
#grid2.savefig('results/icat-generic/icat_sim_results_all.pdf')
In [35]:
import numpy as np
In [42]:
# Mean current density
print(np.mean(samples[samples.exp=='0'].groupby('sample').min()['y']))
# Std current density
print(np.std(samples[samples.exp=='0'].groupby('sample').min()['y']))
In [43]:
import scipy.stats as st
peak_current = samples[samples['exp']=='0'].groupby('sample').min()['y'].tolist()
rv = st.rv_discrete(values=(peak_current, [1/len(peak_current),]*len(peak_current)))
In [44]:
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [45]:
# Voltage of peak current density
idxs = samples[samples.exp=='0'].groupby('sample').idxmin()['y']
print("mean: {}".format(np.mean(samples.iloc[idxs]['x'])))
print("STD: {}".format(np.std(samples.iloc[idxs]['x'])))
In [46]:
voltage_peak = samples.iloc[idxs]['x'].tolist()
rv = st.rv_discrete(values=(voltage_peak, [1/len(voltage_peak),]*len(voltage_peak)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [48]:
# Half activation potential
# Fit of activation to Boltzmann equation
from scipy.optimize import curve_fit
grouped = samples[samples['exp']=='1'].groupby('sample')
def fit_boltzmann(group):
    def boltzmann(V, Vhalf, K):
        return 1/(1+np.exp((Vhalf-V)/K))
    guess = (-30, 10)
    popt, _ = curve_fit(boltzmann, group.x, group.y, p0=guess)
    return popt
output = grouped.apply(fit_boltzmann).apply(pd.Series)
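In the fitted curve y = 1/(1 + exp((Vhalf - V)/K)), Vhalf is the half-activation potential and K the slope factor (both in mV), so column 0 of output holds Vhalf and column 1 holds K.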
In [49]:
print(np.mean(output))
print(np.std(output))
In [50]:
Vhalf = output[0].tolist()
rv = st.rv_discrete(values=(Vhalf, [1/len(Vhalf),]*len(Vhalf)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [51]:
slope = output[1].tolist()
rv = st.rv_discrete(values=(slope, [1/len(slope),]*len(slope)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [52]:
# Half inactivation potential
# Fit of inactivation to Boltzmann equation
grouped = samples[samples['exp']=='2'].groupby('sample')
def fit_boltzmann(group):
    def boltzmann(V, Vhalf, K):
        return 1-1/(1+np.exp((Vhalf-V)/K))
    guess = (-100, 10)
    popt, _ = curve_fit(boltzmann, group.x, group.y, p0=guess,
                        bounds=([-100, 1], [0, 30]))
    return popt
output = grouped.apply(fit_boltzmann).apply(pd.Series)
In [53]:
print(np.mean(output))
print(np.std(output))
In [54]:
Vhalf = output[0].tolist()
rv = st.rv_discrete(values=(Vhalf, [1/len(Vhalf),]*len(Vhalf)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [55]:
slope = output[1].tolist()
rv = st.rv_discrete(values=(slope, [1/len(slope),]*len(slope)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [56]:
# Recovery time constant
grouped = samples[samples.exp=='3'].groupby('sample')
def fit_single_exp(group):
    def single_exp(t, I_max, tau):
        return I_max*(1-np.exp(-t/tau))
    guess = (1, 50)
    popt, _ = curve_fit(single_exp, group.x, group.y, p0=guess)
    return popt[1]
output = grouped.apply(fit_single_exp)
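Here y = I_max*(1 - exp(-t/tau)) is fitted to the recovery data, so the returned popt[1] is the recovery time constant tau in the units of x (ms for this protocol).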
In [57]:
print(np.mean(output))
print(np.std(output))
In [58]:
tau = output.tolist()
rv = st.rv_discrete(values=(tau, [1/len(tau),]*len(tau)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))