In [1]:
import myokit
import myokit.lib.hh as hh
import numpy as np
In [2]:
# Load the Markov-formulation ICaT model from the local models directory
m = myokit.load_model('models/icat_markov.mmt')
In [3]:
# Prepare membrane voltage for external pacing: demote V from a state
# variable, zero its RHS, and bind it to the 'pace' signal
v = m.get('membrane.V')
v.demote()
v.set_rhs(0)
v.set_binding('pace')
In [4]:
# Gating state variables of the ICaT current (activation a, inactivation r)
states = [
'icat.a',
'icat.r',
]
# Model parameters to be inferred by ABC (rate parameters + conductance)
parameters = [
'icat.p_1',
'icat.p_2',
'icat.p_3',
'icat.p_4',
'icat.p_5',
'icat.p_6',
'icat.p_7',
'icat.p_8',
'icat.g_CaT'
]
# Observable current used to compute summary statistics
current = 'icat.i_CaT'
In [5]:
# Wrap the model for analytical Hodgkin-Huxley style simulation
mm = hh.HHModel(m, states, parameters, current, vm='membrane.V')
In [6]:
# Current-voltage relationship: 1000 ms hold at -75 mV followed by a
# 300 ms test step, for steps from -75 to +35 mV in 5 mV increments
iv_steps = np.arange(-75, 40, 5)
p1 = myokit.pacing.steptrain(
    vsteps=iv_steps,
    vhold=-75,
    tpre=1000,
    tstep=300,
)
t1 = p1.characteristic_time()
s1 = hh.AnalyticalSimulation(mm, p1)
In [7]:
# Inactivation protocol: 1000 ms hold, 1000 ms conditioning step from
# -80 to -20 mV in 5 mV increments, then a 200 ms test pulse at -10 mV.
# Fix: the previous `steps = np.arange(-80, -20, 5)` assignment was dead
# code -- steptrain_linear builds the voltage steps itself from
# vstart/vend/dv, and nothing read `steps` before it was reassigned later.
p2 = myokit.pacing.steptrain_linear(
    vstart=-80,
    vend=-20,
    dv=5,
    vhold=-10,
    tpre=1000,
    tstep=1000,
    tpost=200,
)
t2 = p2.characteristic_time()
s2 = hh.AnalyticalSimulation(mm, p2)
In [8]:
# Recovery protocol: paired-pulse trains separated by a variable
# recovery interval at the holding potential
recovery_intervals = [32, 64, 96, 128, 160, 192, 224, 256, 288, 320]
vhold = -80
vpulse = -20
p3 = myokit.Protocol()
for interval in recovery_intervals:
    p3.add_step(vhold, 5000)      # long hold to reset the channels
    p3.add_step(vpulse, 300)      # conditioning pulse
    p3.add_step(vhold, interval)  # variable recovery interval
    p3.add_step(vpulse, 300)      # test pulse
t3 = p3.characteristic_time()
s3 = hh.AnalyticalSimulation(mm, p3)
In [9]:
from collections import OrderedDict
class ABCModel(object):
    """Callable wrapper that runs several analytical simulations for pyABC.

    Parameters
    ----------
    mm : model object providing parameters() and default_parameters()
    ss : list of simulation objects (reset/set_parameters/run interface)
    tt : list of run durations, one per simulation in `ss`
    """
    def __init__(self, mm, ss, tt):
        self.mm = mm
        self.ss = ss
        self.tt = tt
        # Name -> default value, in the model's parameter order
        self.defaults = OrderedDict(zip(mm.parameters(), mm.default_parameters()))

    def __call__(self, parameters):
        """Run all simulations with `parameters` overriding the defaults.

        Returns a list with one simulation output per (simulation, time) pair.
        """
        # Fix: the original assigned `new_parameters = self.defaults`, which
        # aliases the stored defaults -- each call permanently overwrote them,
        # so later calls silently inherited earlier parameter values.
        new_parameters = self.defaults.copy()
        for name, value in parameters.items():
            new_parameters[name] = value
        for s in self.ss:
            s.reset()
            s.set_parameters(list(new_parameters.values()))
        return [s.run(t) for s, t in zip(self.ss, self.tt)]
In [10]:
model = ABCModel(mm, [s1,s2], [t1,t2])
In [25]:
# Measurement of raw output
def sum_stats(sim_output):
    """Reduce raw simulation traces to the summary statistics compared
    against the experimental data.

    sim_output: {"data": [iv_log, inactivation_log]} -- presumably myokit
    DataLogs (TODO confirm log type against the caller).
    Returns {str(index): value} containing, in order: signed peak I-V
    currents, normalised activation, normalised inactivation.
    """
    out = {}
    cnt = 0
    raw_data = sim_output["data"]

    # I-V curve: signed peak current in each 1300 ms voltage step,
    # measured after discarding the 1000 ms holding period.
    peaks = []
    for d in raw_data[0].split_periodic(1300):
        d = d.trim_left(1000, adjust=True)
        current = d['icat.i_CaT']
        peaks.append(current[int(np.argmax(np.abs(current)))])
    for peak in peaks:
        out[str(cnt)] = peak
        cnt += 1

    # Activation: |peak| normalised to the largest peak. Fix: reuse the
    # trimmed peaks above -- the original recomputed them from the
    # *untrimmed* traces, inconsistently with the I-V measurement.
    max_activation = np.max(np.abs(peaks))
    for peak in peaks:
        out[str(cnt)] = abs(peak) / max_activation
        cnt += 1

    # Inactivation: normalised |peak| in each 2200 ms step after removing
    # the 2000 ms prepulse. Fix: trim_left returns a new log; the original
    # discarded the return value, so the prepulse was never removed.
    inactivation = []
    for d in raw_data[1].split_periodic(2200):
        d = d.trim_left(2000, adjust=True)
        current = d['icat.i_CaT']
        inactivation.append(abs(current[int(np.argmax(np.abs(current)))]))
    max_inactivation = np.max(inactivation)
    for i in inactivation:
        out[str(cnt)] = i / max_inactivation
        cnt += 1
    return out
In [26]:
from data.icat.data_icat import IV_Nguyen, Act_Nguyen, Inact_Nguyen
In [27]:
# Experimental observations: I-V curve, activation and inactivation points,
# keyed by integer observation index
observed_values = IV_Nguyen()[1] + Act_Nguyen()[1] + Inact_Nguyen()[1]
obs = {index: value for index, value in enumerate(observed_values)}
In [28]:
# Map each observation index to its experiment number
# (0 = I-V, 1 = activation, 2 = inactivation)
experiment_labels = []
for label, dataset in enumerate((IV_Nguyen, Act_Nguyen, Inact_Nguyen)):
    experiment_labels.extend([label] * len(dataset()[1]))
exp = {index: label for index, label in enumerate(experiment_labels)}
In [15]:
from pyabc import (ABCSMC, Distribution, RV,
History, MedianEpsilon)
from pyabc.epsilon import MedianEpsilon
from pyabc.sampler import MulticoreEvalParallelSampler
from pyabc.populationstrategy import AdaptivePopulationSize
In [16]:
# Uniform prior bounds: odd-numbered p parameters span (-20, 0),
# even-numbered ones span (0, 1), conductance spans (0, 10)
limits = {'icat.g_CaT': (0, 10.)}
for i in range(1, 9):
    limits['icat.p_{}'.format(i)] = (-20, 0) if i % 2 else (0, 1.)
prior = Distribution(**{key: RV("uniform", a, b - a)
                        for key, (a, b) in limits.items()})
In [17]:
import os, tempfile
# Store the ABC run in an sqlite database under the system temp directory
_db_file = os.path.join(tempfile.gettempdir(), "hl-1_icat_markov.db")
db_path = "sqlite:///" + _db_file
print(db_path)
In [18]:
# Enable verbose DEBUG logging for the ABC machinery
import logging
logging.basicConfig()
abc_logger = logging.getLogger('ABC')
eps_logger = logging.getLogger('Epsilon')
cv_logger = logging.getLogger('CV Estimation')
for verbose_logger in (abc_logger, eps_logger, cv_logger):
    verbose_logger.setLevel(logging.DEBUG)
In [19]:
from ionchannelABC import IonChannelDistance, IonChannelAcceptor, EfficientMultivariateNormalTransition
In [29]:
def simulate_pyabc(parameter):
    """Run the ABCModel and wrap its traces in the mapping pyABC expects."""
    result = model(parameter)
    return {"data": result}
In [21]:
from pyabc.populationstrategy import ConstantPopulationSize
In [22]:
# Configure the first ABC-SMC run: small constant population (20 particles),
# two worker processes, custom ion-channel distance and acceptor
abc_config = dict(
    models=simulate_pyabc,
    parameter_priors=prior,
    distance_function=IonChannelDistance(obs=obs, exp_map=exp),
    population_size=ConstantPopulationSize(20),
    summary_statistics=sum_stats,
    transitions=EfficientMultivariateNormalTransition(),
    eps=MedianEpsilon(),
    sampler=MulticoreEvalParallelSampler(n_procs=2),
    acceptor=IonChannelAcceptor(),
)
abc = ABCSMC(**abc_config)
In [23]:
# Register a new run in the database and launch ABC-SMC
abc_id = abc.new(db_path, obs)
In [24]:
# Stop at epsilon <= 0.1, after 5 populations, or if acceptance drops below 1%
history = abc.run(minimum_epsilon=0.1, max_nr_populations=5, min_acceptance_rate=0.01)
In [128]:
# Pull calibration data from the model wrapper.
# NOTE(review): this cell comes from a different session -- `model` here must
# provide get_experiment_data(), unlike the ABCModel defined above; confirm.
measurements = model.get_experiment_data()
measurements_dict = measurements.to_dict()  # fix: convert once, not 3 times
obs = measurements_dict['y']
exp = measurements_dict['exp']
errs = measurements_dict['errs']
In [7]:
# Second-session prior: same bounds as before, but parameter names without
# the 'icat.' component prefix
limits = {'g_CaT': (0, 10.)}
for i in range(1, 9):
    limits['p_{}'.format(i)] = (-20, 0) if i % 2 else (0, 1.)
prior = Distribution(**{key: RV("uniform", a, b - a)
                        for key, (a, b) in limits.items()})
In [8]:
# Same sqlite database as the first session; the new run is appended to it
_db_file = os.path.join(tempfile.gettempdir(), "hl-1_icat_markov.db")
db_path = "sqlite:///" + _db_file
print(db_path)
In [9]:
# Turn on DEBUG-level logging for ABC, epsilon schedule and CV estimation
import logging
logging.basicConfig()
abc_logger = logging.getLogger('ABC')
eps_logger = logging.getLogger('Epsilon')
cv_logger = logging.getLogger('CV Estimation')
for dbg_logger in (abc_logger, eps_logger, cv_logger):
    dbg_logger.setLevel(logging.DEBUG)
In [11]:
from pyabc.populationstrategy import ConstantPopulationSize, AdaptivePopulationSize
# Fix: theoretical_population_size was used below without ever being imported,
# so this cell raised NameError on a fresh kernel. It appears to ship with
# ionchannelABC (already imported above) -- TODO confirm the import location.
from ionchannelABC import theoretical_population_size
pop_size = theoretical_population_size(2, len(limits))
print("Theoretical minimum population size is {} particles".format(pop_size))
In [12]:
# Fix: ion_channel_sum_stats_calculator was never imported (NameError on a
# fresh kernel); presumably exported by ionchannelABC -- TODO confirm.
from ionchannelABC import ion_channel_sum_stats_calculator
# Second run: adaptive population between the theoretical minimum and 10k,
# six worker processes, fixed initial epsilon of 50
abc = ABCSMC(models=model,
             parameter_priors=prior,
             distance_function=IonChannelDistance(
                 obs=obs,
                 exp_map=exp),
             population_size=AdaptivePopulationSize(
                 start_nr_particles=pop_size,
                 mean_cv=0.4,
                 max_population_size=10000,
                 min_population_size=pop_size),
             summary_statistics=ion_channel_sum_stats_calculator,
             transitions=EfficientMultivariateNormalTransition(),
             eps=MedianEpsilon(initial_epsilon=50),
             sampler=MulticoreEvalParallelSampler(n_procs=6),
             acceptor=IonChannelAcceptor())
In [13]:
# Register and launch the second (adaptive-population) run
abc_id = abc.new(db_path, obs)
In [ ]:
# Long run: up to 100 populations, stop at eps <= 0.1 or acceptance < 1%
history = abc.run(minimum_epsilon=0.1, max_nr_populations=100, min_acceptance_rate=0.01)
In [14]:
# Reload results from the database for post-processing
history = History(db_path)
history.all_runs()
Out[14]:
In [15]:
# Select a specific ABC run by id -- magic number, depends on local db contents
history.id = 6
In [16]:
sns.set_context('talk')
In [17]:
# Posterior particles (DataFrame) and their importance weights
df, w = history.get_distribution(m=0)
In [18]:
df.describe()
Out[18]:
In [19]:
from ionchannelABC import plot_parameters_kde
# Marginal posterior KDEs plotted within the prior limits
g = plot_parameters_kde(df, w, limits,aspect=12,height=0.6)
In [48]:
# Epsilon and population-size evolution across ABC generations
evolution = history.get_all_populations()
g = sns.relplot(x='t', y='epsilon', size='samples', data=evolution[evolution.t>=0])
#g.savefig('results/icat-generic/eps_evolution.pdf')
In [20]:
# Generate parameter samples: weighted resampling (with replacement) of
# posterior particles for posterior predictive checks
n_samples = 100
df, w = history.get_distribution(m=0)
posterior_draws = df.sample(n=n_samples, weights=w, replace=True)
th_samples = posterior_draws.to_dict(orient='records')
In [21]:
# Generate sim results samples
# Fix: DataFrame.append in a loop is quadratic and was removed in pandas 2.0;
# collect the per-sample outputs and concatenate once instead.
# NOTE(review): `pd` (pandas) is never imported in this notebook -- confirm
# an `import pandas as pd` exists in the environment / imports cell.
sample_outputs = []
for i, th in enumerate(th_samples):
    output = model.sample(pars=th, n_x=50)
    output['sample'] = i
    output['distribution'] = 'post'
    sample_outputs.append(output)
samples = (pd.concat(sample_outputs, ignore_index=True)
           if sample_outputs else pd.DataFrame({}))
In [22]:
from ionchannelABC import plot_sim_results
sns.set_context('talk')
g = plot_sim_results(samples, obs=measurements)
# Label each experiment panel: (x label, y label) per facet, in facet order
axis_labels = [
    ("voltage, mV", "current density, pA/pF"),
    ("voltage, mV", "activation"),
    ("voltage, mV", "inactivation"),
    ("time, ms", "recovery"),
    ("time, ms", "normalised current"),
    ("voltage, mV", "current density, pA/pF"),
]
for ax, (xl, yl) in zip(g.axes.flatten(), axis_labels):
    ax.set_xlabel(xl)
    ax.set_ylabel(yl)
In [70]:
g.savefig('results/icat-generic/icat_sim_results.pdf')
In [71]:
def plot_sim_results_all(samples: pd.DataFrame):
    """Plot every posterior sample trace individually, one column per
    experiment, as thin grey lines (no aggregation across samples)."""
    facet_options = {'sharex': 'col', 'sharey': 'col'}
    with sns.color_palette("gray"):
        grid = sns.relplot(x='x', y='y',
                           col='exp',
                           units='sample',
                           kind='line',
                           estimator=None,
                           lw=0.5,
                           alpha=0.5,
                           data=samples,
                           facet_kws=facet_options)
    return grid
In [72]:
grid2 = plot_sim_results_all(samples)
In [73]:
# Requires the results/icat-generic/ directory to already exist
grid2.savefig('results/icat-generic/icat_sim_results_all.pdf')
In [74]:
# Mean current density
# (per-sample minimum of y in experiment 0, i.e. the peak inward current)
print(np.mean(samples[samples.exp==0].groupby('sample').min()['y']))
# Std current density
print(np.std(samples[samples.exp==0].groupby('sample').min()['y']))
In [75]:
import scipy.stats as st
# Empirical distribution over per-sample peak currents (equal weights)
# NOTE(review): scipy's rv_discrete is documented for integer support points;
# float xk appears to work for median/interval here but verify against docs
peak_current = samples[samples['exp']==0].groupby('sample').min()['y'].tolist()
rv = st.rv_discrete(values=(peak_current, [1/len(peak_current),]*len(peak_current)))
In [76]:
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [77]:
# Voltage of peak current density
# idxmin()['y'] gives, per sample, the row index of the most negative current;
# using those labels with .iloc assumes `samples` has a default RangeIndex
# (true after ignore_index concatenation) -- TODO confirm
idxs = samples[samples.exp==0].groupby('sample').idxmin()['y']
print("mean: {}".format(np.mean(samples.iloc[idxs]['x'])))
print("STD: {}".format(np.std(samples.iloc[idxs]['x'])))
In [78]:
# Empirical median / 95% CI of the peak-current voltage
voltage_peak = samples.iloc[idxs]['x'].tolist()
rv = st.rv_discrete(values=(voltage_peak, [1/len(voltage_peak),]*len(voltage_peak)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [79]:
# Half activation potential
# Fit of activation to Boltzmann equation
from scipy.optimize import curve_fit
grouped = samples[samples['exp']==1].groupby('sample')
def fit_boltzmann(group):
    """Fit a rising Boltzmann curve to one posterior sample's activation
    data; returns popt = (Vhalf, K)."""
    def boltzmann(V, Vhalf, K):
        return 1/(1+np.exp((Vhalf-V)/K))
    guess = (-30, 10)
    # Fix: `guess` was defined but never passed, so curve_fit started from
    # its default p0 = (1, 1); pass it as the initial estimate (the recovery
    # fit below already does this).
    popt, _ = curve_fit(boltzmann, group.x, group.y, p0=guess)
    return popt
output = grouped.apply(fit_boltzmann).apply(pd.Series)
In [80]:
# Mean/STD of fitted (Vhalf, K) across posterior samples
print(np.mean(output))
print(np.std(output))
In [81]:
# Empirical median / 95% CI of the half-activation potential Vhalf
Vhalf = output[0].tolist()
rv = st.rv_discrete(values=(Vhalf, [1/len(Vhalf),]*len(Vhalf)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [82]:
# Empirical median / 95% CI of the activation slope factor K
slope = output[1].tolist()
rv = st.rv_discrete(values=(slope, [1/len(slope),]*len(slope)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [83]:
# Half inactivation potential (original comment said "activation", but this
# fits exp 2 -- the inactivation curve -- with a descending Boltzmann)
grouped = samples[samples['exp']==2].groupby('sample')
def fit_boltzmann(group):
    """Fit a descending Boltzmann (inactivation) curve to one posterior
    sample; returns popt = (Vhalf, K)."""
    def boltzmann(V, Vhalf, K):
        return 1-1/(1+np.exp((Vhalf-V)/K))
    guess = (-100, 10)
    # Fix: `guess` was defined but never passed to curve_fit; supply it as
    # p0 (it lies within the bounds below).
    popt, _ = curve_fit(boltzmann, group.x, group.y, p0=guess,
                        bounds=([-100, 1], [0, 30]))
    return popt
output = grouped.apply(fit_boltzmann).apply(pd.Series)
In [84]:
# Mean/STD of fitted inactivation (Vhalf, K) across posterior samples
print(np.mean(output))
print(np.std(output))
In [85]:
# Empirical median / 95% CI of the half-inactivation potential Vhalf
Vhalf = output[0].tolist()
rv = st.rv_discrete(values=(Vhalf, [1/len(Vhalf),]*len(Vhalf)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [86]:
# Empirical median / 95% CI of the inactivation slope factor K
slope = output[1].tolist()
rv = st.rv_discrete(values=(slope, [1/len(slope),]*len(slope)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [87]:
# Recovery time constant
grouped = samples[samples.exp==3].groupby('sample')
def fit_single_exp(group):
    """Fit a mono-exponential recovery curve to one posterior sample and
    return the fitted time constant tau."""
    def single_exp(t, I_max, tau):
        return I_max*(1-np.exp(-t/tau))
    initial_guess = (1, 50)
    fitted, _ = curve_fit(single_exp, group.x, group.y, initial_guess)
    return fitted[1]
output = grouped.apply(fit_single_exp)
In [88]:
# Mean/STD of the recovery time constant across posterior samples
print(np.mean(output))
print(np.std(output))
In [89]:
# Empirical median / 95% CI of tau
tau = output.tolist()
rv = st.rv_discrete(values=(tau, [1/len(tau),]*len(tau)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [ ]: