Using full action potential (AP) and calcium transient (CaT) measurements to fit the remaining conductances.
In [1]:
# PyABC imports
from pyabc import ABCSMC, Distribution, RV, History
from pyabc.populationstrategy import AdaptivePopulationSize
from pyabc.epsilon import MedianEpsilon
from pyabc.sampler import MulticoreEvalParallelSampler
In [2]:
# Custom imports
from ionchannelABC import (ion_channel_sum_stats_calculator,
                           IonChannelAcceptor,
                           IonChannelDistance,
                           EfficientMultivariateNormalTransition,
                           calculate_parameter_sensitivity,
                           plot_parameter_sensitivity,
                           plot_regression_fit,
                           plot_parameters_kde,
                           plot_distance_weights)
In [3]:
# Other necessary imports
import numpy as np
import subprocess
import pandas as pd
import io
import os
import tempfile
In [4]:
# Plotting imports
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
In [5]:
history = {'icat': History('sqlite:///results/icat/hl-1_icat.db'),
           'ical': History('sqlite:///results/ical/hl-1_ical.db'),
           'ina': History('sqlite:///results/ina/hl-1_ina.db'),
           'ikr': History('sqlite:///results/ikr/hl-1_ikr.db'),
           'ikur': History('sqlite:///results/ikur/hl-1_ikur.db'),
           'ito': History('sqlite:///results/ito/hl-1_ito.db'),
           'iha': History('sqlite:///results/iha/hl-1_iha.db'),
           'ik1': History('sqlite:///results/ik1/hl-1_ik1.db')}
In [6]:
# Set to correct result (last ABC run in database).
for _, h in history.items():
    h.id = len(h.all_runs())
In [7]:
# Create a prior from each channel's ABC posterior
from pyabc.transition import MultivariateNormalTransition

class rv_normal_fit(MultivariateNormalTransition):
    """1-D KDE over a single parameter's posterior, usable as a prior RV."""
    def __init__(self, key):
        self.key = key
        super().__init__()

    def fit(self, X, w):
        X.columns = [self.key]
        super().fit(pd.DataFrame(X), w)

    def rvs(self):
        sample = super().rvs()
        return sample.values[0]

    def pdf(self, x):
        # Wrap the scalar in a pd.Series so the parent class can evaluate it.
        x = pd.Series({self.key: x})
        return super().pdf(x)
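A minimal usage sketch of this wrapper on synthetic data (not part of the original analysis; it assumes the pyabc version used in this notebook, where Transition.rvs() returns a pandas Series):

# Hypothetical smoke test for rv_normal_fit on made-up posterior samples
demo = rv_normal_fit('icat.g_CaT')
demo_X = pd.DataFrame({'icat.g_CaT': np.random.normal(0.5, 0.05, 500)})
demo_w = np.ones(500) / 500          # equal posterior weights, summing to 1
demo.fit(demo_X, demo_w)
print(demo.rvs())                    # scalar draw from the fitted density
print(demo.pdf(0.5))                 # density evaluated at a point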
In [24]:
# Add to priors
prior = {}
for k, h in history.items():
    df, w = h.get_distribution(m=0)
    for key in df.keys():
        dist = rv_normal_fit(k + '.' + key)
        dist.fit(pd.DataFrame(df[key]), w)
        prior[k + '.' + key] = dist
In [9]:
from channels.hl1 import hl1 as model
In [10]:
model.sample({})
Out[10]:
In [11]:
# Targets from Dias et al., 2014, measured in the HL1-6 myocyte line
measurements = model.get_experiment_data()
obs = measurements.to_dict()['y']
exp = measurements.to_dict()['exp']
errs = measurements.to_dict()['errs']
In [12]:
measurements
Out[12]:
In [19]:
# Treat each entry as a separate experiment
for k in exp:
    exp[k] = k
In [25]:
# Additional parameters to be fitted to the full-cell
# measurements, with uniform priors.
limits = {'ik1.g_K1': (0, 0.1),
          'incx.k_NCX': (0, 20),
          'icab.g_Cab': (0, 0.001),
          'inab.g_Nab': (0, 0.01),
          'inak.i_NaK_max': (0, 10)}
          #'serca.V_max': (0, 10),
          #'ca_diffusion.tau_tr': (0, 1000)}
          #'ryanodine_receptors.k_RyR': (0, 0.1)}
for key, (a, b) in limits.items():
    prior[key] = RV("uniform", a, b - a)
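pyabc's RV follows the scipy.stats calling convention, where "uniform" takes a location and a scale rather than lower and upper bounds, hence the b - a. A quick stand-alone check (not from the original notebook):

# scipy.stats convention: uniform(loc=a, scale=b-a) has support [a, b]
from scipy.stats import uniform
rv = uniform(0, 0.1)                 # matches the ik1.g_K1 limits above
print(rv.ppf(0.0), rv.ppf(1.0))      # -> 0.0 0.1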
In [26]:
len(prior)
Out[26]:
In [27]:
prior = Distribution(**prior)
In [31]:
parameters = list(prior.keys())
In [32]:
distance_fn = IonChannelDistance(
    obs=obs,
    exp_map=exp,
    err_bars=errs,
    err_th=0.01)
In [33]:
sns.set_context('talk')
g = plot_distance_weights(model, distance_fn)
In [34]:
print(distance_fn.w)
In [35]:
g.savefig('results/extra/dist_weights.pdf')
In [36]:
fitted, regression_fit, r2 = calculate_parameter_sensitivity(
    model,
    parameters,
    distance_fn,
    sigma=0.025,
    n_samples=5000)
In [37]:
# Finding insensitive parameters: a parameter is insensitive when
# |beta| is below the cutoff for every experiment.
cutoff = 0.05
fitted_pivot = fitted.pivot(index='param', columns='exp')
betas = fitted_pivot['beta'].abs()
insensitive_params = betas[(betas < cutoff).all(axis=1)].index.values
In [28]:
# Hard-coded list of insensitive parameters, as identified by the
# sensitivity analysis above.
insensitive_params = np.array(
    ['ical.q1', 'ical.q2', 'ical.q3', 'ical.q4', 'ical.q5', 'ical.q9',
     'icat.g_CaT', 'icat.p3', 'icat.p4', 'icat.p5', 'icat.p6', 'icat.q2',
     'icat.q3', 'icat.q4', 'icat.q5', 'icat.q6', 'iha.p3', 'iha.p4',
     'iha.p5', 'iha.p6', 'iha.p7', 'ikr.k_f', 'ikr.p5', 'ikr.p6', 'ikr.q5',
     'ikr.q6', 'ikur.k_atau3', 'ikur.k_iss1', 'ikur.k_iss2', 'ikur.k_itau1',
     'ikur.k_itau2', 'ina.p2', 'ina.p3', 'ina.p4', 'ina.q11', 'ina.r11',
     'ina.r12', 'ina.r13', 'ina.r16', 'ito.k_xss1', 'ito.k_xtau1',
     'ito.k_xtau2', 'ito.k_xtau3', 'ito.k_yss1', 'ito.k_yss2',
     'ito.k_ytau1', 'ito.k_ytau2', 'ito.k_ytau3', 'ito.k_ytau4'])
In [29]:
len(insensitive_params)
Out[29]:
In [38]:
sns.set_context('talk')
grid = plot_parameter_sensitivity(fitted, plot_cutoff=0.05)
In [40]:
grid2 = plot_regression_fit(regression_fit, r2)
In [41]:
grid.savefig('results/extra/sensitivity.pdf')
grid2.savefig('results/extra/sensitivity_fit.pdf')
In [30]:
# Generate samples of the parameters insensitive to full cell model measurements
def generate_sample(history, insensitive_params, n):
    samples = [dict() for i in range(n)]
    for k, h in history.items():
        dist = h.get_distribution(m=0)
        weights = dist[1]
        ch_samples = dist[0] \
            .sample(n, weights=weights, replace=True) \
            .to_dict(orient='records')
        # Keep only this channel's parameters that were flagged insensitive.
        ch_samples = [{k + '.' + key: value for key, value in ch.items()
                       if (k + '.' + key) in insensitive_params}
                      for ch in ch_samples]
        for c, s in zip(ch_samples, samples):
            s.update(c)
    return samples
In [31]:
# Generate samples
n_samples = 5000
param_samples = generate_sample(history, insensitive_params, n_samples)
In [32]:
model.add_external_par_samples(param_samples)
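The intent here is that each forward simulation uses one of these pre-drawn external parameter sets, fixing the insensitive parameters at values from their single-channel posteriors rather than fitting them again.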
In [33]:
# New prior without insensitive parameters
prior = {}
for k, h in history.items():
    df, w = h.get_distribution(m=0)
    for key in df.keys():
        if (k + '.' + key) not in insensitive_params:
            dist = rv_normal_fit(k + '.' + key)
            dist.fit(pd.DataFrame(df[key]), w)
            prior[k + '.' + key] = dist
In [34]:
limits = {'ik1.g_K1': (0, 0.1),
          'incx.k_NCX': (0, 20),
          'icab.g_Cab': (0, 0.001),
          'inab.g_Nab': (0, 0.01),
          'inak.i_NaK_max': (0, 10)}
          #'serca.V_max': (0, 10),
          #'ca_diffusion.tau_tr': (0, 1000)}
          #'ryanodine_receptors.k_RyR': (0, 0.1)}
for key, (a, b) in limits.items():
    prior[key] = RV("uniform", a, b - a)
In [35]:
prior = Distribution(**prior)
In [36]:
db_path = ('sqlite:///' +
           os.path.join(tempfile.gettempdir(), "hl-1_extra-fullABC.db"))
print(db_path)
In [37]:
# Enable debug logging for the ABC components
import logging
logging.basicConfig()
abc_logger = logging.getLogger('ABC')
abc_logger.setLevel(logging.DEBUG)
eps_logger = logging.getLogger('Epsilon')
eps_logger.setLevel(logging.DEBUG)
cv_logger = logging.getLogger('CV Estimation')
cv_logger.setLevel(logging.DEBUG)
In [38]:
from pyabc.sampler import SingleCoreSampler
from pyabc.populationstrategy import ConstantPopulationSize
In [39]:
abc = ABCSMC(models=model,
             parameter_priors=prior,
             distance_function=IonChannelDistance(
                 obs=obs,
                 exp_map=exp,
                 err_bars=errs,
                 err_th=0.01),
             population_size=ConstantPopulationSize(5000),
             #population_size=AdaptivePopulationSize(
             #    start_nr_particles=5000,
             #    mean_cv=0.2,
             #    max_population_size=5000,
             #    min_population_size=100),
             summary_statistics=ion_channel_sum_stats_calculator,
             transitions=EfficientMultivariateNormalTransition(scaling=0.2),
             eps=MedianEpsilon(median_multiplier=1.5),
             sampler=MulticoreEvalParallelSampler(n_procs=6),
             #sampler=SingleCoreSampler(),
             acceptor=IonChannelAcceptor())
In [40]:
abc_id = abc.new(db_path, obs)
In [ ]:
# Each of the repeated run() calls below continues the stored ABC run
# for up to 5 further populations.
history = abc.run(minimum_epsilon=0.01, max_nr_populations=5, min_acceptance_rate=0.001)
In [ ]:
history = abc.run(minimum_epsilon=0.01, max_nr_populations=5, min_acceptance_rate=0.001)
In [ ]:
history = abc.run(minimum_epsilon=0.01, max_nr_populations=5, min_acceptance_rate=0.001)
In [ ]:
history = abc.run(minimum_epsilon=0.01, max_nr_populations=5, min_acceptance_rate=0.001)
In [ ]:
history = abc.run(minimum_epsilon=0.01, max_nr_populations=5, min_acceptance_rate=0.001)
In [32]:
#db_path = 'sqlite:///results/extra/hl-1_extra.db'
db_path = 'sqlite:////scratch/cph211/tmp/hl-1_extra-fullABC.db'
history = History(db_path)
history.all_runs()
Out[32]:
In [33]:
#history.id = 15 # run with err=0.1 and scaling=0.25
#history.id = 17 # run with err=0.01 and scaling=0.5
#history.id = 23 # run with new icat channel fitting using v_offset and no ion concentrations
#history.id = 24 # run with new icat channel fitting using v_offset, no ion concentrations and no APA
history.id = 9
In [8]:
evolution = history.get_all_populations()
grid = sns.relplot(x='t', y='epsilon', size='samples', data=evolution[evolution.t>=0])
#grid.savefig('results/extra/eps_evolution.pdf')
In [34]:
df, w = history.get_distribution(m=0)
In [35]:
df_extra = df.loc[:, list(limits.keys())]
In [36]:
from ionchannelABC import plot_parameters_kde
g = plot_parameters_kde(df_extra, w, limits, aspect=5, height=1.1)
In [37]:
# Generate parameter samples
n_samples = 100
df, w = history.get_distribution(m=0)
th_samples = df.sample(n=n_samples, weights=w, replace=True).to_dict(orient='records')
In [38]:
# Generate simulated outputs for each posterior parameter sample
samples = pd.DataFrame({})
for i, th in enumerate(th_samples):
    output = model.sample(pars=th)
    output['sample'] = i
    output['distribution'] = 'post'
    samples = samples.append(output, ignore_index=True)
In [39]:
import scipy.stats as st
def evaluate_samples(samples):
    # Summarise each measurement with an equally weighted empirical distribution.
    for measurement in samples['x'].unique():
        filtered = samples[samples.x == measurement]['y'].tolist()
        rv = st.rv_discrete(values=(filtered, [1 / len(filtered)] * len(filtered)))
        print("{}, median: {}".format(measurement, rv.median()))
        print("{}, 95% CI: {}".format(measurement, rv.interval(0.95)))
In [40]:
evaluate_samples(samples)
In [49]:
samp_pivot = samples.pivot(index='sample', columns='x', values='y')
In [50]:
from sklearn.cluster import AffinityPropagation
In [51]:
af = AffinityPropagation()
In [52]:
af.fit(samp_pivot.values)
Out[52]:
In [53]:
centers = pd.DataFrame(af.cluster_centers_, columns=samp_pivot.columns)
In [55]:
af.cluster_centers_indices_
Out[55]:
In [56]:
sns.distplot(af.labels_, bins=len(af.cluster_centers_))
Out[56]:
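To see how the posterior-predictive samples distribute over the discovered clusters, the label counts can also be tabulated directly (a small follow-up sketch, not part of the original notebook):

# Number of samples assigned to each affinity-propagation cluster
print(pd.Series(af.labels_).value_counts().sort_index())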
In [41]:
from ionchannelABC import (Experiment, ExperimentStimProtocol)
In [87]:
# Create new experiment to generate traces
pulses = range(5, 45, 5)
stim_times = [1000000] + [2, 4998] * len(pulses)
stim_levels = [0]
for level in pulses:
    stim_levels.append(level)
    stim_levels.append(0)
#pace_time = np.linspace(0, 10000, 10000)
pace_time = np.linspace(0, sum(stim_times[1:]), sum(stim_times[1:]))

def trace_measurements(data_list):
    import numpy as np
    data = data_list[0]
    if len(data_list) > 1:
        for log in data_list[1:]:
            data = data.extend(log)
    # Re-zero the time axis and interpolate every logged variable
    # onto the common pacing time grid.
    simtime = data['environment.time']
    simtime_min = min(simtime)
    simtime = [t - simtime_min for t in simtime]
    data_output = dict()
    data['environment.time'] = simtime
    for var in data:
        data_output[var] = np.interp(pace_time, simtime, data[var])
    return data_output

def unwrap(data, ind_var):
    return data[0], True

trace_prot = ExperimentStimProtocol(stim_times, stim_levels,
                                    #measure_index=range(len(stim_times)-4, len(stim_times)),
                                    measure_index=range(1, len(stim_times)),
                                    measure_fn=trace_measurements,
                                    post_fn=unwrap)
dias_conditions = dict(T=305, Ca_o=1800, Na_o=1.4e5, K_o=4e3)
trace_exper = Experiment(trace_prot, None, dias_conditions)
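A quick sanity check of the protocol dimensions (assuming the model's time unit is ms, consistent with the 5000 ms cycle spacing used for the plotting windows below):

# 1 holding segment + 2 segments per pulse (8 pulses) = 17 segments
print(len(stim_times), len(stim_levels))    # -> 17 17
# Pacing portion: eight cycles of 2 + 4998 = 5000 ms
print(sum(stim_times[1:]))                  # -> 40000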
In [88]:
model.add_experiments([trace_exper])
In [93]:
# Generate traces for the first 10 posterior samples
traces = pd.DataFrame({})
for i, th in enumerate(th_samples[0:10]):
    output = model.sample(pars=th, exp_num=2)
    output['sample'] = i
    traces = traces.append(output, ignore_index=True)
In [45]:
import pickle
with open('traces.pkl', 'wb') as f:
    pickle.dump(traces, f)
In [37]:
import pickle
with open('traces.pkl', 'rb') as f:
    traces = pickle.load(f)
In [94]:
traces.x.unique()
Out[94]:
In [95]:
time = traces[traces.x=='environment.time'].y.tolist()[0]
In [102]:
#plotlims = (4950, 5400)
# One 450 ms plotting window per pacing cycle (cycles are 5000 ms apart)
plotlims = [(4950 + 5000*i, 5400 + 5000*i) for i in range(7)]
In [106]:
#idxlims = np.intersect1d(np.where(time >= plotlims[0]), np.where(time <= plotlims[1]))
idxlims = [np.intersect1d(np.where(time >= plotlims[i][0]), np.where(time <= plotlims[i][1]))
           for i in range(7)]
In [108]:
#time_limited = time[idxlims[0]:idxlims[-1]]
time_limited = time[idxlims[0][0]:idxlims[0][-1]]
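Note that time_limited reuses the first window's index range for every window; this assumes each of the seven windows covers the same number of points on the uniform pace_time grid.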
In [110]:
traces = traces.set_index(['x','sample'])
In [111]:
del traces['exp']
In [112]:
traces.reset_index(inplace=True)
In [140]:
rows = []
In [141]:
#_ = traces.apply(lambda row: [rows.append([row['x'], row['sample'], yi, ti])
#                              for (ti, yi) in zip(time_limited, row.y[idxlims[0]:idxlims[-1]])], axis=1)
# Unroll each window into long format, offsetting the sample ids so the
# seven windows are treated as independent samples.
for i in range(7):
    _ = traces.apply(lambda row: [rows.append([row['x'], row['sample'] + 10*i, yi, ti])
                                  for (ti, yi) in zip(time_limited, row.y[idxlims[i][0]:idxlims[i][-1]])], axis=1)
In [142]:
traces_new = pd.DataFrame(rows, columns=['x', 'sample', 'y', 'time']).set_index(['x', 'sample'])
In [143]:
traces_new.reset_index(inplace=True)
In [144]:
no_ap_samples = []
In [161]:
# Flag samples whose membrane potential never rises above 0 mV (no AP).
# Iterate over the actual sample ids; range(max(...)) would skip the last one.
for i in traces_new['sample'].unique():
    if max(traces_new[(traces_new['sample'] == i) & (traces_new['x'] == 'membrane.V')]['y']) < 0:
        no_ap_samples.append(i)
In [162]:
no_ap_samples
Out[162]:
In [167]:
# Drop samples without an action potential
traces_new = traces_new[~traces_new['sample'].isin(no_ap_samples)]
In [192]:
small_currents = ['ik1.i_K1', 'incx.i_NCX', 'inak.i_NaK',
                  'iha.i_ha', 'inab.i_Nab', 'icab.i_Cab']
plot = sns.lineplot(x='time', y='y', hue='x',
                    data=traces_new[traces_new.x.isin(small_currents)],
                    estimator=np.median, ci=95)
In [193]:
fig = plot.get_figure()
fig.savefig('results/extra/small_currents.pdf')
In [225]:
grid = sns.relplot(x='time', y='y', data=traces_new, row='x',
                   estimator=np.median, ci=95, kind='line',
                   facet_kws={'sharex': 'row',
                              'sharey': False})