Using full AP/CaT measures to fit the remaining conductances.
In [2]:
# PyABC imports
from pyabc import ABCSMC, Distribution, RV, History
from pyabc.populationstrategy import AdaptivePopulationSize
from pyabc.epsilon import MedianEpsilon
from pyabc.sampler import MulticoreEvalParallelSampler
In [3]:
# Custom imports
from ionchannelABC import (ion_channel_sum_stats_calculator,
                           IonChannelAcceptor,
                           IonChannelDistance,
                           EfficientMultivariateNormalTransition,
                           plot_parameter_sensitivity,
                           plot_parameters_kde,
                           plot_distance_weights)
In [4]:
# Other necessary imports
import numpy as np
import subprocess
import pandas as pd
import io
import os
import tempfile
In [5]:
# Plotting imports
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
In [5]:
history = {'icat': History('sqlite:///results/icat/hl-1_icat.db'),
           'ical': History('sqlite:///results/ical/hl-1_ical.db'),
           'ina': History('sqlite:///results/ina/hl-1_ina.db'),
           'ikr': History('sqlite:///results/ikr/hl-1_ikr.db'),
           'ikur': History('sqlite:///results/ikur/hl-1_ikur.db'),
           'ito': History('sqlite:///results/ito/hl-1_ito.db'),
           'iha': History('sqlite:///results/iha/hl-1_iha.db'),
           'ik1': History('sqlite:///results/ik1/hl-1_ik1.db')}
In [6]:
# Point each History at its most recent ABC run in the database.
for _, h in history.items():
    h.id = len(h.all_runs())
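As a quick sanity check (a hedged sketch relying on pyabc's History.get_all_populations), the number of populations and final epsilon of each selected run can be printed:
In [ ]:
# Sketch: report the final epsilon reached by each channel fit.
for name, h in history.items():
    pops = h.get_all_populations()
    print("{}: {} populations, final epsilon = {:.4g}".format(
        name, len(pops), pops['epsilon'].iloc[-1]))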
In [7]:
# Generate joint samples by combining draws from all channel posteriors.
def generate_sample(history, n):
    samples = [dict() for _ in range(n)]
    for name, h in history.items():
        df, weights = h.get_distribution(m=0)
        ch_samples = (df.sample(n, weights=weights, replace=True)
                        .to_dict(orient='records'))
        # Namespace each parameter key with its channel name.
        ch_samples = [{name + '.' + key: value for key, value in ch.items()}
                      for ch in ch_samples]
        for c, s in zip(ch_samples, samples):
            s.update(c)
    return samples
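A one-draw check of the combined sample structure (illustrative only): every entry should hold one posterior draw per channel, keyed as '<channel>.<parameter>'.
In [ ]:
# Illustrative check of the combined sample structure.
example = generate_sample(history, 1)[0]
print(len(example), 'parameters, e.g.', sorted(example)[:3])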
In [8]:
# Generate samples
n_samples = 10000
param_samples = generate_sample(history, n_samples)
In [9]:
from channels.hl1 import hl1 as model
In [10]:
model.add_external_par_samples(param_samples)
In [11]:
model.sample({})
Out[11]: (simulation output not shown)
In [12]:
# Target measurements from Dias et al., 2014 (HL1-6 myocytes).
measurements = model.get_experiment_data()
obs = measurements.to_dict()['y']
exp = measurements.to_dict()['exp']
errs = measurements.to_dict()['errs']
In [13]:
measurements
Out[13]: (measurement targets table not shown)
In [14]:
# Treat each entry as a separate experiment
for k in exp:
    exp[k] = k
In [15]:
limits = {'ik1.g_K1': (0, 0.1),
          'incx.k_NCX': (0, 20),
          'icab.g_Cab': (0, 0.001),
          'inab.g_Nab': (0, 0.01),
          'inak.i_NaK_max': (0, 10),
          'serca.V_max': (0, 10),
          'ca_diffusion.tau_tr': (0, 1000),
          'ryanodine_receptors.k_RyR': (0, 0.1)}
prior = Distribution(**{key: RV("uniform", a, b - a)
                        for key, (a, b) in limits.items()})
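pyabc's RV("uniform", a, b - a) follows scipy.stats' loc/scale convention, so each prior is uniform on [a, b]. A minimal check, drawing once from the prior:
In [ ]:
# Minimal check: one draw from the prior should respect every limit.
draw = prior.rvs()
for key, (a, b) in limits.items():
    assert a <= draw[key] <= b, key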
In [15]:
parameters = list(limits.keys())
In [16]:
distance_fn = IonChannelDistance(
    obs=obs,
    exp_map=exp,
    err_bars=errs,
    err_th=0.01)
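IonChannelDistance's internals are not reproduced here; as a rough illustration only (a hypothetical helper, not the library's code), an error-bar-weighted distance with a floor err_th on tiny error bars could look like:
In [ ]:
# Hypothetical sketch of an error-weighted distance; NOT ionchannelABC's code.
def weighted_distance(x, y, err_bars, err_th=0.01):
    keys = sorted(y)
    err = np.array([max(err_bars[k], err_th) for k in keys])
    diff = np.array([x[k] - y[k] for k in keys]) / err
    return float(np.sqrt(np.sum(diff**2)))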
In [17]:
sns.set_context('talk')
g = plot_distance_weights(model, distance_fn)
In [18]:
print(distance_fn.w)
In [19]:
g.savefig('results/extra/dist_weights.pdf')
In [21]:
sns.set_context('talk')
grid1, grid2 = plot_parameter_sensitivity(
    model,
    parameters,
    distance_fn,
    sigma=0.1,
    n_samples=100,
    plot_cutoff=0.05)
In [22]:
grid1.savefig('results/extra/sensitivity.pdf')
grid2.savefig('results/extra/sensitivity_fit.pdf')
In [16]:
db_path = ('sqlite:///' +
           os.path.join(tempfile.gettempdir(), "hl-1_extra.db"))
print(db_path)
In [17]:
# Enable debug logging for the ABC and epsilon modules.
import logging
logging.basicConfig()
abc_logger = logging.getLogger('ABC')
abc_logger.setLevel(logging.DEBUG)
eps_logger = logging.getLogger('Epsilon')
eps_logger.setLevel(logging.DEBUG)
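For long runs it can also help to mirror the debug output to a file (plain standard-library logging; assumes the results/extra directory exists):
In [ ]:
# Optional: also write debug logs to a file (assumes results/extra exists).
fh = logging.FileHandler('results/extra/abc_run.log')
fh.setLevel(logging.DEBUG)
abc_logger.addHandler(fh)
eps_logger.addHandler(fh)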
In [19]:
abc = ABCSMC(models=model,
             parameter_priors=prior,
             distance_function=IonChannelDistance(
                 obs=obs,
                 exp_map=exp,
                 err_bars=errs,
                 err_th=0.01),
             population_size=AdaptivePopulationSize(
                 start_nr_particles=5000,
                 mean_cv=0.2,
                 max_population_size=5000,
                 min_population_size=500),
             summary_statistics=ion_channel_sum_stats_calculator,
             transitions=EfficientMultivariateNormalTransition(),
             eps=MedianEpsilon(),
             sampler=MulticoreEvalParallelSampler(n_procs=12),
             acceptor=IonChannelAcceptor())
In [20]:
abc_id = abc.new(db_path, obs)
In [21]:
history = abc.run(minimum_epsilon=0.01, max_nr_populations=10, min_acceptance_rate=0.001)
In [ ]:
# Continue the same ABC run for additional populations.
history = abc.run(minimum_epsilon=0.01, max_nr_populations=5, min_acceptance_rate=0.001)
In [ ]:
history = abc.run(minimum_epsilon=0.01, max_nr_populations=5, min_acceptance_rate=0.001)
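If the kernel is restarted between these continuation runs, pyabc can resume the stored run instead of starting a new one (sketch; the ABCSMC object must first be rebuilt with the same model, prior and distance):
In [ ]:
# Sketch: resume a stored run after a kernel restart.
# abc = ABCSMC(models=model, parameter_priors=prior, ...)  # rebuild as above
abc.load(db_path, abc_id)
history = abc.run(minimum_epsilon=0.01, max_nr_populations=5,
                  min_acceptance_rate=0.001)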
In [6]:
#db_path = 'sqlite:///results/extra/hl-1_extra.db'
db_path = 'sqlite:////scratch/cph211/tmp/hl-1_extra.db'
history = History(db_path)
history.all_runs()
Out[6]: (list of stored ABC runs not shown)
In [7]:
history.id = 1
In [8]:
evolution = history.get_all_populations()
grid = sns.relplot(x='t', y='epsilon', size='samples', data=evolution[evolution.t >= 0])
grid.savefig('results/extra/eps_evolution.pdf')
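The evolution table also yields the per-generation acceptance rate (accepted particles over total simulations); a short sketch using pyabc's History accessors:
In [ ]:
# Sketch: acceptance rate per generation.
particles = history.get_nr_particles_per_population()
acceptance = particles / evolution.set_index('t')['samples']
print(acceptance[acceptance.index >= 0])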
In [26]:
df, w = history.get_distribution(m=0)
In [27]:
from ionchannelABC import plot_parameters_kde
g = plot_parameters_kde(df, w, limits, aspect=5, height=1.1)
In [21]:
# Generate parameter samples
n_samples = 100
df, w = history.get_distribution(m=0)
th_samples = df.sample(n=n_samples, weights=w, replace=True).to_dict(orient='records')
In [ ]:
# Simulate the model for each posterior parameter sample.
outputs = []
for i, th in enumerate(th_samples):
    output = model.sample(pars=th)
    output['sample'] = i
    output['distribution'] = 'post'
    outputs.append(output)
samples = pd.concat(outputs, ignore_index=True)
In [45]:
samples.head(10)
Out[45]: (first ten rows of samples not shown)
In [46]:
import scipy.stats as st
def evaluate_samples(samples):
    # Summarise each measurement as an equally weighted discrete distribution.
    for measurement in samples['x'].unique():
        filtered = samples[samples.x == measurement]['y'].tolist()
        rv = st.rv_discrete(values=(filtered, [1 / len(filtered)] * len(filtered)))
        print("{}, median: {}".format(measurement, rv.median()))
        print("{}, 95% CI: {}".format(measurement, rv.interval(0.95)))
In [48]:
evaluate_samples(samples)
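A cross-check with plain numpy percentiles (roughly equivalent for equally weighted samples, without building a discrete rv per measurement):
In [ ]:
# Cross-check of medians and 95% intervals using numpy percentiles.
for measurement in samples['x'].unique():
    y = samples.loc[samples.x == measurement, 'y'].values
    lo, med, hi = np.percentile(y, [2.5, 50, 97.5])
    print('{}: median {:.4g}, 95% interval ({:.4g}, {:.4g})'.format(
        measurement, med, lo, hi))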
In [49]:
samp_pivot = samples.pivot(index='sample', columns='x', values='y')
In [50]:
from sklearn.cluster import AffinityPropagation
In [51]:
af = AffinityPropagation()
In [52]:
af.fit(samp_pivot.values)
Out[52]: (fitted AffinityPropagation estimator not shown)
In [53]:
centers = pd.DataFrame(af.cluster_centers_, columns=samp_pivot.columns)
In [55]:
af.cluster_centers_indices_
Out[55]: (cluster centre indices not shown)
In [56]:
sns.distplot(af.labels_, bins=len(af.cluster_centers_))
Out[56]: (cluster label histogram not shown)
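The same information as plain counts (cluster sizes from the fitted labels):
In [ ]:
# Cluster sizes from the fitted labels.
labels, counts = np.unique(af.labels_, return_counts=True)
for lab, cnt in zip(labels, counts):
    print('cluster {}: {} samples'.format(lab, cnt))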
In [9]:
from ionchannelABC import (Experiment, ExperimentStimProtocol)
In [10]:
# Create a new experiment to generate full traces: 100 s of pre-pacing at
# rest followed by 1 Hz stimulation (2 ms at the stimulus level, 998 ms off),
# measuring the final cycle.
n_pulses = 1000
stim_times = [100000] + [2, 998] * n_pulses + [2, 998]
stim_levels = [0] + [40, 0] * n_pulses + [40, 0]
time = np.linspace(0, 1000, 5000)  # interpolation time points

def trace_measurements(data_list):
    import numpy as np  # local import keeps the function self-contained
    data = data_list[0]
    if len(data_list) > 1:
        for log in data_list[1:]:
            data = data.extend(log)
    # Shift simulation time so the measured cycle starts at t = 0.
    simtime = data['environment.time']
    simtime_min = min(simtime)
    simtime = [t - simtime_min for t in simtime]
    data_output = dict()
    data['environment.time'] = simtime
    # Interpolate every logged variable onto the common time points.
    for var in data:
        data_output[var] = np.interp(time, simtime, data[var])
    return data_output

def unwrap(data, ind_var):
    return data[0], True

trace_prot = ExperimentStimProtocol(stim_times, stim_levels,
                                    measure_index=range(len(stim_times) - 2,
                                                        len(stim_times)),
                                    measure_fn=trace_measurements,
                                    post_fn=unwrap)
dias_conditions = dict(T=305, Ca_o=1800, Na_o=1.4e5, K_o=4e3)
trace_exper = Experiment(trace_prot, None, dias_conditions)
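A quick check on the protocol definition: the stimulus lists should describe 100 s of pre-pacing followed by 1 Hz pulses, with the final cycle measured.
In [ ]:
# Sanity check: total protocol duration and segment count.
print('protocol length: {:.0f} s over {} segments'.format(
    sum(stim_times) / 1000, len(stim_times)))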
In [11]:
model.add_experiments([trace_exper])
In [22]:
# Generate traces for each posterior parameter sample.
outputs = []
for i, th in enumerate(th_samples):
    output = model.sample(pars=th, exp_num=2)
    output['sample'] = i
    outputs.append(output)
traces = pd.concat(outputs, ignore_index=True)
In [75]:
import pickle
with open('traces.pkl', 'wb') as f:
    pickle.dump(traces, f)
In [12]:
import pickle
with open('traces.pkl', 'rb') as f:
    traces = pickle.load(f)
In [19]:
traces.x.unique()
Out[19]: (array of logged variable names not shown)
In [134]:
val = pd.DataFrame(traces[traces.x == 'ina.i_Na'].y.tolist())
In [135]:
plotdata = val[9:10].melt()  # single sample trace (row index 9)
In [136]:
sns.lineplot(x='variable', y='value', data=plotdata, estimator=None)
Out[136]: (single-sample trace plot not shown)
In [74]:
sns.lineplot(x='variable', y='value', data=val.melt())  # mean with confidence band across samples
Out[74]: (aggregated trace plot not shown)