Fitting remaining ion current conductances

Using full action potential (AP) and calcium transient (CaT) measurements to fit the remaining conductances.

Imports


In [1]:
# PyABC imports
from pyabc import (ABCSMC, Distribution, RV,
                   History, MedianEpsilon)
from pyabc.populationstrategy import AdaptivePopulationSize
from pyabc.epsilon import MedianEpsilon
from pyabc.sampler import MulticoreEvalParallelSampler

In [2]:
# Custom imports
from ionchannelABC import (ion_channel_sum_stats_calculator,
                           IonChannelAcceptor,
                           IonChannelDistance,
                           EfficientMultivariateNormalTransition,
                           calculate_parameter_sensitivity,
                           plot_parameter_sensitivity,
                           plot_regression_fit,
                           plot_parameters_kde,
                           plot_distance_weights)


INFO:myokit:Loading Myokit version 1.27.4

In [3]:
# Other necessary imports
import numpy as np
import subprocess
import pandas as pd
import io
import os
import tempfile

In [4]:
# Plotting imports
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
%config Inline.Backend.figure_format = 'retina'

Generate prior including ABC channel results


In [5]:
history = {'icat': History('sqlite:///results/icat/hl-1_icat.db'),
           'ical': History('sqlite:///results/ical/hl-1_ical.db'),
           'ina': History('sqlite:///results/ina/hl-1_ina.db'),
           'ikr': History('sqlite:///results/ikr/hl-1_ikr.db'),
           'ikur': History('sqlite:///results/ikur/hl-1_ikur.db'),
           'ito': History('sqlite:///results/ito/hl-1_ito.db'),
           'iha': History('sqlite:///results/iha/hl-1_iha.db'),
           'ik1': History('sqlite:///results/ik1/hl-1_ik1.db')}

In [6]:
# Point each History at its most recent ABC run (run ids are 1-indexed,
# so the latest run id equals the total run count).
for h in history.values():
    h.id = len(h.all_runs())

In [7]:
# Create a prior from each channel's ABC posterior
from pyabc.transition import MultivariateNormalTransition


class rv_normal_fit(MultivariateNormalTransition):
    """Wrap a pyabc MultivariateNormalTransition so it acts as a 1-D
    random variable keyed by a fully-qualified parameter name
    (e.g. 'icat.g_CaT'), suitable for use inside a Distribution prior.
    """

    def __init__(self, key):
        # key: the single column/parameter name this marginal represents
        self.key = key
        super().__init__()

    def fit(self, X, w):
        """Fit the transition to samples X with weights w.

        Works on a copy so the caller's DataFrame is not renamed in
        place (the previous implementation mutated X.columns).
        """
        X = pd.DataFrame(X).copy()
        X.columns = [self.key]
        super().fit(X, w)

    def rvs(self):
        # super().rvs() returns a pd.Series; unwrap to a plain scalar
        sample = super().rvs()
        return sample.values[0]

    def pdf(self, x):
        # pyabc expects a pd.Series keyed by parameter name
        return super().pdf(pd.Series({self.key: x}))

In [24]:
# Add to priors
prior = {}
for k, h in history.items():
    df, w = h.get_distribution(m=0)
    for key in df.keys():
        dist = rv_normal_fit(k+'.'+key)
        dist.fit(pd.DataFrame(df[key]), w)
        prior[k+'.'+key] = dist


/scratch/cph211/miniconda3/envs/ionchannelABC/lib/python3.6/site-packages/pyabc-0.9.1-py3.6.egg/pyabc/storage/history.py:200: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.
  w_arr = w.w.as_matrix()
/scratch/cph211/miniconda3/envs/ionchannelABC/lib/python3.6/site-packages/pyabc-0.9.1-py3.6.egg/pyabc/transition/multivariatenormal.py:64: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.
  self._X_arr = X.as_matrix()

Create full cell model for ABC


In [9]:
from channels.hl1 import hl1 as model

In [10]:
model.sample({})


Out[10]:
x y exp
0 vrp -71.196756 0
1 apa 119.476141 0
2 apd90 34.084810 0
3 t_ca_rise 40.873996 0
4 t_ca50 664.303847 0
5 t_ca90 2636.223357 0

In [11]:
# Targets from Dias, 2014 from HL1-6 myocyte
measurements = model.get_experiment_data()
obs = measurements.to_dict()['y']
exp = measurements.to_dict()['exp']
errs = measurements.to_dict()['errs']

In [12]:
measurements


Out[12]:
errs exp x y
0 2.0 0 vrp -67.0
1 2.0 0 apa 105.0
2 9.0 0 apd90 42.0
3 2.0 0 t_ca_rise 52.0
4 6.0 0 t_ca50 157.0
5 14.0 0 t_ca90 397.0

In [19]:
# Treat each measurement as its own experiment: map every index onto
# itself so the distance function weights each target independently.
exp = {key: key for key in exp}

In [25]:
# Add additional parameters to be fit using full cell
# measurements as uniform priors.
limits = {'ik1.g_K1': (0, 0.1),
          'incx.k_NCX': (0, 20),
          'icab.g_Cab': (0, 0.001),
          'inab.g_Nab': (0, 0.01),
          'inak.i_NaK_max': (0, 10)}
          #'serca.V_max': (0, 10),
          #'ca_diffusion.tau_tr': (0, 1000)}
          #'ryanodine_receptors.k_RyR': (0, 0.1)}
for key, (a,b) in limits.items():
    prior[key] = RV("uniform", a, b-a)

In [26]:
len(prior)


Out[26]:
87

In [27]:
prior = Distribution(**prior)

Test parameter sensitivity


In [31]:
parameters = list(prior.keys())

In [32]:
distance_fn=IonChannelDistance(
    obs=obs,
    exp_map=exp,
    err_bars=errs,
    err_th=0.01)

In [33]:
sns.set_context('talk')
g = plot_distance_weights(model, distance_fn)



In [34]:
print(distance_fn.w)


{0: {0: 1.47341429363948, 1: 1.8445147259463965, 2: 0.2592385649649729, 3: 0.8605942669748996, 4: 1.2980432081007156, 5: 0.7518237636522056, 6: 0.5123711767213295}}

In [35]:
g.savefig('results/extra/dist_weights.pdf')

In [36]:
fitted, regression_fit, r2 = calculate_parameter_sensitivity(
    model,
    parameters,
    distance_fn,
    sigma=0.025,
    n_samples=5000)

In [37]:
# Finding insensitive parameters: those whose regression coefficient
# magnitude stays below `cutoff` for EVERY experiment.
cutoff = 0.05
fitted_pivot = fitted.pivot(index='param', columns='exp')
# Vectorised over all beta columns instead of hand-enumerating each
# experiment index; keeps working if the number of experiments changes.
insensitive_params = fitted_pivot[
    (fitted_pivot['beta'].abs() < cutoff).all(axis=1)].index.values

In [28]:
insensitive_params = np.array(['ical.q1', 'ical.q2', 'ical.q3', 'ical.q4',
       'ical.q5', 'ical.q9', 'icat.g_CaT', 'icat.p3', 'icat.p4',
       'icat.p5', 'icat.p6', 'icat.q2', 'icat.q3', 'icat.q4', 'icat.q5',
       'icat.q6', 'iha.p3', 'iha.p4', 'iha.p5', 'iha.p6', 'iha.p7',
       'ikr.k_f', 'ikr.p5', 'ikr.p6', 'ikr.q5', 'ikr.q6', 'ikur.k_atau3',
       'ikur.k_iss1', 'ikur.k_iss2', 'ikur.k_itau1', 'ikur.k_itau2',
       'ina.p2', 'ina.p3', 'ina.p4', 'ina.q11', 'ina.r11', 'ina.r12',
       'ina.r13', 'ina.r16', 'ito.k_xss1', 'ito.k_xtau1', 'ito.k_xtau2',
       'ito.k_xtau3', 'ito.k_yss1', 'ito.k_yss2', 'ito.k_ytau1',
       'ito.k_ytau2', 'ito.k_ytau3', 'ito.k_ytau4'])

In [29]:
len(insensitive_params)


Out[29]:
49

In [38]:
sns.set_context('talk')
grid = plot_parameter_sensitivity(fitted, plot_cutoff=0.05)



In [40]:
grid2 = plot_regression_fit(regression_fit, r2)



In [41]:
grid.savefig('results/extra/sensitivity.pdf')
grid2.savefig('results/extra/sensitivity_fit.pdf')

In [30]:
# Generate a sample from history items insensitive to full cell model measurements
def generate_sample(history, insensitive_params, n):
    """Return a list of n parameter dictionaries.

    Each dict holds one posterior draw per channel, restricted to the
    parameters named in `insensitive_params` (keys are 'channel.param').
    """
    samples = [dict() for _ in range(n)]
    for name, h in history.items():
        df, weights = h.get_distribution(m=0)
        draws = df.sample(n, weights=weights, replace=True) \
                  .to_dict(orient='records')
        for target, draw in zip(samples, draws):
            target.update({name + '.' + p: v for p, v in draw.items()
                           if (name + '.' + p) in insensitive_params})
    return samples

In [31]:
# Generate samples
n_samples = 5000
param_samples = generate_sample(history, insensitive_params, n_samples)

In [32]:
model.add_external_par_samples(param_samples)

In [33]:
# New prior without insensitive parameters
prior = {}
for k, h in history.items():
    df, w = h.get_distribution(m=0)
    for key in df.keys():
        if (k+'.'+key) not in insensitive_params:
            dist = rv_normal_fit(k+'.'+key)
            dist.fit(pd.DataFrame(df[key]), w)
            prior[k+'.'+key] = dist

In [34]:
limits = {'ik1.g_K1': (0, 0.1),
          'incx.k_NCX': (0, 20),
          'icab.g_Cab': (0, 0.001),
          'inab.g_Nab': (0, 0.01),
          'inak.i_NaK_max': (0, 10)}
          #'serca.V_max': (0, 10),
          #'ca_diffusion.tau_tr': (0, 1000)}
          #'ryanodine_receptors.k_RyR': (0, 0.1)}
for key, (a,b) in limits.items():
    prior[key] = RV("uniform", a, b-a)

In [35]:
prior = Distribution(**prior)

Initialise database


In [36]:
db_path = ('sqlite:///' + 
           os.path.join(tempfile.gettempdir(), "hl-1_extra-fullABC.db"))
print(db_path)


sqlite:////scratch/cph211/tmp/hl-1_extra-fullABC.db

In [37]:
# Let's log all the sh!t
import logging
logging.basicConfig()
abc_logger = logging.getLogger('ABC')
abc_logger.setLevel(logging.DEBUG)
eps_logger = logging.getLogger('Epsilon')
eps_logger.setLevel(logging.DEBUG)
cv_logger = logging.getLogger('CV Estimation')
cv_logger.setLevel(logging.DEBUG)

In [38]:
from pyabc.sampler import SingleCoreSampler
from pyabc.populationstrategy import ConstantPopulationSize

In [39]:
abc = ABCSMC(models=model,
             parameter_priors=prior,
             distance_function=IonChannelDistance(
                 obs=obs,
                 exp_map=exp,
                 err_bars=errs,
                 err_th=0.01),
             population_size=ConstantPopulationSize(5000),
             #population_size=AdaptivePopulationSize(
             #    start_nr_particles=5000,
             #    mean_cv=0.2,
             #    max_population_size=5000,
             #    min_population_size=100),
             summary_statistics=ion_channel_sum_stats_calculator,
             transitions=EfficientMultivariateNormalTransition(scaling=0.2),
             eps=MedianEpsilon(median_multiplier=1.5),
             sampler=MulticoreEvalParallelSampler(n_procs=6),
             #sampler=SingleCoreSampler(),
             acceptor=IonChannelAcceptor())


DEBUG:ABC:ion channel weights: {0: 1.6223175965665235, 1: 1.6223175965665235, 2: 0.3605150214592274, 3: 1.6223175965665235, 4: 0.5407725321888411, 5: 0.23175965665236045}
DEBUG:Epsilon:init quantile_epsilon initial_epsilon=from_sample, quantile_multiplier=1.5

In [40]:
abc_id = abc.new(db_path, obs)


INFO:History:Start <ABCSMC(id=10, start_time=2018-11-08 17:00:40.888809, end_time=None)>
/scratch/cph211/miniconda3/envs/ionchannelABC/lib/python3.6/site-packages/pyabc-0.9.1-py3.6.egg/pyabc/epsilon.py:321: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.
  distances = weighted_distances.distance.as_matrix()
/scratch/cph211/miniconda3/envs/ionchannelABC/lib/python3.6/site-packages/pyabc-0.9.1-py3.6.egg/pyabc/epsilon.py:325: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.
  weights = weighted_distances.w.as_matrix()
INFO:Epsilon:initial epsilon is 145.16114842760413

In [ ]:
history = abc.run(minimum_epsilon=0.01, max_nr_populations=5, min_acceptance_rate=0.001)


INFO:ABC:t:0 eps:145.16114842760413
DEBUG:ABC:now submitting population 0

In [ ]:
history = abc.run(minimum_epsilon=0.01, max_nr_populations=5, min_acceptance_rate=0.001)

In [ ]:
history = abc.run(minimum_epsilon=0.01, max_nr_populations=5, min_acceptance_rate=0.001)

In [ ]:
history = abc.run(minimum_epsilon=0.01, max_nr_populations=5, min_acceptance_rate=0.001)

In [ ]:
history = abc.run(minimum_epsilon=0.01, max_nr_populations=5, min_acceptance_rate=0.001)

Results analysis


In [32]:
#db_path = 'sqlite:///results/extra/hl-1_extra.db'
db_path = 'sqlite:////scratch/cph211/tmp/hl-1_extra-fullABC.db'
history = History(db_path)
history.all_runs()


Out[32]:
[<ABCSMC(id=1, start_time=2018-10-25 10:33:29.505547, end_time=2018-10-25 13:47:42.332862)>,
 <ABCSMC(id=2, start_time=2018-10-26 09:45:50.616048, end_time=2018-10-26 12:45:04.971593)>,
 <ABCSMC(id=3, start_time=2018-10-27 12:49:46.190110, end_time=2018-10-27 17:41:21.864034)>,
 <ABCSMC(id=4, start_time=2018-10-29 08:06:49.407689, end_time=2018-10-29 17:28:28.269884)>,
 <ABCSMC(id=5, start_time=2018-10-29 18:04:30.030759, end_time=2018-10-30 02:00:51.793046)>,
 <ABCSMC(id=6, start_time=2018-10-30 09:21:10.624990, end_time=2018-10-30 15:38:00.489099)>,
 <ABCSMC(id=7, start_time=2018-11-01 11:12:27.634390, end_time=2018-11-01 17:35:40.242678)>,
 <ABCSMC(id=8, start_time=2018-11-05 13:22:01.578477, end_time=2018-11-05 14:20:55.569620)>,
 <ABCSMC(id=9, start_time=2018-11-06 11:07:12.510896, end_time=None)>]

In [33]:
#history.id = 15 # run with err=0.1 and scaling=0.25
#history.id = 17 # run with err=0.01 and scaling=0.5
#history.id = 23 # run with new icat channel fitting using v_offset and no ion concentrations
#history.id = 24 # run with new icat channel fitting using v_offset, no ion concentrations and no APA
history.id = 9

In [8]:
evolution = history.get_all_populations()
grid = sns.relplot(x='t', y='epsilon', size='samples', data=evolution[evolution.t>=0])
#grid.savefig('results/extra/eps_evolution.pdf')



In [34]:
df, w = history.get_distribution(m=0)

In [35]:
df_extra = df.loc[:,[k for k in limits.keys()]]

In [36]:
from ionchannelABC import plot_parameters_kde
g = plot_parameters_kde(df_extra, w, limits, aspect=5, height=1.1)


Samples for quantitative analysis


In [37]:
# Generate parameter samples
n_samples = 100
df, w = history.get_distribution(m=0)
th_samples = df.sample(n=n_samples, weights=w, replace=True).to_dict(orient='records')

In [38]:
# Generate sim results samples.
# Collect per-sample frames in a list and concatenate once:
# DataFrame.append in a loop is quadratic and was removed in pandas 2.0.
outputs = []
for i, th in enumerate(th_samples):
    output = model.sample(pars=th)
    output['sample'] = i
    output['distribution'] = 'post'
    outputs.append(output)
# sort=False silences the pandas alignment FutureWarning seen previously.
samples = (pd.concat(outputs, ignore_index=True, sort=False)
           if outputs else pd.DataFrame({}))


/scratch/cph211/miniconda3/envs/ionchannelABC/lib/python3.6/site-packages/pandas/core/frame.py:6211: FutureWarning: Sorting because non-concatenation axis is not aligned. A future version
of pandas will change to not sort by default.

To accept the future behavior, pass 'sort=False'.

To retain the current behavior and silence the warning, pass 'sort=True'.

  sort=sort)

Evaluate results


In [39]:
import scipy.stats as st


def evaluate_samples(samples):
    """Print the median and 95% CI of each measurement's sampled values.

    `samples` is a long-format DataFrame with columns 'x' (measurement
    name) and 'y' (simulated value). Duplicate y values are aggregated
    into one support point with summed probability: modern scipy raises
    if rv_discrete receives duplicate xk, and duplicates are expected
    here because posterior samples are drawn with replacement.
    """
    for measurement in samples['x'].unique():
        values = samples.loc[samples.x == measurement, 'y']
        # Empirical distribution: each unique value weighted by its count.
        probs = values.value_counts(normalize=True).sort_index()
        rv = st.rv_discrete(values=(probs.index.tolist(), probs.tolist()))
        print("{}, median: {}".format(measurement, rv.median()))
        print("{}, 95% CI: {}".format(measurement, rv.interval(0.95)))

In [40]:
evaluate_samples(samples)


vrp, median: -73.63377956176151
vrp, 95% CI: (-75.00854470785241, -64.25515639537589)
apa, median: 103.0054932714966
apa, 95% CI: (92.03557776332427, 117.87973148907568)
apd90, median: 41.55234894783435
apd90, 95% CI: (23.95423664912648, 126.36275421663387)
ca_amp, median: 0.19509103293842772
ca_amp, 95% CI: (0.09135106513015805, 0.8002630334838956)
t_ca_rise, median: 34.898753186621306
t_ca_rise, 95% CI: (26.846554348278524, 122.5109254679576)
t_ca50, median: 125.0334327023177
t_ca50, 95% CI: (114.1090662019172, 198.95284956805077)
t_ca90, median: 360.9864590043934
t_ca90, 95% CI: (321.560835368449, 445.98806166855087)

Test for specific modes of AP model


In [49]:
samp_pivot = samples.pivot(index='sample', columns='x', values='y')

In [50]:
from sklearn.cluster import AffinityPropagation

In [51]:
af = AffinityPropagation()

In [52]:
af.fit(samp_pivot.values)


Out[52]:
AffinityPropagation(affinity='euclidean', convergence_iter=15, copy=True,
          damping=0.5, max_iter=200, preference=None, verbose=False)

In [53]:
centers = pd.DataFrame(af.cluster_centers_, columns=samp_pivot.columns)

In [55]:
af.cluster_centers_indices_


Out[55]:
array([  32,   45,  334,  576,  618,  644,  769,  796,  938,  964, 1000,
       1056, 1062, 1122, 1130, 1182, 1238, 1244, 1258, 1263, 1276, 1289,
       1331, 1394, 1475, 1509, 1589, 1604, 1642, 1688, 1703, 1736, 1899,
       1905, 1965, 1971, 2006, 2068, 2082, 2102, 2134, 2137, 2141, 2220,
       2328, 2412, 2451, 2642, 2700, 2746, 2754, 2770, 2842, 2853, 2864,
       2925, 2963, 3027, 3028, 3029, 3083, 3093, 3242, 3264, 3377, 3385,
       3433, 3534, 3551, 3575, 3606, 3691, 3840, 3865, 3870, 4001, 4016,
       4043, 4115, 4121, 4248, 4282, 4351, 4360, 4406, 4410, 4431, 4595,
       4632, 4683, 4695, 4701, 4784])

In [56]:
sns.distplot(af.labels_, bins=len(af.cluster_centers_))


Out[56]:
<matplotlib.axes._subplots.AxesSubplot at 0x7fd2d015b160>

Plot sample of voltage traces


In [41]:
from ionchannelABC import (Experiment, ExperimentStimProtocol)

In [87]:
# Create new experiment to generate traces
pulses = range(5, 45, 5)
stim_times = [1000000] + [2, 4998]*len(pulses)
stim_levels = [0]
for level in pulses:
    stim_levels.append(level)
    stim_levels.append(0)
#pace_time = np.linspace(0, 10000, 10000)
pace_time = np.linspace(0, sum(stim_times[1:]), sum(stim_times[1:]))

def trace_measurements(data_list):
    """Post-process a list of simulation logs into traces interpolated
    onto the notebook-level `pace_time` grid.

    data_list: list of myokit DataLog-like objects, one per protocol
    step in `measure_index`. Returns a dict mapping variable name to a
    numpy array of values interpolated onto `pace_time`.
    """
    import numpy as np
    data = data_list[0]
    if len(data_list) > 1:
        for log in data_list[1:]:
            # NOTE(review): assumes DataLog.extend returns the merged
            # log (not None, as list.extend would) — confirm against
            # the myokit API.
            data = data.extend(log)
    # Rebase time so the first measured point is at t = 0.
    simtime = data['environment.time']
    simtime_min = min(simtime)
    simtime = [t - simtime_min for t in simtime]
    data_output = dict()
    data['environment.time'] = simtime
    for var in data:
        # Interpolate every logged variable onto the common grid;
        # relies on `pace_time` defined earlier in this cell.
        data_output[var] = np.interp(pace_time, simtime, data[var])
    return data_output
def unwrap(data, ind_var):
    # Post-processing hook: return the single measurement dict and
    # True to flag the result as final.
    return data[0], True

trace_prot = ExperimentStimProtocol(stim_times, stim_levels, 
                                    #measure_index=range(len(stim_times)-4, len(stim_times)),
                                    measure_index=range(1, len(stim_times)),
                                    measure_fn=trace_measurements,
                                    post_fn=unwrap)
dias_conditions = dict(T=305, Ca_o=1800, Na_o=1.4e5, K_o=4e3)
trace_exper = Experiment(trace_prot, None, dias_conditions)

In [88]:
model.add_experiments([trace_exper])

In [93]:
# Generate traces for the first 10 posterior samples.
# Accumulate frames in a list and concatenate once (DataFrame.append
# in a loop is quadratic and was removed in pandas 2.0).
trace_frames = []
for i, th in enumerate(th_samples[0:10]):
    output = model.sample(pars=th, exp_num=2)
    output['sample'] = i
    trace_frames.append(output)
traces = (pd.concat(trace_frames, ignore_index=True, sort=False)
          if trace_frames else pd.DataFrame({}))

In [45]:
import pickle
with open('traces.pkl','wb') as f:
    pickle.dump(traces, f)

In [37]:
import pickle
with open('traces.pkl','rb') as f:
    traces = pickle.load(f)

In [94]:
traces.x.unique()


Out[94]:
array(['membrane.V', 'ical.d', 'ical.f', 'ical.fCa', 'ina.m', 'ina.h',
       'ina.j', 'ito.xto', 'ito.yto', 'ikr.C_K1', 'ikr.C_K2', 'ikr.O_K',
       'ikr.I_K', 'ikur.a_ur', 'ikur.i_ur', 'icat.b', 'icat.g', 'iha.y',
       'ryanodine_receptors.P_open', 'ca_conc_sr.Ca_SRuptake',
       'ca_conc_sr.Ca_SRrelease', 'ca_conc.Ca_i', 'na_conc.Na_i',
       'k_conc.K_i', 'environment.time', 'ina.E_Na', 'ina.G_Na',
       'ina.i_Na', 'ina.m_ss', 'ina.tau_m', 'ina.h_ss', 'ina.tau_h_high',
       'ina.tau_h_low', 'ina.tau_h', 'ina.j_ss', 'ina.tau_j_high',
       'ina.tau_j_low', 'ina.tau_j', 'ikr.G_Kr', 'ikr.E_Kr', 'ikr.i_Kr',
       'ikr.alpha_a0', 'ikr.alpha_a1', 'ikr.alpha_i', 'ikr.beta_a0',
       'ikr.beta_a1', 'ikr.beta_i', 'ikr.C_K0', 'ikur.G_Kur',
       'ikur.i_Kur', 'ikur.a_ur_ss', 'ikur.tau_a_ur', 'ikur.i_ur_ss',
       'ikur.tau_i_ur', 'ito.G_to', 'ito.i_to', 'ito.xto_ss',
       'ito.tau_xto', 'ito.yto_ss', 'ito.tau_yto', 'iha.G_ha',
       'iha.i_haNa', 'iha.i_haK', 'iha.i_ha', 'iha.y_ss', 'iha.tau_y',
       'icat.G_CaT', 'icat.i_CaT', 'icat.bss', 'icat.tau_b', 'icat.gss',
       'icat.tau_g', 'ical.i_CaL', 'ical.open', 'ical.G_CaL', 'ical.d_ss',
       'ical.alpha_d', 'ical.beta_d', 'ical.gamma_d', 'ical.tau_d',
       'ical.f_ss', 'ical.tau_f', 'ical.alpha_fCa', 'ical.beta_fCa',
       'ical.gamma_fCa', 'ical.fCa_ss', 'ical.k', 'ik1.i_K1',
       'incx.i_NCX', 'inak.f_NaK', 'inak.i_NaK', 'icab.i_Cab',
       'inab.i_Nab', 'ryanodine_receptors.J_RyR',
       'ryanodine_receptors.J_RyRCaffeine', 'ryanodine_receptors.K_mRyR',
       'ryanodine_receptors.P_closed', 'serca.J_SERCA',
       'leak_flux.J_leak', 'ca_diffusion.J_tr',
       'ca_conc_sr.beta_SRrelease', 'ca_conc_sr.Ca_SR', 'na_conc.E_Na',
       'k_conc.E_K', 'ca_conc.J_CaSR', 'ca_conc.J_CaSL', 'ca_conc.beta',
       'ca_conc.Ca_subSR', 'ca_conc.Ca_subSL', 'ca_conc.E_Ca',
       'dot(membrane.V)', 'dot(ical.d)', 'dot(ical.f)', 'dot(ical.fCa)',
       'dot(ina.m)', 'dot(ina.h)', 'dot(ina.j)', 'dot(ito.xto)',
       'dot(ito.yto)', 'dot(ikr.C_K1)', 'dot(ikr.C_K2)', 'dot(ikr.O_K)',
       'dot(ikr.I_K)', 'dot(ikur.a_ur)', 'dot(ikur.i_ur)', 'dot(icat.b)',
       'dot(icat.g)', 'dot(iha.y)', 'dot(ryanodine_receptors.P_open)',
       'dot(ca_conc_sr.Ca_SRuptake)', 'dot(ca_conc_sr.Ca_SRrelease)',
       'dot(ca_conc.Ca_i)', 'dot(na_conc.Na_i)', 'dot(k_conc.K_i)'],
      dtype=object)

In [95]:
time = traces[traces.x=='environment.time'].y.tolist()[0]

In [102]:
#plotlims = (4950, 5400)
plotlims = [(4950+5000*i,5400+5000*i) for i in range(7)]

In [106]:
#idxlims = np.intersect1d(np.where(time >= plotlims[0]), np.where(time <= plotlims[1]))
idxlims = [np.intersect1d(np.where(time >= plotlims[i][0]), np.where(time <= plotlims[i][1]))
           for i in range(7)]

In [108]:
#time_limited = time[idxlims[0]:idxlims[-1]]
time_limited = time[idxlims[0][0]:idxlims[0][-1]]

In [110]:
traces = traces.set_index(['x','sample'])

In [111]:
del traces['exp']

In [112]:
traces.reset_index(inplace=True)

In [140]:
rows = []

In [141]:
#_ = traces.apply(lambda row: [rows.append([row['x'], row['sample'], yi, ti])
#                              for (ti, yi) in zip(time_limited, row.y[idxlims[0]:idxlims[-1]])], axis=1)
for i in range(7):
    _ = traces.apply(lambda row: [rows.append([row['x'], row['sample']+10*i, yi, ti])
                                  for (ti, yi) in zip(time_limited, row.y[idxlims[i][0]:idxlims[i][-1]])], axis=1)

In [142]:
traces_new = pd.DataFrame(rows, columns=['x', 'sample', 'y', 'time']).set_index(['x', 'sample'])

In [143]:
traces_new.reset_index(inplace=True)

In [144]:
no_ap_samples = []

In [161]:
# Flag samples whose membrane potential never rises above 0 mV (no
# action potential fired). Iterate over the actual sample ids present:
# the previous `range(max(...))` skipped the highest-numbered sample.
for i in sorted(traces_new['sample'].unique()):
    if max(traces_new[(traces_new['sample'] == i) &
                      (traces_new['x'] == 'membrane.V')]['y']) < 0:
        no_ap_samples.append(i)

In [162]:
no_ap_samples


Out[162]:
[2, 3, 9, 12]

In [167]:
# Drop all flagged no-AP samples with a single vectorised filter
# instead of re-filtering the whole frame once per sample id.
traces_new = traces_new[~traces_new['sample'].isin(no_ap_samples)]

In [192]:
plot = sns.lineplot(x='time',y='y',hue='x',
                    data=traces_new[(traces_new.x=='ik1.i_K1') | (traces_new.x=='incx.i_NCX') |
                                    (traces_new.x=='inak.i_NaK') | (traces_new.x=='iha.i_ha') |
                                    (traces_new.x=='inab.i_Nab') | (traces_new.x=='icab.i_Cab')],
                    estimator=np.median, ci=95)



In [193]:
fig = plot.get_figure()
fig.savefig('results/extra/small_currents.pdf')

In [225]:
grid = sns.relplot(x='time', y='y', data=traces_new, row='x',
                   estimator=np.median, ci=95, kind='line',
                   facet_kws={'sharex': 'row',
                              'sharey': False})