In [1]:
from channels.ina_testing import (dias2014_iv,
                                  nakajima_inactivation,
                                  zhang_recovery)
In [2]:
from ionchannelABC.experiment import setup
In [3]:
modelfile = 'models/ina_markov.mmt'
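A quick guard against a mistyped path, using only the standard library:
In [ ]:
# Check the myokit model file exists before building the experiment.
import os
assert os.path.isfile(modelfile)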
In [4]:
observations, model, summary_statistics = setup(modelfile,
                                                dias2014_iv,
                                                nakajima_inactivation,
                                                zhang_recovery)
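Before fitting, it is worth inspecting the assembled measurements. This assumes `setup` returns `observations` as a pandas DataFrame with `x`, `y`, `variance` and `exp_id` columns, which is how they are used later in this notebook:
In [ ]:
observations.head()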
In [5]:
from pyabc import Distribution, RV

limits = {'ina.g_Na': (0., 100.),
          'ina.E_Na_offset': (-100, 100),
          'log_ina.p_1': (-7., 3.),
          'ina.p_2': (1e-7, 0.4),
          'log_ina.p_3': (-7., 3.),
          'ina.p_4': (1e-7, 0.4),
          'log_ina.p_5': (-7., 3.),
          'ina.p_6': (1e-7, 0.4),
          'log_ina.p_7': (-7., 3.),
          'ina.p_8': (1e-7, 0.4)}
prior = Distribution(**{key: RV("uniform", a, b - a)
                        for key, (a, b) in limits.items()})
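The cells below use `log_model`, which is not defined in this section. A minimal sketch of the assumed wrapper: parameters prefixed with `log_` are sampled in base-10 log space (their bounds span 1e-7 to 1e3) and are exponentiated before being passed to the model:
In [ ]:
# Sketch (assumption, not part of ionchannelABC): undo the base-10 log
# transform on log_-prefixed parameters, then simulate as usual.
def log_model(params):
    unlogged = {}
    for key, value in params.items():
        if key.startswith('log_'):
            unlogged[key[4:]] = 10**value
        else:
            unlogged[key] = value
    return model(unlogged)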
In [6]:
# Sanity check: one simulation from the prior should produce a summary
# statistic for every experimental observation.
test = log_model(prior.rvs())
In [8]:
ss = summary_statistics(test)
In [11]:
assert len(ss) == len(observations)
In [9]:
import os
import tempfile

db_path = ("sqlite:///" +
           os.path.join(tempfile.gettempdir(), "hl-1_ina_markov.db"))
print(db_path)
In [10]:
# Log debug output from the ABC, epsilon and CV-estimation components
import logging
logging.basicConfig()
abc_logger = logging.getLogger('ABC')
abc_logger.setLevel(logging.DEBUG)
eps_logger = logging.getLogger('Epsilon')
eps_logger.setLevel(logging.DEBUG)
cv_logger = logging.getLogger('CV Estimation')
cv_logger.setLevel(logging.DEBUG)
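For a long run it can help to mirror the debug output to a file as well; this uses only the standard logging API (the filename here is arbitrary):
In [ ]:
# Optional: also write debug output to a log file.
fh = logging.FileHandler('hl-1_ina_markov_fit.log')
for logger in (abc_logger, eps_logger, cv_logger):
    logger.addHandler(fh)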
In [11]:
from pyabc.populationstrategy import AdaptivePopulationSize, ConstantPopulationSize
from ionchannelABC import theoretical_population_size
pop_size = theoretical_population_size(2, len(limits))
print("Theoretical minimum population size is {} particles".format(pop_size))
In [12]:
from pyabc import ABCSMC
from pyabc.epsilon import MedianEpsilon
from pyabc.sampler import MulticoreEvalParallelSampler, SingleCoreSampler
from ionchannelABC import (IonChannelDistance,
                           EfficientMultivariateNormalTransition,
                           IonChannelAcceptor)
abc = ABCSMC(models=log_model,
             parameter_priors=prior,
             distance_function=IonChannelDistance(
                 exp_id=list(observations.exp_id),
                 variance=list(observations.variance),
                 delta=0.05),
             population_size=ConstantPopulationSize(10000),
             summary_statistics=summary_statistics,
             transitions=EfficientMultivariateNormalTransition(),
             eps=MedianEpsilon(initial_epsilon=100),
             sampler=MulticoreEvalParallelSampler(n_procs=6),
             acceptor=IonChannelAcceptor())
In [13]:
# pyABC expects the measured data as a dict keyed by strings.
obs = observations.to_dict()['y']
obs = {str(k): v for k, v in obs.items()}
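A quick consistency check, assuming the summary statistics computed earlier (`ss`) are keyed by the same strings:
In [ ]:
# The distance function pairs data with simulations by key, so the
# observation keys should coincide with the summary-statistic keys.
assert set(obs) == set(ss)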
In [14]:
abc_id = abc.new(db_path, obs)
In [ ]:
history = abc.run(minimum_epsilon=0.0, max_nr_populations=100,
                  min_acceptance_rate=0.01)
In [ ]:
# Resume the run with a stricter acceptance-rate stopping criterion.
history = abc.run(minimum_epsilon=0.0, max_nr_populations=100,
                  min_acceptance_rate=0.005)
In [17]:
from pyabc import History
# Load the results of a completed run from its saved database.
history = History('sqlite:////scratch/cph211/tmp/hl-1_ina_markov.db')
history.all_runs()
Out[17]:
In [18]:
history.id = 1  # select run 1 from the database
df, w = history.get_distribution(m=0)
In [19]:
df.describe()
Out[19]:
In [20]:
from ionchannelABC import plot_parameters_kde
g = plot_parameters_kde(df, w, limits, aspect=12, height=0.6)
In [21]:
# Generate parameter samples
n_samples = 100
df, w = history.get_distribution(m=0)
th_samples = df.sample(n=n_samples, weights=w, replace=True).to_dict(orient='records')
In [22]:
# Generate simulated summary statistics for each posterior sample
import pandas as pd
sample_frames = []
for i, th in enumerate(th_samples):
    results = summary_statistics(log_model(th))
    output = pd.DataFrame({'x': observations.x,
                           'y': list(results.values()),
                           'exp_id': observations.exp_id})
    output['sample'] = i
    output['distribution'] = 'post'
    sample_frames.append(output)
samples = pd.concat(sample_frames, ignore_index=True)
In [23]:
from ionchannelABC import plot_sim_results
import seaborn as sns
sns.set_context('talk')
g = plot_sim_results(samples, obs=observations)
In [113]:
# Re-run the model at discrete points for exact measurements at -20 mV
discrete_frames = []
for i, th in enumerate(th_samples):
    output = model.sample(pars=th)
    output['sample'] = i
    output['distribution'] = 'post'
    discrete_frames.append(output)
discrete_samples = pd.concat(discrete_frames, ignore_index=True)
In [114]:
# Peak current amplitude at -20 mV
grouped = discrete_samples[discrete_samples['exp']==0].groupby('sample')
def get_amplitude(group):
    return group.loc[group.x == -20, 'y'].iloc[0]
print(grouped.apply(get_amplitude).mean())
print(grouped.apply(get_amplitude).std())
In [115]:
import scipy.stats as st
peak_current = grouped.apply(get_amplitude).tolist()
rv = st.rv_discrete(values=(peak_current,
                            [1/len(peak_current)]*len(peak_current)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [116]:
# Voltage and slope factor at half-activation
import numpy as np
from scipy.optimize import curve_fit
grouped = samples[samples['exp']==1].groupby('sample')
def fit_boltzmann(group):
    def boltzmann(V, Vhalf, Shalf):
        return 1/(1+np.exp((Vhalf-V)/Shalf))
    guess = (-50, 10)
    popt, _ = curve_fit(boltzmann, group.x, group.y, guess)
    return popt
output = grouped.apply(fit_boltzmann).apply(pd.Series)
In [117]:
print(output.mean())
print(output.std())
In [118]:
Vhalf = output[0].tolist()
rv = st.rv_discrete(values=(Vhalf, [1/len(Vhalf)]*len(Vhalf)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [119]:
slope = output[1].tolist()
rv = st.rv_discrete(values=(slope, [1/len(slope)]*len(slope)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [120]:
# Voltage and slope factor at half-inactivation
grouped = samples[samples['exp']==2].groupby('sample')
def fit_boltzmann(group):
    def boltzmann(V, Vhalf, Shalf):
        # Sign is flipped relative to activation: availability falls
        # as the membrane is depolarised.
        return 1/(1+np.exp((V-Vhalf)/Shalf))
    guess = (-50, 10)
    popt, _ = curve_fit(boltzmann, group.x, group.y, guess)
    return popt
output = grouped.apply(fit_boltzmann).apply(pd.Series)
In [121]:
print(output.mean())
print(output.std())
In [122]:
Vhalf = output[0].tolist()
rv = st.rv_discrete(values=(Vhalf, [1/len(Vhalf)]*len(Vhalf)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))
In [123]:
slope = output[1].tolist()
rv = st.rv_discrete(values=(slope, [1/len(slope)]*len(slope)))
print("median: {}".format(rv.median()))
print("95% CI: {}".format(rv.interval(0.95)))