In [1]:
# SBML model we want to import
sbml_file = 'model_steadystate_scaled.xml'
# Name of the model that will also be the name of the python module
model_name = 'model_steadystate_scaled'
# Directory to which the generated model code is written
model_output_dir = model_name
import libsbml
import importlib
import amici
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
In [2]:
sbml_reader = libsbml.SBMLReader()
sbml_doc = sbml_reader.readSBML(sbml_file)
sbml_model = sbml_doc.getModel()
dir(sbml_doc)
print('Species: ', [s.getId() for s in sbml_model.getListOfSpecies()])
print('\nReactions:')
for reaction in sbml_model.getListOfReactions():
    reactants = ' + '.join(['%s %s' % (int(r.getStoichiometry()) if r.getStoichiometry() > 1 else '', r.getSpecies()) for r in reaction.getListOfReactants()])
    products = ' + '.join(['%s %s' % (int(r.getStoichiometry()) if r.getStoichiometry() > 1 else '', r.getSpecies()) for r in reaction.getListOfProducts()])
    reversible = '<' if reaction.getReversible() else ''
    print('%3s: %10s %1s->%10s\t\t[%s]' % (reaction.getId(),
                                           reactants,
                                           reversible,
                                           products,
                                           libsbml.formulaToL3String(reaction.getKineticLaw().getMath())))
In [3]:
# Create an SbmlImporter instance for our SBML model
sbml_importer = amici.SbmlImporter(sbml_file)
In this example, we want to specify fixed parameters, observables and a $\sigma$ parameter. Unfortunately, the latter two are not part of the SBML standard. However, they can be provided to amici.SbmlImporter.sbml2amici
as demonstrated in the following.
In [4]:
constantParameters = ['k0']
Specifying observables is beyond the scope of SBML. Here we define them manually.

If you are looking for a more scalable way of defining observables, ideally inside the SBML file, check out https://github.com/ICB-DCM/PEtab. There, a convention based on SBML's AssignmentRules is used to specify model outputs within the SBML file.
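To give an idea of that convention, here is a minimal, hypothetical sketch of how an observable could be encoded as an AssignmentRule using libsbml. The parameter id and formula below mirror the observable_x1_scaled observable defined later in this notebook, but this snippet is illustrative only and is not applied to the model file used here.
# hypothetical sketch: encode an observable as an SBML AssignmentRule (PEtab-style)
obs = sbml_model.createParameter()
obs.setId('observable_x1_scaled')   # illustrative id
obs.setConstant(False)
rule = sbml_model.createAssignmentRule()
rule.setVariable('observable_x1_scaled')
rule.setMath(libsbml.parseL3Formula('scaling_x1 * x1'))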
In [5]:
# Define observables
observables = {
    'observable_x1': {'name': '', 'formula': 'x1'},
    'observable_x2': {'name': '', 'formula': 'x2'},
    'observable_x3': {'name': '', 'formula': 'x3'},
    'observable_x1_scaled': {'name': '', 'formula': 'scaling_x1 * x1'},
    'observable_x2_offsetted': {'name': '', 'formula': 'offset_x2 + x2'},
    'observable_x1withsigma': {'name': '', 'formula': 'x1'}
}
In [6]:
sigmas = {'observable_x1withsigma': 'observable_x1withsigma_sigma'}
Now we can generate the Python module for our model. amici.SbmlImporter.sbml2amici will symbolically derive the sensitivity equations, generate C++ code for model simulation, and assemble the Python module. Standard logging verbosity levels can be passed to this function to see timestamped progress during code generation.
In [7]:
import logging
sbml_importer.sbml2amici(model_name,
                         model_output_dir,
                         verbose=logging.INFO,
                         observables=observables,
                         constant_parameters=constantParameters,
                         sigmas=sigmas)
In [8]:
sys.path.insert(0, os.path.abspath(model_output_dir))
model_module = importlib.import_module(model_name)
From the imported module we can get an instance of our model, from which we can retrieve information such as parameter names:
In [9]:
model = model_module.getModel()
print("Model name:", model.getName())
print("Model parameters:", model.getParameterIds())
print("Model outputs: ", model.getObservableIds())
print("Model states: ", model.getStateIds())
A model imported via the Python interface can also be used from Matlab:
In [10]:
print('''To use the Python-generated model in Matlab, ensure you have added AMICI to your Matlab path and run:
modelName = '{model_name}';
modelDir = '{model_output_dir}';
amimodel.compileAndLinkModel(modelName, modelDir, [], [], [], []);
amimodel.generateMatlabWrapper({nx}, {ny}, {np}, {nk}, {nz}, {o2flag}, [], [ modelDir '/simulate_' modelName '.m'], modelName, 'lin', 1, 1);
'''.format(model_name=model_name,
           model_output_dir=os.path.abspath(model_output_dir),
           nx=model.nxtrue_rdata,
           ny=model.nytrue,
           np=model.np(),
           nk=model.nk(),
           nz=model.nz,
           o2flag=model.o2mode))
This will use the Matlab MEX compiler to generate a MEX file and will create a Matlab wrapper script. You only have to run this once after generating the Python model. Afterwards, you can use the model from Matlab just as if it had been generated with amiwrap.m directly.
In [11]:
# Create Model instance
model = model_module.getModel()
# set timepoints for which we want to simulate the model
model.setTimepoints(np.linspace(0, 60, 60))
# Create solver instance
solver = model.getSolver()
# Run simulation using default model parameters and solver options
rdata = amici.runAmiciSimulation(model, solver)
In [12]:
print('Simulation was run using model default parameters as specified in the SBML model:')
print(model.getParameters())
Simulation results are provided as numpy.ndarrays in the returned dictionary:
In [13]:
#np.set_printoptions(threshold=8, edgeitems=2)
for key, value in rdata.items():
    print('%12s: ' % key, value)
You might have wondered about expressions such as rdata['y'].flatten() or amici.ExpData(model.get()). These are currently required to convert between Python data types and internal C++ data types; the SWIG interface does not yet perform this conversion automatically.
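For illustration, here is a minimal sketch of assembling an ExpData by hand from the simulated observables. It follows the pattern mentioned above and is an assumption-laden example rather than the canonical API (depending on the AMICI version, amici.ExpData(model) may be accepted directly instead of model.get(); the standard deviation of 0.1 is an illustrative value).
# sketch: build an ExpData manually from simulated outputs
edata_manual = amici.ExpData(model.get())
edata_manual.setTimepoints(model.getTimepoints())
# setObservedData expects a flat (row-major) vector, hence .flatten()
edata_manual.setObservedData(rdata['y'].flatten())
# constant standard deviation for all data points (illustrative value)
edata_manual.setObservedDataStdDev(0.1)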
In [14]:
print(model.getParameters())
In [15]:
import amici.plotting
amici.plotting.plotStateTrajectories(rdata, model = model)
amici.plotting.plotObservableTrajectories(rdata, model = model)
Often, model parameters need to be inferred from experimental data. This is commonly done by maximizing the likelihood of observing the data given the current model parameters. AMICI will compute this likelihood if experimental data is provided to amici.runAmiciSimulation as an optional third argument. Measurements along with their standard deviations are provided through an amici.ExpData instance.
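For reference, under the default Gaussian noise model, with measurements $\bar{y}_{it}$, simulated observables $y_{it}(\theta)$ and standard deviations $\sigma_{it}$ (indices running over observables $i$ and time points $t$), the log-likelihood reported as llh is

$$\log \mathcal{L}(\theta) = -\frac{1}{2} \sum_{i,t} \left[ \log\left(2\pi\sigma_{it}^2\right) + \left(\frac{\bar{y}_{it} - y_{it}(\theta)}{\sigma_{it}}\right)^2 \right].$$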
In [16]:
# Create model instance and set time points for simulation
model = model_module.getModel()
model.setTimepoints(np.linspace(0, 10, 11))
# Create solver instance, keep default options
solver = model.getSolver()
# Run simulation without experimental data
rdata = amici.runAmiciSimulation(model, solver)
# Create ExpData instance from simulation results
edata = amici.ExpData(rdata, 1.0, 0.0)
# Re-run simulation, this time passing "experimental data"
rdata = amici.runAmiciSimulation(model, solver, edata)
print('Log-likelihood %f' % rdata['llh'])
Numerical error tolerances are often critical for obtaining accurate results. For the state variables, integration errors can be controlled using setRelativeTolerance and setAbsoluteTolerance. Similar functions exist for sensitivities, steady states and quadratures. We initially compute a reference solution using extremely low tolerances and then assess the integration error for different values of the absolute and relative tolerances.
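As a rough guide (based on how the underlying CVODES integrator weights its local error test, not on anything specific to this model), the per-step error of each state $x_i$ is kept below approximately

$$\mathrm{rtol} \cdot \lvert x_i \rvert + \mathrm{atol},$$

so the relative tolerance dominates for states far from zero and the absolute tolerance dominates for states close to zero.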
In [17]:
solver.setRelativeTolerance(1e-16)
solver.setAbsoluteTolerance(1e-16)
solver.setSensitivityOrder(amici.SensitivityOrder.none)
rdata_ref = amici.runAmiciSimulation(model, solver, edata)

def get_simulation_error(solver):
    rdata = amici.runAmiciSimulation(model, solver, edata)
    return np.mean(np.abs(rdata['x'] - rdata_ref['x'])), np.mean(np.abs(rdata['llh'] - rdata_ref['llh']))

def get_errors(tolfun, tols):
    solver.setRelativeTolerance(1e-16)
    solver.setAbsoluteTolerance(1e-16)
    x_errs = []
    llh_errs = []
    for tol in tols:
        getattr(solver, tolfun)(tol)
        x_err, llh_err = get_simulation_error(solver)
        x_errs.append(x_err)
        llh_errs.append(llh_err)
    return x_errs, llh_errs

atols = np.logspace(-5, -15, 100)
atol_x_errs, atol_llh_errs = get_errors('setAbsoluteTolerance', atols)

rtols = np.logspace(-5, -15, 100)
rtol_x_errs, rtol_llh_errs = get_errors('setRelativeTolerance', rtols)

fig, axes = plt.subplots(1, 2, figsize=(15, 5))

def plot_error(tols, x_errs, llh_errs, tolname, ax):
    ax.plot(tols, x_errs, 'r-', label='x')
    ax.plot(tols, llh_errs, 'b-', label='llh')
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlabel(f'{tolname} tolerance')
    ax.set_ylabel('average numerical error')
    ax.legend()

plot_error(atols, atol_x_errs, atol_llh_errs, 'absolute', axes[0])
plot_error(rtols, rtol_x_errs, rtol_llh_errs, 'relative', axes[1])
# reset tolerances to their default values
solver.setRelativeTolerance(1e-8)
solver.setAbsoluteTolerance(1e-16)
In [18]:
model = model_module.getModel()
model.setTimepoints(np.linspace(0, 10, 11))
model.requireSensitivitiesForAllParameters() # sensitivities w.r.t. all parameters
# model.setParameterList([1, 2]) # sensitivities
# w.r.t. the specified parameters
model.setParameterScale(amici.ParameterScaling.none) # parameters are used as-is (not log-transformed)
solver = model.getSolver()
solver.setSensitivityMethod(amici.SensitivityMethod.forward) # forward sensitivity analysis
solver.setSensitivityOrder(amici.SensitivityOrder.first) # first-order sensitivities
rdata = amici.runAmiciSimulation(model, solver)
# print sensitivity-related results
for key, value in rdata.items():
    if key.startswith('s'):
        print('%12s: ' % key, value)
In [19]:
# Set model options
model = model_module.getModel()
p_orig = np.array(model.getParameters())
p_orig[list(model.getParameterIds()).index('observable_x1withsigma_sigma')] = 0.1 # Change default parameter
model.setParameters(p_orig.flatten())
model.setParameterScale(amici.ParameterScaling.none)
model.setTimepoints(np.linspace(0, 10, 21))
solver = model.getSolver()
solver.setMaxSteps(10**4) # Set maximum number of steps for the solver
# simulate time-course to get artificial data
rdata = amici.runAmiciSimulation(model, solver)
edata = amici.ExpData(rdata, 1.0, 0)
edata.fixedParameters = model.getFixedParameters()
# set sigma to 1.0 except for observable 5, so that p[7] is used instead
# (if the sigma is parameterized, the corresponding ExpData entries must be NaN,
#  otherwise they would override the parameter)
edata.setObservedDataStdDev(rdata['t']*0 + np.nan,
                            list(model.getObservableIds()).index('observable_x1withsigma'))
# enable sensitivities
solver.setSensitivityOrder(amici.SensitivityOrder.first) # First-order ...
solver.setSensitivityMethod(amici.SensitivityMethod.adjoint) # ... adjoint sensitivities
model.requireSensitivitiesForAllParameters() # ... w.r.t. all parameters
# compute adjoint sensitivities
rdata = amici.runAmiciSimulation(model, solver, edata)
#print(rdata['sigmay'])
print('Log-likelihood: %f\nGradient: %s' % (rdata['llh'], rdata['sllh']))
In [20]:
from scipy.optimize import check_grad
def func(x0, symbol='llh', x0full=None, plist=[], verbose=False):
    p = x0[:]
    if len(plist):
        p = x0full[:]
        p[plist] = x0
    verbose and print('f: p=%s' % p)
    old_parameters = model.getParameters()
    solver.setSensitivityOrder(amici.SensitivityOrder.none)
    model.setParameters(p)
    rdata = amici.runAmiciSimulation(model, solver, edata)
    model.setParameters(old_parameters)
    res = np.sum(rdata[symbol])
    verbose and print(res)
    return res

def grad(x0, symbol='llh', x0full=None, plist=[], verbose=False):
    p = x0[:]
    if len(plist):
        model.setParameterList(plist)
        p = x0full[:]
        p[plist] = x0
    else:
        model.requireSensitivitiesForAllParameters()
    verbose and print('g: p=%s' % p)
    old_parameters = model.getParameters()
    solver.setSensitivityMethod(amici.SensitivityMethod.forward)
    solver.setSensitivityOrder(amici.SensitivityOrder.first)
    model.setParameters(p)
    rdata = amici.runAmiciSimulation(model, solver, edata)
    model.setParameters(old_parameters)
    res = rdata['s%s' % symbol]
    if not isinstance(res, float):
        if len(res.shape) == 3:
            res = np.sum(res, axis=(0, 2))
    verbose and print(res)
    return res
epsilon = 1e-4
err_norm = check_grad(func, grad, p_orig, 'llh', epsilon=epsilon)
print('sllh: |error|_2: %f' % err_norm)
# assert err_norm < 1e-6

print()
for ip in range(model.np()):
    plist = [ip]
    p = p_orig.copy()
    err_norm = check_grad(func, grad, p[plist], 'llh', p, [ip], epsilon=epsilon)
    print('sllh: p[%d]: |error|_2: %f' % (ip, err_norm))

print()
for ip in range(model.np()):
    plist = [ip]
    p = p_orig.copy()
    err_norm = check_grad(func, grad, p[plist], 'y', p, [ip], epsilon=epsilon)
    print('sy: p[%d]: |error|_2: %f' % (ip, err_norm))

print()
for ip in range(model.np()):
    plist = [ip]
    p = p_orig.copy()
    err_norm = check_grad(func, grad, p[plist], 'x', p, [ip], epsilon=epsilon)
    print('sx: p[%d]: |error|_2: %f' % (ip, err_norm))

print()
for ip in range(model.np()):
    plist = [ip]
    p = p_orig.copy()
    err_norm = check_grad(func, grad, p[plist], 'sigmay', p, [ip], epsilon=epsilon)
    print('ssigmay: p[%d]: |error|_2: %f' % (ip, err_norm))
In [21]:
eps = 1e-4
op = model.getParameters()

solver.setSensitivityMethod(amici.SensitivityMethod.forward)  # forward sensitivity analysis
solver.setSensitivityOrder(amici.SensitivityOrder.first)      # first-order sensitivities
model.requireSensitivitiesForAllParameters()
solver.setRelativeTolerance(1e-12)
rdata = amici.runAmiciSimulation(model, solver, edata)

def fd(x0, ip, eps, symbol='llh'):
    # central finite-difference approximation of the sensitivity w.r.t. parameter ip
    p = list(x0[:])
    old_parameters = model.getParameters()
    solver.setSensitivityOrder(amici.SensitivityOrder.none)
    p[ip] += eps
    model.setParameters(p)
    rdata_f = amici.runAmiciSimulation(model, solver, edata)
    p[ip] -= 2*eps
    model.setParameters(p)
    rdata_b = amici.runAmiciSimulation(model, solver, edata)
    model.setParameters(old_parameters)
    return (rdata_f[symbol] - rdata_b[symbol])/(2*eps)

def plot_sensitivities(symbol, eps):
    fig, axes = plt.subplots(4, 2, figsize=(15, 10))
    for ip in range(4):
        fd_approx = fd(model.getParameters(), ip, eps, symbol=symbol)

        axes[ip, 0].plot(edata.getTimepoints(), rdata[f's{symbol}'][:, ip, :], 'r-')
        axes[ip, 0].plot(edata.getTimepoints(), fd_approx, 'k--')
        axes[ip, 0].set_ylabel(f'sensitivity {symbol}')
        axes[ip, 0].set_xlabel('time')

        axes[ip, 1].plot(edata.getTimepoints(), np.abs(rdata[f's{symbol}'][:, ip, :] - fd_approx), 'k-')
        axes[ip, 1].set_ylabel('difference to fd')
        axes[ip, 1].set_xlabel('time')
        axes[ip, 1].set_yscale('log')

    plt.tight_layout()
    plt.show()
In [22]:
plot_sensitivities('x', eps)
In [23]:
plot_sensitivities('y', eps)
In [24]:
# run the simulation
rdata = amici.runAmiciSimulation(model, solver, edata)
In [25]:
# look at the ExpData as DataFrame
df = amici.getDataObservablesAsDataFrame(model, [edata])
df
Out[25]:
In [26]:
# from the exported dataframe, we can reconstruct a copy of the ExpData instance
reconstructed_edata = amici.getEdataFromDataFrame(model, df)
In [27]:
# look at the residuals as DataFrame
amici.getResidualsAsDataFrame(model, [edata], [rdata])
Out[27]:
In [28]:
# look at the Observables in rdata as DataFrame
amici.getSimulationObservablesAsDataFrame(model, [edata], [rdata])
Out[28]:
In [29]:
# look at the States in rdata as DataFrame
amici.getSimulationStatesAsDataFrame(model, [edata], [rdata])
Out[29]:
In [ ]: