In [1]:
import numpy as np
import sympy as sp

%matplotlib nbagg
import matplotlib.pyplot as plt

from pyNT import Wiener, SDE, ODE

In [2]:
C1 = sp.Symbol('C_1')
C2 = sp.Symbol('C_2')
A  = sp.Symbol('A')

In [3]:
lambda1  = 1.
lambda2  = .5
sigma21  = 0.3
alpha    = 0.5
beta     = 0.67
delta1   = .1
delta2   = .07
delta0   = .1
mu = 2.

coeff_system = ODE(x = [C1, C2, A],
                   f = [- lambda1*C1 -
                        C1**2/2 -
                        sigma21*C1*C2 -
                        (1 + beta)*C2**2 +
                        delta1,
                        -lambda2*C2 + delta2,
                        mu*C1 - alpha*C2**2 / 2 + delta0])
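
Comment:

Written out, the coefficient system integrated above is
$$\dot{C}_1 = -\lambda_1 C_1 - \tfrac{1}{2}C_1^2 - \sigma_{21} C_1 C_2 - (1+\beta) C_2^2 + \delta_1,$$
$$\dot{C}_2 = -\lambda_2 C_2 + \delta_2,$$
$$\dot{A} = \mu C_1 - \tfrac{1}{2}\alpha C_2^2 + \delta_0,$$
a deterministic (Riccati-type) system that only needs to be solved once, independently of the noise realizations used later.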

In [4]:
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
for solver in [['Heun', 'cRK'],
               ['cRK', 'Taylor4']]:
    evdt = coeff_system.get_evdt(
        X0 = np.zeros(3),
        solver = solver,
        h0 = .5,
        exp_range = range(10))
    # no errorbars since there's only one trajectory
    ax.plot(evdt[:, 0],
            evdt[:, 2],
            label = '{0} vs {1}'.format(solver[0], solver[1]),
            marker = '.')
ax.set_xscale('log')
ax.set_yscale('log')
ax.legend(loc = 'best')


Out[4]:
<matplotlib.legend.Legend at 0x7f23783089e8>
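
Comment:

get_evdt presumably integrates the system with both members of the solver pair at a sequence of decreasing timesteps (starting from h0 = 0.5) and records the difference between the two solutions; on the log-log axes the slope of error versus timestep then indicates the convergence order of the less accurate solver in each pair.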

In [5]:
# sample times and classical Runge-Kutta integration of the coefficient
# system: 2**10 steps of size 2**-10 cover [0, 1], i.e. 1025 samples
coeff_time = np.linspace(0., 1., 1025)
coeff_traj = coeff_system.cRK(2.**(-10), 2**10, np.zeros(3))

In [6]:
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(coeff_time, coeff_traj[:, 0], label = '$C_1$')
ax.plot(coeff_time, coeff_traj[:, 1], label = '$C_2$')
ax.plot(coeff_time, coeff_traj[:, 2], label = '$A$')
ax.legend(loc = 'best')


Out[6]:
<matplotlib.legend.Legend at 0x7f237754acc0>

In [7]:
# reverse coefficients in time, so that index i corresponds to coefficient time 1 - t_i
traj_C1 = coeff_traj[::-1, 0]
traj_C2 = coeff_traj[::-1, 1]
traj_A  = coeff_traj[::-1, 2]

In [8]:
x = sp.Symbol('x')
y = sp.Symbol('y')
xy_sys = SDE(
    x = [x, y],
    a = [mu - lambda1*x,
         - lambda2*y],
    b = [[x**.5, 0],
         [sigma21*x**.5, (alpha + beta*x)**.5]])
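
Comment:

In differential form, with $W_1$ and $W_2$ independent Wiener processes, this system reads
$$dX = (\mu - \lambda_1 X)\,dt + \sqrt{X}\,dW_1,$$
$$dY = -\lambda_2 Y\,dt + \sigma_{21}\sqrt{X}\,dW_1 + \sqrt{\alpha + \beta X}\,dW_2,$$
i.e. each row of b lists the diffusion coefficients of one component with respect to the two noise sources.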

In [9]:
dt0, evdt0, deltae0 = xy_sys.get_evdt_vs_M(
    ntraj = 32,
    X0 = np.ones(2),
    h0 = .25,
    solver = ['Milstein', 'explicit_1p0'],
    exp_range = range(7))



In [10]:
print(dt0)
print(evdt0)
print(deltae0)


[0.0009765625, 0.001953125, 0.00390625, 0.0078125, 0.015625, 0.03125, 0.0625]
[0.00015434829020161353, 0.00030423970009375726, 0.00061034091888271396, 0.0012006179490907762, 0.0023516657713537233, 0.0045922696275510298, 0.0090286301234447225]
[5.5474569209934225e-06, 1.0317517233969969e-05, 2.0525450715739996e-05, 4.0891867642228929e-05, 8.6468965665335862e-05, 0.00016907347228969685, 0.00038326446598309144]
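
Comment:

dt0 holds the sequence of timesteps, evdt0 the corresponding error estimates, and deltae0 (presumably) the confidence-interval half-widths on those estimates. Note that evdt0 roughly halves each time the timestep is halved, consistent with strong order 1.0 for both schemes.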

In [11]:
# get solution: 20 batches of 64 trajectories, integrated at dt0[1] = 2**-9
nbatches = 20
ntraj = 64
tstep_index = 1
w = Wiener(nsteps = int(1 / dt0[tstep_index]),
           dt = dt0[tstep_index],
           noise_dimension = xy_sys.get_noise_dimension(),
           solution_shape = [nbatches, ntraj])
w.initialize()
xy_traj = xy_sys.explicit_1p0(w, np.ones(2))

In [12]:
# check that components of the Wiener process are different
print(w.W.shape)
print(w.dW.shape)
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(w.get_time(), w.W[:, 0, 0, 0], label = '$W_1$')
ax.plot(w.get_time(), w.W[:, 1, 0, 0], label = '$W_2$')
ax.legend(loc = 'best')


(513, 2, 20, 64)
(512, 2, 20, 64)
Out[12]:
<matplotlib.legend.Legend at 0x7f237740e630>
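
Comment:

The arrays are indexed as (time, component, batch, trajectory): 513 time points ($2^9$ steps of size $2^{-9}$ on $[0, 1]$), 2 noise components, and 20 batches of 64 trajectories each.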

In [13]:
print(xy_traj.shape)
# subsample the coefficients from 1025 to 513 points, to match the time
# grid of the SDE solution (512 steps of dt0[1] = 2**-9 on [0, 1])
traj_C1 = traj_C1[::2]
traj_C2 = traj_C2[::2]
traj_A  = traj_A[::2]
print(traj_C1.shape)


(513, 2, 20, 64)
(513,)

In [14]:
X = xy_traj[:, 0]
Y = xy_traj[:, 1]

In [15]:
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(w.get_time(), X[:, 0, 0], label = '$X$')
ax.plot(w.get_time(), Y[:, 0, 0], label = '$Y$')
ax.legend(loc = 'best')
# the curves look very similar to the Wiener components,
# but they are different


Out[15]:
<matplotlib.legend.Legend at 0x7f237735cb70>
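
Comment:

With the time-reversed coefficients, the next cell evaluates
$$B(t) = \exp\big(-C_1(1-t)\,X(t) - C_2(1-t)\,Y(t) - A(1-t)\big)$$
for one randomly picked batch and trajectory; "naive" in the sense that no error bounds are attached to the estimate yet.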

In [16]:
# naive estimate of B (no error bounds attached yet)
batch_pick = np.random.randint(0, nbatches)
traj_pick  = np.random.randint(0, ntraj)
B = np.exp(-X*traj_C1[:, None, None]
           -Y*traj_C2[:, None, None]
           -traj_A[:, None, None])
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(w.get_time(), B[:, batch_pick, traj_pick], label = '$B$')
ax.legend(loc = 'best')


Out[16]:
<matplotlib.legend.Legend at 0x7f23773405f8>

In [17]:
# compute error
final_error = evdt0[tstep_index]*4 # the factor of 4 rescales the h0 = 0.25 error to the unit time interval
rel_error_plus = 1 + np.linspace(0,  final_error, X.shape[0])
rel_error_minus = 1 + np.linspace(0, -final_error, X.shape[0])
# note: I ignored the errors in C_1, C_2 and A since they're at least ten
# orders of magnitude smaller than the SDE errors...

fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(w.get_time(), rel_error_plus)
ax.plot(w.get_time(), rel_error_minus)


Out[17]:
[<matplotlib.lines.Line2D at 0x7f2377305eb8>]
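
Comment:

The relative error envelope is taken to grow linearly in time, from 0 at $t = 0$ to the estimated final SDE error at $t = 1$. The next cell brackets $B$ by applying the $1 \pm \epsilon(t)$ envelopes to the $X$ and $Y$ contributions in all four sign combinations.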

In [18]:
batch_pick = np.random.randint(0, nbatches)
traj_pick  = np.random.randint(0, ntraj)
B = np.exp(-X*traj_C1[:, None, None]
           -Y*traj_C2[:, None, None]
           -traj_A[:, None, None])
Bpp = np.exp(
    -X*traj_C1[:, None, None]*rel_error_plus[:, None, None]
    -Y*traj_C2[:, None, None]*rel_error_plus[:, None, None]
    -traj_A[:, None, None])
Bpm = np.exp(
    -X*traj_C1[:, None, None]*rel_error_plus[:, None, None]
    -Y*traj_C2[:, None, None]*rel_error_minus[:, None, None]
    -traj_A[:, None, None])
Bmp = np.exp(
    -X*traj_C1[:, None, None]*rel_error_minus[:, None, None]
    -Y*traj_C2[:, None, None]*rel_error_plus[:, None, None]
    -traj_A[:, None, None])
Bmm = np.exp(
    -X*traj_C1[:, None, None]*rel_error_minus[:, None, None]
    -Y*traj_C2[:, None, None]*rel_error_minus[:, None, None]
    -traj_A[:, None, None])
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(w.get_time(), B[:, batch_pick, traj_pick], label = '$B$')
ax.plot(w.get_time(), Bpp[:, batch_pick, traj_pick], label = '$B_{++}$')
ax.plot(w.get_time(), Bpm[:, batch_pick, traj_pick], label = '$B_{+-}$')
ax.plot(w.get_time(), Bmp[:, batch_pick, traj_pick], label = '$B_{-+}$')
ax.plot(w.get_time(), Bmm[:, batch_pick, traj_pick], label = '$B_{--}$')
ax.legend(loc = 'best')


Out[18]:
<matplotlib.legend.Legend at 0x7f237743bfd0>

Comment:

The result at $t=1$ is always 1 because we impose that $C_1$, $C_2$ and $A$ vanish there (so $B(1) = 1$ by construction, independently of the numerical method used). The difference between different estimates of $B$ must therefore be 0 at $t=1$. It is also 0 at $t=0$, where the assumed errors vanish as well.


In [19]:
# statistics on fluctuations in B
fullB = np.array([Bpp, Bpm, Bmp, Bmm])
minB = np.min(fullB, axis = 0)
maxB = np.max(fullB, axis = 0)
deltaB = maxB - minB
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(w.get_time(), deltaB[:, batch_pick, traj_pick], label = '$\\delta B$')
ax.legend(loc = 'best')


Out[19]:
<matplotlib.legend.Legend at 0x7f237723a828>
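
Comment:

$\delta B$ is the spread (maximum minus minimum) over the four bracketing estimates, i.e. a pointwise worst-case measure of how much the SDE integration error can move $B$. The next cell shows its average and its maximum over all batches and trajectories.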

In [21]:
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(w.get_time(),
        np.average(deltaB, axis = (1, 2)),
        label = '$\\langle \\delta B \\rangle$')
ax.plot(w.get_time(),
        np.nanmax(deltaB, axis = (1, 2)),
        label = '$\\max \\delta B$')
ax.legend(loc = 'best')


Out[21]:
<matplotlib.legend.Legend at 0x7f237455c550>

In [22]:
# integrate the same noise realization with a second scheme, so that the
# difference between the two solutions estimates the numerical error in B
xy_traj_alt = xy_sys.Milstein(w, np.ones(2))
from pyNT.sde import get_t1ma_nm1
deltaB = (
    np.abs(
        np.exp(
            -xy_traj[:, 0]*traj_C1[:, None, None]
            -xy_traj[:, 1]*traj_C2[:, None, None]
            -traj_A[:, None, None]) -
        np.exp(
            -xy_traj_alt[:, 0]*traj_C1[:, None, None]
            -xy_traj_alt[:, 1]*traj_C2[:, None, None]
            -traj_A[:, None, None])) /
    np.exp(
        -xy_traj[:, 0]*traj_C1[:, None, None]
        -xy_traj[:, 1]*traj_C2[:, None, None]
        -traj_A[:, None, None]))
avdB = np.average(deltaB,
                  axis = (1, 2))
# batch-means confidence interval: variance of the per-batch averages,
# scaled by a Student-t quantile
sigma2 = (np.sum((np.average(deltaB, axis = 2) - avdB[:, None])**2, axis = 1) /
          (deltaB.shape[1] - 1))
errB = (get_t1ma_nm1(0.99, deltaB.shape[1] - 1) *
        (sigma2 / deltaB.shape[1])**.5)
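
Comment:

As a sanity check on the Student-t factor, here is a hypothetical scipy-based stand-in for get_t1ma_nm1, assuming the name denotes the quantile $t_{1-\alpha, n-1}$ of the Student-t distribution with $n - 1$ degrees of freedom; this is a sketch under that assumption, not pyNT's actual implementation.

In [ ]:
from scipy.stats import t as student_t

def t1ma_nm1(one_minus_alpha, dof):
    # one-sided quantile: P(T <= q) = one_minus_alpha for T ~ t(dof);
    # use 0.5*(1 + one_minus_alpha) instead for the two-sided convention
    return student_t.ppf(one_minus_alpha, dof)

# if the assumption holds, this should agree with pyNT's value
print(t1ma_nm1(0.99, deltaB.shape[1] - 1),
      get_t1ma_nm1(0.99, deltaB.shape[1] - 1))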

In [23]:
ax = plt.figure(figsize=(6,4)).add_subplot(111)
ax.plot(w.get_time(),
        avdB,
        color = 'blue',
        label = '$\\langle \\delta B \\rangle$')
ax.plot(w.get_time(),
        np.max([np.min(deltaB,
                       axis = (1, 2)),
                avdB - errB], axis = 0),
        color = 'blue',
        dashes = (1,1))
ax.plot(w.get_time(),
        np.min([np.max(deltaB,
                       axis = (1, 2)),
                avdB + errB], axis = 0),
        color = 'blue',
        dashes = (1,1))
ax.plot(w.get_time(),
        np.max(deltaB, axis = (1, 2)),
        color = 'red',
        label = '$\\max \\delta B$')
ax.plot(w.get_time(),
        np.min(deltaB, axis = (1, 2)),
        color = 'green',
        label = '$\\min \\delta B$')
ax.legend(loc = 'best')


Out[23]:
<matplotlib.legend.Legend at 0x7f23745422b0>