In [1]:
import numpy as np
import sympy as sp
%matplotlib nbagg
import matplotlib.pyplot as plt
from pyNT import Wiener, SDE, ODE
In [2]:
C1 = sp.Symbol('C_1')
C2 = sp.Symbol('C_2')
A = sp.Symbol('A')
In [3]:
lambda1 = 1.
lambda2 = .5
sigma21 = 0.3
alpha = 0.5
beta = 0.67
delta1 = .1
delta2 = .07
delta0 = .1
mu = 2.
coeff_system = ODE(x = [C1, C2, A],
                   f = [-lambda1*C1 - C1**2/2 -
                        sigma21*C1*C2 - (1 + beta)*C2**2 + delta1,
                        -lambda2*C2 + delta2,
                        mu*C1 - alpha*C2**2 / 2 + delta0])
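Comment:
For reference, the system above is (a direct transcription of the f list, with dots denoting derivatives with respect to the integration variable):
$\dot{C}_1 = -\lambda_1 C_1 - \tfrac{1}{2} C_1^2 - \sigma_{21} C_1 C_2 - (1+\beta) C_2^2 + \delta_1$,
$\dot{C}_2 = -\lambda_2 C_2 + \delta_2$,
$\dot{A} = \mu C_1 - \tfrac{1}{2} \alpha C_2^2 + \delta_0$,
integrated below from the initial condition $C_1 = C_2 = A = 0$.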
In [4]:
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
for solver in [['Heun', 'cRK'],
               ['cRK', 'Taylor4']]:
    evdt = coeff_system.get_evdt(
        X0 = np.zeros(3),
        solver = solver,
        h0 = .5,
        exp_range = range(10))
    # no errorbars since there's only one trajectory
    ax.plot(evdt[:, 0],
            evdt[:, 2],
            label = '{0} vs {1}'.format(solver[0], solver[1]),
            marker = '.')
ax.set_xscale('log')
ax.set_yscale('log')
ax.legend(loc = 'best')
Out[4]:
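Comment:
A quick way to read off the empirical convergence order from the curves above is a least-squares fit in log-log coordinates. A minimal sketch, using the evdt array left over from the last loop iteration and assuming (as in the plot) that column 0 holds the timestep and column 2 the error:
slope = np.polyfit(np.log(evdt[:, 0]), np.log(evdt[:, 2]), 1)[0]
print('empirical convergence order: {0:.2f}'.format(slope))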
In [5]:
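# solve the coefficient ODE with cRK (presumably the classical 4th order
# Runge-Kutta scheme): step h = 2**(-10) for 2**10 steps, i.e. 1025
# samples covering t in [0, 1], matching coeff_time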
coeff_time = np.linspace(.0, 1., 1025)
coeff_traj = coeff_system.cRK(2.**(-10), 2**10, np.zeros(3))
In [6]:
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(coeff_time, coeff_traj[:, 0], label = '$C_1$')
ax.plot(coeff_time, coeff_traj[:, 1], label = '$C_2$')
ax.plot(coeff_time, coeff_traj[:, 2], label = '$A$')
ax.legend(loc = 'best')
Out[6]:
In [7]:
# reverse the coefficient trajectories in time, so that C_1, C_2 and A vanish at t = 1
traj_C1 = coeff_traj[::-1, 0]
traj_C2 = coeff_traj[::-1, 1]
traj_A = coeff_traj[::-1, 2]
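A minimal sanity check one could add here: after the reversal, the zeros(3) initial condition ends up at the last index, so all three coefficients vanish at $t=1$:
print(traj_C1[-1], traj_C2[-1], traj_A[-1]) # should print three zeros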
In [8]:
x = sp.Symbol('x')
y = sp.Symbol('y')
xy_sys = SDE(
    x = [x, y],
    a = [mu - lambda1*x,
         -lambda2*y],
    b = [[x**.5, 0],
         [sigma21*x**.5, (alpha + beta*x)**.5]])
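Comment:
In differential notation, the system as coded is
$dx = (\mu - \lambda_1 x)\,dt + \sqrt{x}\,dW_1$,
$dy = -\lambda_2 y\,dt + \sigma_{21} \sqrt{x}\,dW_1 + \sqrt{\alpha + \beta x}\,dW_2$,
a two-factor square-root diffusion; each row of b lists the coefficients of the two noise components.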
In [9]:
dt0, evdt0, deltae0 = xy_sys.get_evdt_vs_M(
    ntraj = 32,
    X0 = np.ones(2),
    h0 = .25,
    solver = ['Milstein', 'explicit_1p0'],
    exp_range = range(7))
In [10]:
print(dt0)
print(evdt0)
print(deltae0)
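Comment:
As these arrays are used below: dt0 holds the sequence of timesteps tested, evdt0 the corresponding error estimates, and deltae0 presumably the uncertainty on those estimates over the 32 trajectories.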
In [11]:
# get solution
nbatches = 20
ntraj = 64
tstep_index = 1
w = Wiener(nsteps = int(1 / dt0[tstep_index]),
           dt = dt0[tstep_index],
           noise_dimension = xy_sys.get_noise_dimension(),
           solution_shape = [nbatches, ntraj])
w.initialize()
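# integrate the SDE with explicit_1p0 (presumably the explicit strong
# order 1.0 scheme of Kloeden & Platen); the Milstein solution on the
# same noise is generated below for comparison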
xy_traj = xy_sys.explicit_1p0(w, np.ones(2))
In [12]:
# check that components of the Wiener process are different
print(w.W.shape)
print(w.dW.shape)
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(w.get_time(), w.W[:, 0, 0, 0], label = '$W_1$')
ax.plot(w.get_time(), w.W[:, 1, 0, 0], label = '$W_2$')
ax.legend(loc = 'best')
Out[12]:
In [13]:
print(xy_traj.shape)
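# subsample the coefficient trajectories so they live on the SDE time grid
# (the ODE grid is twice as fine: 1025 points down to 513, matching xy_traj)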
traj_C1 = traj_C1[::2]
traj_C2 = traj_C2[::2]
traj_A = traj_A[::2]
print(traj_C1.shape)
In [14]:
X = xy_traj[:, 0]
Y = xy_traj[:, 1]
In [15]:
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(w.get_time(), X[:, 0, 0], label = '$X$')
ax.plot(w.get_time(), Y[:, 0, 0], label = '$Y$')
ax.legend(loc = 'best')
# the curves look very similar to the Wiener components,
# but they are different
Out[15]:
In [16]:
# naive B
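# B(t) = exp(-C1(t) X(t) - C2(t) Y(t) - A(t)), evaluated for all batches
# and trajectories at once via broadcasting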
batch_pick = np.random.randint(0, nbatches)
traj_pick = np.random.randint(0, ntraj)
B = np.exp(-X*traj_C1[:, None, None]
           -Y*traj_C2[:, None, None]
           -traj_A[:, None, None])
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(w.get_time(), B[:, batch_pick, traj_pick], label = '$B$')
ax.legend(loc = 'best')
Out[16]:
In [17]:
# compute error
final_error = evdt0[tstep_index]*4 # the factor 4 comes from using h0 = 0.25
rel_error_plus = 1 + np.linspace(0, final_error, X.shape[0])
rel_error_minus = 1 + np.linspace(0, -final_error, X.shape[0])
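# the relative error on the X and Y terms of the exponent is taken to grow
# linearly in time, from 0 at t = 0 up to +/- final_error at t = 1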
# note: I ignored the errors in C_1, C_2 and A since they're at least ten
# orders of magnitude smaller than the SDE errors...
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(w.get_time(), rel_error_plus)
ax.plot(w.get_time(), rel_error_minus)
Out[17]:
In [18]:
batch_pick = np.random.randint(0, nbatches)
traj_pick = np.random.randint(0, ntraj)
B = np.exp(-X*traj_C1[:, None, None]
           -Y*traj_C2[:, None, None]
           -traj_A[:, None, None])
Bpp = np.exp(
    -X*traj_C1[:, None, None]*rel_error_plus[:, None, None]
    -Y*traj_C2[:, None, None]*rel_error_plus[:, None, None]
    -traj_A[:, None, None])
Bpm = np.exp(
    -X*traj_C1[:, None, None]*rel_error_plus[:, None, None]
    -Y*traj_C2[:, None, None]*rel_error_minus[:, None, None]
    -traj_A[:, None, None])
Bmp = np.exp(
    -X*traj_C1[:, None, None]*rel_error_minus[:, None, None]
    -Y*traj_C2[:, None, None]*rel_error_plus[:, None, None]
    -traj_A[:, None, None])
Bmm = np.exp(
    -X*traj_C1[:, None, None]*rel_error_minus[:, None, None]
    -Y*traj_C2[:, None, None]*rel_error_minus[:, None, None]
    -traj_A[:, None, None])
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(w.get_time(), B[:, batch_pick, traj_pick], label = '$B$')
ax.plot(w.get_time(), Bpp[:, batch_pick, traj_pick], label = '$B_{++}$')
ax.plot(w.get_time(), Bpm[:, batch_pick, traj_pick], label = '$B_{+-}$')
ax.plot(w.get_time(), Bmp[:, batch_pick, traj_pick], label = '$B_{-+}$')
ax.plot(w.get_time(), Bmm[:, batch_pick, traj_pick], label = '$B_{--}$')
ax.legend(loc = 'best')
Out[18]:
Comment:
The result at $t=1$ is always 1 because we impose that $C_1$, $C_2$ and $A$ vanish there, so that $B(1) = 1$ by construction, independently of the numerical method. The difference between different estimates of $B$ must therefore be 0 at $t=1$. It is also 0 at $t=0$, since the relative errors vanish there as well.
In [19]:
# statistics on fluctuations in B
fullB = np.array([Bpp, Bpm, Bmp, Bmm])
minB = np.min(fullB, axis = 0)
maxB = np.max(fullB, axis = 0)
deltaB = maxB - minB
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(w.get_time(), deltaB[:, batch_pick, traj_pick], label = '$\\delta B$')
ax.legend(loc = 'best')
Out[19]:
In [21]:
fig = plt.figure(figsize=(6,4))
ax = fig.add_subplot(111)
ax.plot(w.get_time(),
        np.average(deltaB, axis = (1, 2)),
        label = '$\\langle \\delta B \\rangle$')
ax.plot(w.get_time(),
        np.nanmax(deltaB, axis = (1, 2)),
        label = '$\\max \\delta B$')
ax.legend(loc = 'best')
Out[21]:
In [22]:
xy_traj_alt = xy_sys.Milstein(w, np.ones(2))
from pyNT.sde import get_t1ma_nm1
deltaB = (
    np.abs(
        np.exp(-xy_traj[:, 0]*traj_C1[:, None, None]
               -xy_traj[:, 1]*traj_C2[:, None, None]
               -traj_A[:, None, None]) -
        np.exp(-xy_traj_alt[:, 0]*traj_C1[:, None, None]
               -xy_traj_alt[:, 1]*traj_C2[:, None, None]
               -traj_A[:, None, None])) /
    np.exp(-xy_traj[:, 0]*traj_C1[:, None, None]
           -xy_traj[:, 1]*traj_C2[:, None, None]
           -traj_A[:, None, None]))
avdB = np.average(deltaB, axis = (1, 2))
sigma2 = (np.sum((np.average(deltaB, axis = 2) - avdB[:, None])**2, axis = 1) /
          (deltaB.shape[1] - 1))
errB = (get_t1ma_nm1(0.99, deltaB.shape[1] - 1) *
        (sigma2 / deltaB.shape[1])**.5)
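Comment:
errB is a confidence-interval half-width for the batch-averaged relative difference: sigma2 is the sample variance of the 20 batch means, and get_t1ma_nm1(0.99, n-1) is presumably the Student-t quantile $t_{1-\alpha, n-1}$ at the 99% confidence level.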
In [23]:
ax = plt.figure(figsize=(6,4)).add_subplot(111)
ax.plot(w.get_time(),
        avdB,
        color = 'blue',
        label = '$\\langle \\delta B \\rangle$')
ax.plot(w.get_time(),
        np.max([np.min(deltaB, axis = (1, 2)),
                avdB - errB], axis = 0),
        color = 'blue',
        dashes = (1, 1))
ax.plot(w.get_time(),
        np.min([np.max(deltaB, axis = (1, 2)),
                avdB + errB], axis = 0),
        color = 'blue',
        dashes = (1, 1))
ax.plot(w.get_time(),
        np.max(deltaB, axis = (1, 2)),
        color = 'red',
        label = '$\\max \\delta B$')
ax.plot(w.get_time(),
        np.min(deltaB, axis = (1, 2)),
        color = 'green',
        label = '$\\min \\delta B$')
ax.legend(loc = 'best')
Out[23]: