In [1]:
# Import modules
import time
import math
import random
import numpy as np
import scipy
import sympy
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
A linear congruential generator (LCG) has the form
$$ \begin{align*} x_i &= a x_{i-1} + b \pmod{m} \\ u_i &= \frac{x_i}{m} \end{align*} $$ for multiplier $a$, offset $b$, and modulus $m$.
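For example, with $a = 13$, $b = 0$, $m = 31$, and seed $x_0 = 3$ (the values used in the next cells), the first few iterates are
$$ \begin{align*} x_1 &= 13 \cdot 3 \bmod 31 = 8, & u_1 &= 8/31 \approx 0.2581 \\ x_2 &= 13 \cdot 8 \bmod 31 = 11, & u_2 &= 11/31 \approx 0.3548 \\ x_3 &= 13 \cdot 11 \bmod 31 = 19, & u_3 &= 19/31 \approx 0.6129 \end{align*} $$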
In [2]:
def linear_congruential_generator(x, a, b, m):
    x = (a * x + b) % m
    u = x / m
    return u, x, a, b, m
In [3]:
x0 = 3
args = (x0, 13, 0, 31)
for i in range(10):
    u, *args = linear_congruential_generator(*args)
    print('idx_%02d x:%02d, u:%.4f' %(i + 1, args[0], u))
Evaluate $\frac{1}{b-a}\int_{a}^{b}f(x)dx$
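The Monte Carlo estimate used below: for $u_1, \dots, u_n$ approximately uniform on $[a, b]$,
$$ \frac{1}{b-a}\int_{a}^{b}f(x)dx \approx \frac{1}{n}\sum_{i=1}^{n}f(u_i) $$
Here $f(x) = x^2$ on $[0, 1]$, so the exact value is $1/3$.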
In [4]:
x = sympy.symbols('x')
exact_value = sympy.integrate(x ** 2, (x, 0, 1))
In [5]:
# Arguments for our LCG
x0 = 3
args = (x0, 13, 0, 31)
# Function and arguments for the curve y = x^2
f = lambda x : pow(x, 2)
# Process for this example
def process(f, args, total_iterations):
    avg = 0
    for i in range(total_iterations):
        u, *args = linear_congruential_generator(*args)
        avg += f(u)
    avg /= total_iterations
    return avg
print('exact value = %s (%.6f in numerical representations)' %(exact_value, exact_value.evalf()))
for n in (10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 1000):
    avg = process(f, args, n)
    print('average = %.6f with %3d uniform random numbers, error = %.6f' %(avg, n, abs(avg - exact_value.evalf())))
In [6]:
# LCG with a = 7^5 and m = 2^31 - 1 (the Park-Miller "minimal standard" generator)
def stdrand(x):
    return linear_congruential_generator(x, pow(7, 5), 0, pow(2, 31) - 1)[:2]
In [7]:
# Function and arguments for the curve y = x^2
f = lambda x : pow(x, 2)
# Process for this example
def process(f, total_iterations):
    avg = 0
    x = 3
    for i in range(total_iterations):
        u, x = stdrand(x)
        avg += f(u)
    avg /= total_iterations
    return avg
print('exact value = %s (%.6f in numerical representations)' %(exact_value, exact_value.evalf()))
for n in (10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 2000, 10000, 100000):
    avg = process(f, n)
    print('average = %.6f with %3d uniform random numbers, error = %.6f' %(avg, n, abs(avg - exact_value.evalf())))
In [8]:
# restrict : 0 <= (x, y) <= 1
# Arguments
x0 = 3
args = (x0, pow(7, 5), 0, pow(2, 31) - 1)
f = lambda x, y : 4 * pow(2 * x - 1, 4) + 8 * pow(2 * y - 1, 8) < 1 + 2 * pow(2 * y - 1, 3) * pow(3 * x - 2, 2)
# Process for this example
def process(f, args, total_iterations):
    hits = 0
    for i in range(total_iterations):
        ux, *args = linear_congruential_generator(*args)
        uy, *args = linear_congruential_generator(*args)
        hits += f(ux, uy)
    area = hits / total_iterations
    return area
print('area = %.6f with %3d uniform random numbers' %(process(f, args, 300000), 300000))
For a visualization of this region, see https://www.desmos.com/calculator
In [9]:
def randu(x):
    return linear_congruential_generator(x, pow(2, 16) + 3, 0, pow(2, 31))[:2]
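This is the classic RANDU generator ($a = 2^{16} + 3$, $m = 2^{31}$). Because $a^2 \equiv 6a - 9 \pmod{2^{31}}$, consecutive values satisfy
$$ x_{k+2} \equiv 6x_{k+1} - 9x_k \pmod{2^{31}} $$
so triples $(u_k, u_{k+1}, u_{k+2})$ fall on a small number of parallel planes, which the 3D scatter plot below makes visible.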
In [10]:
# For matplotlib
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.view_init(azim=225)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_zlim(0, 1)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
# Arguments for randu
datax = np.array([])
datay = np.array([])
dataz = np.array([])
x = 3
total_iterations = 20000
# Process
for i in range(total_iterations):
    u1, x = randu(x)
    u2, x = randu(x)
    u3, x = randu(x)
    datax = np.append(datax, u1)
    datay = np.append(datay, u2)
    dataz = np.append(dataz, u3)
ax.scatter(datax, datay, dataz, zdir='z', s=2)
Out[10]:
In [11]:
# For matplotlib
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.view_init(azim=225)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_zlim(0, 1)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
# Arguments for randu
datax = np.array([])
datay = np.array([])
dataz = np.array([])
x = 3
total_iterations = 20000
# Process
for i in range(total_iterations):
    u1, x = stdrand(x)
    u2, x = stdrand(x)
    u3, x = stdrand(x)
    datax = np.append(datax, u1)
    datay = np.append(datay, u2)
    dataz = np.append(dataz, u3)
ax.scatter(datax, datay, dataz, zdir='z', s=2)
Out[11]:
See the numpy.random.normal function for drawing samples from a normal (Gaussian) distribution.
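A minimal usage sketch: the next cell appends one draw at a time, but numpy.random.normal can also return a whole array in a single call via its size parameter (loc and scale default to the standard normal).
# Draw 10000 standard normal samples at once instead of looping
samples = np.random.normal(loc=0.0, scale=1.0, size=10000)
plt.plot(samples)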
In [12]:
datax = np.array([])
for i in range(10000):
    u1 = np.random.normal()
    datax = np.append(datax, u1)
plt.plot(datax)
Out[12]:
In [13]:
def halton(p, n):
    # Generate the first n terms of the Halton (van der Corput) sequence in base p.
    # b holds the base-p digits of the current index; u holds the output values.
    b = np.zeros(math.ceil(math.log(n + 1) / math.log(p)))
    u = np.zeros(n)
    for j in range(n):
        # Increment the base-p counter (with carry)
        i = 0
        b[0] = b[0] + 1
        while b[i] > p - 1 + np.finfo(float).eps:
            b[i] = 0
            i += 1
            b[i] += 1
        # Radical inverse: mirror the digits about the radix point
        u[j] = 0
        for k in range(1, b.size + 1):
            u[j] = u[j] + b[k-1] * pow(p, -k)
    return u
In [14]:
# Example
print(halton(2, 8))
print(halton(3, 8))
In [15]:
pair_count = 2000
pr_xdata = np.array([])
pr_ydata = np.array([])
qr_xdata = np.array([])
qr_ydata = np.array([])
qrx_seq = halton(2, pair_count)
qry_seq = halton(3, pair_count)
x = time.time()
for idx in range(pair_count):
    ux, x = stdrand(x)
    uy, x = stdrand(x)
    pr_xdata = np.append(pr_xdata, ux)
    pr_ydata = np.append(pr_ydata, uy)
    qr_xdata = np.append(qr_xdata, qrx_seq[idx])
    qr_ydata = np.append(qr_ydata, qry_seq[idx])
plt.figure(1)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.subplot(121)
plt.plot(pr_xdata, pr_ydata, 'o', markersize=1)
plt.subplot(122)
plt.plot(qr_xdata, qr_ydata, 'o', markersize=1)
Out[15]:
In [16]:
t = 10
w = 0
for i in range(t):
    if random.random() > 0.5:
        w += 1
    else:
        w -= 1
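For reference (these are the comparison values used below): for a symmetric random walk started at $0$ on the interval $(-3, 6)$, the probability of escaping through the top boundary is
$$ P(\text{exit at } 6) = \frac{0 - (-3)}{6 - (-3)} = \frac{1}{3} $$
and the expected escape time is $3 \cdot 6 = 18$ steps.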
In [17]:
def random_walk(n, interval):
    lowerbound = interval[0]
    upperbound = interval[1]
    top_exits = 0
    avg_esc_time = 0
    for _ in range(n):
        w = 0
        l = 0
        while True:
            if random.random() > 0.5:
                w += 1
            else:
                w -= 1
            l += 1
            if w == lowerbound:
                break
            elif w == upperbound:
                top_exits += 1
                break
        avg_esc_time += l
    return top_exits, avg_esc_time / n
interval = (-3, 6)
output = lambda n, top_exit : print('n = %5d, top exits = %4d, prob = %f, error = %f' \
    %(n, top_exit, top_exit / n, abs(1 / 3 - top_exit / n)))
for n in (100, 200, 400, 800, 1600, 3200, 6400, 12800, 25600):
    top_exits, _ = random_walk(n, interval)
    output(n, top_exits)
In [18]:
interval = (-3, 6)
output = lambda n, avg_esc : print('n = %5d, average esc. time = %f, error = %f' \
    %(n, avg_esc, abs(18 - avg_esc)))
for n in (100, 200, 400, 800, 1600, 3200, 6400):
    _, avg_esc = random_walk(n, interval)
    output(n, avg_esc)
Source: http://scipy-cookbook.readthedocs.io/items/BrownianMotion.html
In [19]:
"""
brownian() implements one dimensional Brownian motion (i.e. the Wiener process).
"""
# File: brownian.py
from math import sqrt
from scipy.stats import norm
import numpy as np
def brownian(x0, n, dt, delta, out=None):
    """
    Generate an instance of Brownian motion (i.e. the Wiener process):

        X(t) = X(0) + N(0, delta**2 * t; 0, t)

    where N(a, b; t0, t1) is a normally distributed random variable with mean a and
    variance b. The parameters t0 and t1 make explicit the statistical
    independence of N on different time intervals; that is, if [t0, t1) and
    [t2, t3) are disjoint intervals, then N(a, b; t0, t1) and N(a, b; t2, t3)
    are independent.

    Written as an iteration scheme,

        X(t + dt) = X(t) + N(0, delta**2 * dt; t, t+dt)

    If `x0` is an array (or array-like), each value in `x0` is treated as
    an initial condition, and the value returned is a numpy array with one
    more dimension than `x0`.

    Arguments
    ---------
    x0 : float or numpy array (or something that can be converted to a numpy array
         using numpy.asarray(x0)).
        The initial condition(s) (i.e. position(s)) of the Brownian motion.
    n : int
        The number of steps to take.
    dt : float
        The time step.
    delta : float
        delta determines the "speed" of the Brownian motion. The random variable
        of the position at time t, X(t), has a normal distribution whose mean is
        the position at time t=0 and whose variance is delta**2*t.
    out : numpy array or None
        If `out` is not None, it specifies the array in which to put the
        result. If `out` is None, a new numpy array is created and returned.

    Returns
    -------
    A numpy array of floats with shape `x0.shape + (n,)`.
    Note that the initial value `x0` is not included in the returned array.
    """
    x0 = np.asarray(x0)
    # For each element of x0, generate a sample of n numbers from a
    # normal distribution.
    r = norm.rvs(size=x0.shape + (n,), scale=delta*sqrt(dt))
    # If `out` was not given, create an output array.
    if out is None:
        out = np.empty(r.shape)
    # This computes the Brownian motion by forming the cumulative sum of
    # the random samples.
    np.cumsum(r, axis=-1, out=out)
    # Add the initial condition.
    out += np.expand_dims(x0, axis=-1)
    return out
In [20]:
N = 500
xlim = 2.0
# For SDE
sigma = 0.3
r = 1
y0 = 0
X = np.linspace(0, xlim, N)
# For Brownian motion
dt = 0.1
delta = 0.3
B1 = brownian(y0, N, dt, delta)
B2 = brownian(y0, N, dt, delta)
# Process
Y = y0 + r * X
Y1 = y0 + r * X + sigma * B1
Y2 = y0 + r * X + sigma * B2
plt.xlim(0, 2)
plt.plot(X, Y1)
plt.plot(X, Y2)
plt.plot(X, Y, color='black')
Out[20]:
In [21]:
N = 500
xlim = 2.0
r = 0.1
sigma = 0.3
delta = 0.1
dt = 0.2
y0 = 1
X = np.linspace(0, xlim, N)
# For Brownian motion
B = brownian(0, N, dt, delta)
# Process
Y = y0 * np.exp((r - 0.5 * pow(sigma, 2)) * X + sigma * B)
plt.plot(X, Y)
plt.plot(X, B, linestyle = '--')
plt.grid(True)
Given the SDE initial value problem
$$ \begin{cases} dy(t) = f(t, y)\,dt + g(t, y)\,dB_t \\ y(a) = y_a \end{cases} $$
Each random number $\Delta B_i$ is computed as $\Delta B_i = z_i\sqrt{\Delta t_i}$, where $z_i$ is drawn from the standard normal distribution $N(0, 1)$.
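With step sizes $\Delta t_i = t_{i+1} - t_i$, the Euler-Maruyama approximation $w_i \approx y(t_i)$ used in the cells below is
$$ w_0 = y_a, \qquad w_{i+1} = w_i + f(t_i, w_i)\Delta t_i + g(t_i, w_i)\Delta B_i $$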
In [22]:
dt = 0.01
xlimit = 2
y0 = 1
r = 0.1
sigma = 0.3
times = np.arange(0, xlimit + dt, dt)
dB = np.random.standard_normal(times.size) * np.sqrt(dt)
ws = np.empty(times.size)
ws[0] = y0
for i in range(times.size - 1):
    ws[i + 1] = ws[i] + r * ws[i] * dt + sigma * ws[i] * dB[i]
# Plot the chart
plt.plot(times, ws)
plt.axhline(y=0, color='black')
plt.axvline(x=0, color='black')
plt.grid(True, which='both')
In [23]:
dt = 0.1
xlimit = 100
y0 = 0
r = 10
sigma = 1
delta = 0.5
times = np.arange(0, xlimit + dt, dt)
dB = np.random.standard_normal(times.size) * np.sqrt(dt)
ws = np.empty(times.size)
ws[0] = y0
for i in range(times.size - 1):
    ws[i + 1] = ws[i] - r * ws[i] * dt + sigma * dB[i]
# For Brownian motion realization
BM = brownian(0, times.size, dt, delta)
# Plot the chart
plt.plot(times, ws, label='Langevin equation')
plt.plot(times, BM, label='Brownian motion')
plt.axhline(y = 0, color='black')
plt.axvline(x = 0, color='black')
plt.grid(True, which='both')
plt.legend()
Out[23]:
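To measure the error of both methods, the next cell compares the numerical paths with the known solution of geometric Brownian motion $dy = ry\,dt + \sigma y\,dB_t$,
$$ y(t) = y_0 e^{(r - \frac{1}{2}\sigma^2)t + \sigma B(t)} $$
evaluated along the same Brownian path $B(t)$ that drives the discretizations.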
In [24]:
dt = 0.1
xlimit = 4
y0 = 1e-2
r = 0.1
sigma = 0.3
times = np.arange(0, xlimit + dt, dt)
dB = np.random.standard_normal(times.size) * np.sqrt(dt)
ws = np.empty(times.size)
ws[0] = y0 # For Euler-Maruyama Method
wms = np.empty(times.size)
wms[0] = y0 # For Milstein Method
for i in range(times.size - 1):
    # Euler-Maruyama
    ws[i + 1] = ws[i] + r * ws[i] * dt \
        + sigma * ws[i] * dB[i]
    # Milstein
    wms[i + 1] = wms[i] + r * wms[i] * dt \
        + sigma * wms[i] * dB[i] \
        + 0.5 * pow(sigma, 2) * wms[i] * (pow(dB[i], 2) - dt)
# Calculate y(T)
tmp = dB
tmp[-1] = 0
B = np.cumsum(np.roll(tmp, 1))
f = lambda y0, sigma, t, B : y0 * np.exp((r - 0.5 * np.power(sigma, 2)) * t + sigma * B)
Y = f(y0, sigma, times, B)
# Plot the chart
plt.plot(times, ws, label='w(t) by Euler-Maruyama Method')
plt.plot(times, wms, label='w(t) by Milstein Method')
plt.plot(times, Y, label='Y(T)')
plt.grid(True, which='both')
plt.legend()
plt.show()
# Plot the chart
plt.ylabel('|y(T)-w(T)|')
plt.plot(times, np.abs(Y - ws), label='Euler-Maruyama Method')
plt.plot(times, np.abs(Y - wms), label='Milstein Method')
plt.grid(True, which='both')
plt.legend()
plt.show()
In [25]:
dts = np.array([
pow(2, -1), pow(2, -2), pow(2, -3), pow(2, -4), pow(2, -5),
pow(2, -6), pow(2, -7), pow(2, -8), pow(2, -9), pow(2, -10)
])
errs_em = np.empty(dts.size)
errs_m = np.empty(dts.size)
xlimit = 4
y0 = 1e-2
r = 0.1
sigma = 0.3
# For each dt
for i in range(dts.size):
    dt = dts[i]
    times = np.arange(0, xlimit + dt, dt)
    dB = np.random.standard_normal(times.size) * np.sqrt(dt)
    ws = np.empty(times.size)
    ws[0] = y0 # For Euler-Maruyama Method
    wms = np.empty(times.size)
    wms[0] = y0 # For Milstein Method
    for j in range(times.size - 1):
        # Euler-Maruyama
        ws[j + 1] = ws[j] + r * ws[j] * dt \
            + sigma * ws[j] * dB[j]
        # Milstein
        wms[j + 1] = wms[j] + r * wms[j] * dt \
            + sigma * wms[j] * dB[j] \
            + 0.5 * pow(sigma, 2) * wms[j] * (pow(dB[j], 2) - dt)
    # Calculate y(T)
    tmp = dB
    tmp[-1] = 0
    B = np.cumsum(np.roll(tmp, 1))
    f = lambda y0, sigma, t, B : y0 * np.exp((r - 0.5 * np.power(sigma, 2)) * t + sigma * B)
    Y = f(y0, sigma, times, B)
    errs_em[i] = abs(Y[-1] - ws[-1])
    errs_m[i] = abs(Y[-1] - wms[-1])
# Plot the chart
fig, ax = plt.subplots()
plt.xlabel('dt')
plt.ylabel('|y(T)-w(T)|')
xi = np.arange(dts.size)
plt.xticks(xi, dts)
plt.plot(xi, errs_em, label='Euler-Maruyama Method')
plt.plot(xi, errs_m, label='Milstein Method')
plt.grid(True, which='both')
plt.legend()
fig.autofmt_xdate()
plt.show()
Use the Euler-Maruyama Method, the Milstein Method, and the First-Order Stochastic Runge-Kutta Method to solve the SDE
$$ \begin{cases} dy(t) = -2e^{-2y}\,dt + 2e^{-y}\,dB_t \\ y(0) = 2 \end{cases} $$
The Euler-Maruyama Method for this equation is
$$ w_{i+1} = w_i - 2e^{-2w_i}\,\Delta t_i + 2e^{-w_i}\,\Delta B_i $$
The Milstein Method is
$$ w_{i+1} = w_i - 2e^{-2w_i}\,\Delta t_i + 2e^{-w_i}\,\Delta B_i - 2e^{-2w_i}\left(\Delta B_i^2 - \Delta t_i\right) $$
The First-Order Stochastic Runge-Kutta Method is
$$ w_{i+1} = w_i - 2e^{-2w_i}\,\Delta t_i + 2e^{-w_i}\,\Delta B_i + \frac{2e^{-\left(w_i + 2e^{-w_i}\sqrt{\Delta t_i}\right)} - 2e^{-w_i}}{2\sqrt{\Delta t_i}}\left(\Delta B_i^2 - \Delta t_i\right) $$
In [26]:
dt = 0.1
xlimit = 4
y0 = 2
times = np.arange(0, xlimit + dt, dt)
dB = np.random.standard_normal(times.size) * np.sqrt(dt)
ws_em = np.empty(times.size)
ws_em[0] = y0 # For Euler-Maruyama Method
ws_m = np.empty(times.size)
ws_m[0] = y0 # For Milstein Method
ws_rk = np.empty(times.size)
ws_rk[0] = y0 # For First-Order Stochastic Runge-Kutta Method
for i in range(times.size - 1):
    # Euler-Maruyama Method
    ws_em[i + 1] = ws_em[i] - 2 * np.exp(-2 * ws_em[i]) * dt + 2 * np.exp(-ws_em[i]) * dB[i]
    # Milstein Method
    ws_m[i + 1] = ws_m[i] - 2 * np.exp(-2 * ws_m[i]) * dt + 2 * np.exp(-ws_m[i]) * dB[i] - \
        2 * np.exp(-2 * ws_m[i]) * (np.power(dB[i], 2) - dt)
    # First-Order Stochastic Runge-Kutta Method
    ws_rk[i + 1] = ws_rk[i] - 2 * np.exp(-2 * ws_rk[i]) * dt + 2 * np.exp(-ws_rk[i]) * dB[i] + \
        (2 * np.exp(-(ws_rk[i] + 2 * np.exp(-ws_rk[i]) * np.sqrt(dt))) - 2 * np.exp(-ws_rk[i])) * (np.power(dB[i], 2) - dt) / (2 * np.sqrt(dt))
# Plot the chart
plt.plot(times, ws_em, label = 'Euler-Maruyama Method')
plt.plot(times, ws_m, label = 'Milstein Method')
plt.plot(times, ws_rk, label = 'First-Order Stochastic Runge-Kutta Method')
plt.legend()
plt.show()
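The last cell is, apparently, an Euler-Maruyama discretization of the Brownian bridge SDE pinned at $y(t_0) = y_0$ and $y(t_1) = y_1$,
$$ dy = \frac{y_1 - y}{t_1 - t}\,dt + dB_t $$
with the final Brownian increment set to zero so that each path ends exactly at $y_1$.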
In [27]:
dt = 0.01
t0, t1 = 1, 3
y0, y1 = 1, 2
times = np.arange(t0, t1 + dt * 1, dt)
dB1 = np.random.standard_normal(times.size) * np.sqrt(dt)
dB1[-2] = 0
ws1 = np.empty(times.size)
ws1[0] = y0
dB2 = np.random.standard_normal(times.size) * np.sqrt(dt)
dB2[-2] = 0
ws2 = np.empty(times.size)
ws2[0] = y0
dB3 = np.random.standard_normal(times.size) * np.sqrt(dt)
dB3[-2] = 0
ws3 = np.empty(times.size)
ws3[0] = y0
# Let's use Euler-Maruyama Method
for i in range(times.size - 1):
    ws1[i + 1] = ws1[i] + (y1 - ws1[i]) * dt / (dt * (times.size - i - 1)) + dB1[i]
    ws2[i + 1] = ws2[i] + (y1 - ws2[i]) * dt / (dt * (times.size - i - 1)) + dB2[i]
    ws3[i + 1] = ws3[i] + (y1 - ws3[i]) * dt / (dt * (times.size - i - 1)) + dB3[i]
# Plot the chart
plt.plot(times, ws1)
plt.plot(times, ws2)
plt.plot(times, ws3)
plt.plot(t0, y0, marker='o', color='k')
plt.plot(t1, y1, marker='o', color='k')
plt.grid(True)
plt.show()