In [1]:
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import math
We first establish a working resolution
In [2]:
# Working resolution: the Euler integration step size used by every
# simulation loop in this notebook.
res = 0.01
dt = res  # alias; all stepping code below reads `dt`
In [3]:
default_lambda_1, default_lambda_2, default_lambda_3 = 0.086, 0.141, 0.773
In [4]:
def quad2(x_1, y_1, x_2, y_2,
          lambda_1 = default_lambda_1,
          lambda_2 = default_lambda_2,
          lambda_3 = default_lambda_3):
    """
    Real form of the complex quadratic system

        dz1/dt = lambda_2 * z1^2 - (lambda_2 + lambda_3) * z1 * z2
        dz2/dt = lambda_1 * z2^2 - (lambda_1 + lambda_3) * z1 * z2

    with z1 = x_1 + i*y_1 and z2 = x_2 + i*y_2 (the four returned values
    are the real/imaginary parts of dz1/dt and dz2/dt).
    http://www.math.kit.edu/iag3/~herrlich/seite/wws-11/media/wws-talk-valdez.pdf
    """
    # Real and imaginary parts of the shared bilinear term z1 * z2.
    re_z1z2 = x_1 * x_2 - y_1 * y_2
    im_z1z2 = x_1 * y_2 + y_1 * x_2
    x_1_dot = lambda_2 * (x_1 ** 2 - y_1 ** 2) - (lambda_2 + lambda_3) * re_z1z2
    y_1_dot = 2 * lambda_2 * x_1 * y_1 - (lambda_2 + lambda_3) * im_z1z2
    x_2_dot = lambda_1 * (x_2 ** 2 - y_2 ** 2) - (lambda_1 + lambda_3) * re_z1z2
    y_2_dot = 2 * lambda_1 * x_2 * y_2 - (lambda_1 + lambda_3) * im_z1z2
    return x_1_dot, y_1_dot, x_2_dot, y_2_dot
In [5]:
def plot_quad(ws, xs, ys, zs, plot_type = 0, txt = ""):
    """Visualize a simulated trajectory.

    ws, xs, ys, zs -- coordinate sequences of the trajectory
    plot_type      -- 0: two stacked 2-D phase plots (xs vs ws, zs vs ys)
                      1: the same two curves overlaid on one axes
                      2: 3-D plot of the trajectory normalized to the unit sphere
                      anything else: prints an error message
    txt            -- figure suptitle
    """
    if plot_type == 0:
        print("Plotting Double Plot Quad Viz")
        plt.figure(1)
        plt.subplot(2, 1, 1)
        plt.subplots_adjust(top=0.85)
        plt.plot(xs, ws)
        plt.title('xy')
        plt.grid(True)
        plt.subplot(2, 1, 2)
        plt.plot(ys, zs)
        plt.title('wz')
        plt.grid(True)
        plt.suptitle(txt, fontsize=14)
        plt.show()
    elif plot_type == 1:
        print("Plotting Overlain Double Plot Quad Viz")
        plt.figure(1)
        plt.plot(xs, ws)
        plt.plot(ys, zs)
        plt.title('x-w, y-z')
        plt.grid(True)
        plt.suptitle(txt, fontsize=14)
        plt.show()
    elif plot_type == 2:
        print("Plotting Sphere Plot Quad Viz")
        fig = plt.figure()
        # FIX: fig.gca(projection='3d') was removed in Matplotlib 3.6+.
        ax = fig.add_subplot(projection='3d')
        plt.subplots_adjust(top=0.85)
        plt.suptitle(txt, fontsize=14)
        # BUG FIX: the original called the undefined name `quad_distance`
        # (the helper defined later is `quad_dist`, and it returns the
        # SQUARED norm). To project onto the unit sphere we divide by the
        # true Euclidean norm of each 4-D sample.
        qdist = np.sqrt(np.square(ws) + np.square(xs) + np.square(ys) + np.square(zs))
        ws = np.divide(ws, qdist)
        xs = np.divide(xs, qdist)
        ys = np.divide(ys, qdist)
        zs = np.divide(zs, qdist)
        ax.plot(xs, ys, zs)
        ax.set_xlabel("X Axis")
        ax.set_ylabel("Y Axis")
        ax.set_zlabel("Z Axis")
        ax.set_title("Nonrigorous Solution")
        plt.show()
    else:
        print("Invalid Plot Type")
In [6]:
# Forward-Euler simulation of quad2 from a hand-picked start state.
stepCnt = 100000
# Need one more for the initial values
ws = np.empty((stepCnt + 1,))
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
# Setting initial values
ws[0], xs[0], ys[0], zs[0] = ( 0.372854105052,
                               0.393518965248,
                               -0.0359026080443,
                               -0.216701666067 )
# Stepping through "time": one explicit Euler step of size dt per iteration.
for i in range(stepCnt):
    # Derivatives of the W, X, Y, Z state
    w_dot, x_dot, y_dot, z_dot = quad2(ws[i], xs[i], ys[i], zs[i])
    ws[i + 1] = ws[i] + (w_dot * dt)
    xs[i + 1] = xs[i] + (x_dot * dt)
    ys[i + 1] = ys[i] + (y_dot * dt)
    zs[i + 1] = zs[i] + (z_dot * dt)
#plot_quad(ws, xs, ys, zs, float(1))
We will leverage the homogeneity of the system to find a hypothetical solution (nonrigorously still, of course). We do this by fixing a period T and seeking to minimize the distance between f(x_0 + T) and f(x_0). We will vary x_0, seeking improvements via Newton's method. Random restarts may be necessary.
In [7]:
def f(x_1, y_1, x_2, y_2):
    """Just a clone of quad2 (evaluated with the default lambda parameters)."""
    return quad2(x_1, y_1, x_2, y_2)
def F(x_1, y_1, x_2, y_2, T):
    """Integrate the system forward by time T with explicit Euler steps of
    size dt, starting from (x_1, y_1, x_2, y_2); return the final state.

    With T = 0 no steps are taken and the start state is returned unchanged.
    """
    steps = math.ceil(T / dt)
    w, x, y, z = x_1, y_1, x_2, y_2
    for _ in range(steps):
        w_dot, x_dot, y_dot, z_dot = f(w, x, y, z)
        w = w + (w_dot * dt)
        x = x + (x_dot * dt)
        y = y + (y_dot * dt)
        z = z + (z_dot * dt)
    return w, x, y, z
def quad_dist(w, x, y, z):
    """Squared Euclidean norm of each 4-D sample (w[i], x[i], y[i], z[i]).

    Note: despite the name, no square root is taken -- each entry of the
    returned list is w^2 + x^2 + y^2 + z^2 for that index.
    """
    return [w[i] * w[i] + x[i] * x[i] + y[i] * y[i] + z[i] * z[i]
            for i in range(len(w))]
def quad_sq_distance(x, y):
    """Squared Euclidean distance between the points x and y
    (equal-length sequences of coordinates)."""
    total = 0
    for i in range(len(x)):
        diff = x[i] - y[i]
        total += diff ** 2
    return total
In [8]:
# Sanity checks: unpack the default start tuple and exercise f and F.
default_start = (0.372854105052, 0.393518965248, -0.0359026080443, -0.216701666067)
## break up tuple, test
testf = f(*default_start)
start_point = F(*default_start, 0)   # T = 0: no steps, returns the start state
end_point = F(*default_start, 1)     # state after integrating for time 1
## testing
print(testf)
print(start_point)
print(end_point)
print(quad_sq_distance(start_point, end_point))
We define g to be the squared distance ( F(x_0 + T) - F(x_0) )^2
In [9]:
def g(x_1, y_1, x_2, y_2, T = 1):
    # Squared distance between the state advanced by time T and the initial
    # state; near-zero values indicate a candidate periodic orbit of period T.
    return quad_sq_distance( F(x_1, y_1, x_2, y_2, T), F(x_1, y_1, x_2, y_2, 0) )
In [10]:
## Testing: squared T=1 gap from the default start point
print(g(*default_start))
We try minimizing g while varying only T.
In [11]:
# Coordinate search over the period T alone: probe g at T +/- radius,
# halve the radius, and step toward whichever edge improved.
current_T = 105.9
current_dist = 0.7339      # best squared gap seen so far; stop below 0.0725
current_radius = 5
# FIX: cap the number of iterations -- the original unbounded `while` loop
# could spin forever when stuck in a trough (as noted in the cell below).
max_iters = 100
print(g(*default_start, current_T))
for _ in range(max_iters):
    if current_dist <= 0.0725:
        break
    ## compute distances at the edges of the current search radius
    d_up = g(*default_start, current_T + current_radius)
    d_down = g(*default_start, current_T - current_radius)
    ## halve the radius
    current_radius = current_radius / 2
    ## determine whether or not to move
    d_swap = min(d_up, d_down)
    if current_dist > d_swap:
        if d_up > d_down:
            current_T = current_T - current_radius
        else:
            current_T = current_T + current_radius
    current_dist = min(current_dist, d_swap)
    print("d_down: " + str(d_down) + " d_current: " + str(current_dist) + " d_up: " + str(d_up))
    print("T: " + str(current_T) + " radius: " + str(current_radius) + " dist: " + str(current_dist))
print(g(*default_start, current_T))
We see that it is easy to get stuck in infinite loops while in troughs.
In [12]:
# Shared settings for the hand-rolled Newton experiments below.
iteration_count = 1000
x = default_start
import operator
def tuple_add(a, b):
    """Elementwise sum of two equal-length tuples."""
    return tuple(ai + bi for ai, bi in zip(a, b))
def tuple_subtract(a, b):
    """Elementwise difference a - b of two equal-length tuples.

    Simplified: the original negated every element of b and then added;
    using operator.sub directly is equivalent and matches list_subtract.
    """
    return tuple(map(operator.sub, a, b))
def list_subtract(a, b):
    """Elementwise difference a - b, returned as a list."""
    return [ai - bi for ai, bi in zip(a, b)]
def approx_derivs(x):
    """Forward-difference approximation of the gradient of g at x.

    Partials are returned in argument order (x_1, y_1, x_2, y_2) -- the
    original's local names interleaved x/y labels, but the perturbation
    order, and hence the returned order, is the same.
    """
    base = g(*x)
    partials = []
    for i in range(4):
        step = tuple(dt if j == i else 0 for j in range(4))
        partials.append((g(*tuple_add(x, step)) - base) / dt)
    return tuple(partials)
def newton_iterate(x):
    """UNFINISHED: computes forward-difference partials of g at x and then
    discards them (the function implicitly returns None).

    The intended Newton loop is left commented out below; note it also
    references a misspelled name `tuple_substract`.
    NOTE(review): as in approx_derivs, the local names interleave x/y labels
    relative to the (x_1, y_1, x_2, y_2) perturbation order.
    """
    gx0 = g(*x)
    x_1_dot = ( g(*tuple_add(x, (dt, 0, 0 , 0 ) ) ) - gx0 ) / dt
    x_2_dot = ( g(*tuple_add(x, (0, dt, 0 , 0 ) ) ) - gx0 ) / dt
    y_1_dot = ( g(*tuple_add(x, (0, 0, dt, 0 ) ) ) - gx0 ) / dt
    y_2_dot = ( g(*tuple_add(x, (0, 0, 0 , dt) ) ) - gx0 ) / dt
    # for i in range(iteration_count):
    #     ## perform Newton iteration
    #     x_dot = approx_derivs(x)
    #     x = tuple_substract(x, x_dot)
In [13]:
import numdifftools as nd
def g_nd(x):
    """Array-argument adapter for g, as required by numdifftools."""
    return g(*tuple(x))
# Numerical derivative operators for g (via numdifftools).
g_hessian = nd.core.Hessian(g_nd)
g_jacobian = nd.core.Jacobian(g_nd)
In [14]:
# Inspect the numerical Hessian and Jacobian of g at the default start,
# plus the Newton step H^-1 * J^T they induce.
x_0 = default_start
print(g_hessian(x_0))
print("---")
print(g_jacobian(x_0))
print("---")
print(np.linalg.inv(g_hessian(x_0)))
print("---")
print(np.matmul(np.linalg.inv(g_hessian(x_0)), np.transpose(g_jacobian(x_0))))
We now begin Newton iterations
In [15]:
# Ten Newton iterations on g from the default start point.
x_0 = list(default_start)
x = x_0
hessian = nd.core.Hessian(g_nd)
jacobian = nd.core.Jacobian(g_nd)
for i in range(10):
    # Newton step: solve H * step = J^T instead of forming the explicit
    # inverse -- cheaper and numerically better conditioned.
    adjust = np.linalg.solve(hessian(x), np.transpose(jacobian(x)))
    adjust = np.transpose(adjust)[0]
    x = list_subtract(x, adjust)
    print(x)
print(g_nd(x))
print(x)
In [16]:
print(default_start)
In [17]:
print(default_lambda_1, default_lambda_2, default_lambda_3)
In [18]:
def newton_search(x_0, T = 1, N = 100):
    """Run N Newton iterations on g_nd from x_0, printing each iterate
    and the final objective value.

    NOTE(review): T is accepted for interface compatibility but is never
    used -- g_nd closes over the module-level g with its default period.
    N (new, default 100 = the original hard-coded count) caps iterations.
    """
    x = x_0
    hessian = nd.core.Hessian(g_nd)
    jacobian = nd.core.Jacobian(g_nd)
    for i in range(N):
        # Solve H * step = J^T rather than explicitly inverting the Hessian.
        adjust = np.linalg.solve(hessian(x), np.transpose(jacobian(x)))
        adjust = np.transpose(adjust)[0]
        x = list_subtract(x, adjust)
        print(x)
    print(g_nd(x))
    print(x)
In [19]:
x_0 = list([3, 2, 3, 2])
In [20]:
#newton_search(x_0)
In [21]:
start_plot_1 = (0, 5, -40, -40)   # NOTE(review): immediately overwritten below
start_plot_1 = default_start
stepCnt = 100000
# Need one more for the initial values
ws = np.empty((stepCnt + 1,))
xs = np.empty((stepCnt + 1,))
ys = np.empty((stepCnt + 1,))
zs = np.empty((stepCnt + 1,))
# Setting initial values
ws[0], xs[0], ys[0], zs[0] = start_plot_1
# Stepping through "time": explicit Euler with step dt.
for i in range(stepCnt):
    # Derivatives of the W, X, Y, Z state
    w_dot, x_dot, y_dot, z_dot = quad2(ws[i], xs[i], ys[i], zs[i])
    ws[i + 1] = ws[i] + (w_dot * dt)
    xs[i + 1] = xs[i] + (x_dot * dt)
    ys[i + 1] = ys[i] + (y_dot * dt)
    zs[i + 1] = zs[i] + (z_dot * dt)
    # print(w_dot, x_dot, y_dot, z_dot)
    # print(ws[i], xs[i], ys[i], zs[i])
#plot_quad(ws, xs, ys, zs, 0)
#plot_quad(ws, ys, xs, zs, 0)
#plot_quad(*start_plot_1)
In [22]:
def experiment_1(start_pt = default_start,
                 T = 1,
                 lmbda = [default_lambda_1, default_lambda_2, default_lambda_3],
                 res = 0.001,
                 expmt = "search"):
    """Newton-search for a periodic orbit, or plot a simulated path.

    start_pt -- initial state (x_1, y_1, x_2, y_2)
    T        -- candidate period / simulation horizon
    lmbda    -- [lambda_1, lambda_2, lambda_3] system parameters
    res      -- NOTE(review): accepted but unused; integration uses global dt
    expmt    -- 'search' runs Newton iterations, 'plot' simulates and plots
    """
    ## evaluation function: time derivatives of the state
    def dots(x_0, lmbda):
        """
        dz1/dt = lambda_2 * z1^2 - (lambda_2 + lambda_3) * z1 * z2
        dz2/dt = lambda_1 * z2^2 - (lambda_1 + lambda_3) * z1 * z2
        http://www.math.kit.edu/iag3/~herrlich/seite/wws-11/media/wws-talk-valdez.pdf
        """
        x_1, y_1, x_2, y_2 = x_0[0], x_0[1], x_0[2], x_0[3]
        lambda_1, lambda_2, lambda_3 = lmbda[0], lmbda[1], lmbda[2]
        x_1_dot = lambda_2 * (x_1**2 - y_1**2) - (lambda_2 + lambda_3) * (x_1*x_2 - y_1*y_2)
        y_1_dot = 2 * lambda_2 * x_1 * y_1 - (lambda_2 + lambda_3) * (x_1*y_2 + y_1*x_2)
        x_2_dot = lambda_1 * (x_2**2 - y_2**2) - (lambda_1 + lambda_3) * (x_1*x_2 - y_1*y_2)
        y_2_dot = 2 * lambda_1 * x_2 * y_2 - (lambda_1 + lambda_3) * (x_1*y_2 + y_1*x_2)
        return [x_1_dot, y_1_dot, x_2_dot, y_2_dot]

    def f(x_0, lmbda, T = 1):
        """Integrate forward by T (explicit Euler, step dt); return end state."""
        ### TODO: refactor, make into array, then transpose
        stepCnt = math.ceil(T / dt)
        # Need one more for the initial values
        ws = np.empty((stepCnt + 1, ))
        xs = np.empty((stepCnt + 1, ))
        ys = np.empty((stepCnt + 1, ))
        zs = np.empty((stepCnt + 1, ))
        ws[0], xs[0], ys[0], zs[0] = x_0[0], x_0[1], x_0[2], x_0[3]
        for i in range(stepCnt):
            derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
            ws[i + 1] = ws[i] + (derivs[0] * dt)
            xs[i + 1] = xs[i] + (derivs[1] * dt)
            ys[i + 1] = ys[i] + (derivs[2] * dt)
            zs[i + 1] = zs[i] + (derivs[3] * dt)
        return [ ws[-1], xs[-1], ys[-1], zs[-1] ]

    def g(x_0, lmbda, T = 1):
        """Objective: squared gap between the state at time T and at time 0."""
        return quad_sq_distance( f(x_0, lmbda, T), f(x_0, lmbda, 0) )

    def g_T(x_0):
        """g instantiated with a fixed period"""
        return g(x_0, lmbda, T)

    def newton_search(x_0, T = 1, N = 25):
        """Newton-iterate g_T from x_0; print the final value and iterate."""
        x = x_0
        hessian = nd.core.Hessian(g_T)
        jacobian = nd.core.Jacobian(g_T)
        for i in range(N):
            adjust = np.matmul(np.linalg.inv(hessian(x)), np.transpose( jacobian(x)))
            adjust = np.transpose(adjust)[0]
            x = list_subtract(x, adjust)
        print(g_T(x))
        print(x)

    def plot_sim_path(x_0, T):
        """Simulate from x_0 for time T and display with plot_quad."""
        stepCnt = math.ceil(T / dt)
        ws = np.empty((stepCnt + 1,))
        xs = np.empty((stepCnt + 1,))
        ys = np.empty((stepCnt + 1,))
        zs = np.empty((stepCnt + 1,))
        ws[0], xs[0], ys[0], zs[0] = x_0[0], x_0[1], x_0[2], x_0[3]
        for i in range(stepCnt):
            derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
            ws[i + 1] = ws[i] + (derivs[0] * dt)
            xs[i + 1] = xs[i] + (derivs[1] * dt)
            ys[i + 1] = ys[i] + (derivs[2] * dt)
            zs[i + 1] = zs[i] + (derivs[3] * dt)
        plot_quad(ws, xs, ys, zs, 0)

    if expmt == 'search':
        newton_search(start_pt)
    if expmt == 'plot':
        # BUG FIX: the original called plot_sim_path(x_0, T), but x_0 is not
        # defined in this function's scope -- it silently resolved to a stale
        # module-level x_0 when one existed. The intended start point is the
        # start_pt argument.
        plot_sim_path(start_pt, T)
# experiment_1((10.2,
# 9.3,
# 14.4,
# 12.2) , expmt = 'plot')
In [23]:
# experiment_1((4.2, 3.3, 4.4, 2.2),
# T = 10000,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'plot')
In [24]:
# experiment_1(default_start,
# T = 1000000,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'plot')
In [25]:
# experiment_1((4.2, 3.3, 4.4, 2.2),
# T = 1000,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'search')
# experiment_1(default_start,
# T = 1000,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'search')
So far: Minimize |F(T) - F(0)|^2, T = 1, s.t. F(0) = f_0
Question: how do I look for start points?
In [26]:
import scipy.integrate as integrate
import scipy.special as special
from scipy.integrate import quad
def test_integrand(t, a = 0, b = 0):
return t**2 + a - b
# Integrate t^2 + 0.13 - 1.03 over [2, 4]; quad returns (value, abs_error).
result = quad(test_integrand, 2, 4, args=(0.13, 1.03))
#result_simps = integrate.simps(integrand, 2, 4, args=(0.13, 1.03))
In [27]:
print("Result: " + str(result[0]) + ",\nError Bound: " + str(result[1]))
In [28]:
from scipy.optimize import newton
def experiment_2(start_pt = default_start,
                 T = 1,
                 lmbda = [default_lambda_1, default_lambda_2, default_lambda_3],
                 res = 0.001,
                 expmt = "search"):
    """Variant of experiment_1 that scores candidate orbits by integrating the
    squared T-gap along the trajectory (scipy.integrate.quad) and searching
    with scipy.optimize.newton.

    start_pt -- initial state (x_1, y_1, x_2, y_2)
    T        -- candidate period / simulation horizon
    lmbda    -- [lambda_1, lambda_2, lambda_3] system parameters
    res      -- NOTE(review): accepted but unused; integration uses global dt
    expmt    -- 'search' or 'plot'
    """
    ## evaluation function: time derivatives of the state
    def dots(x_0, lmbda):
        """
        dz1/dt = lambda_2 * z1^2 - (lambda_2 + lambda_3) * z1 * z2
        dz2/dt = lambda_1 * z2^2 - (lambda_1 + lambda_3) * z1 * z2
        http://www.math.kit.edu/iag3/~herrlich/seite/wws-11/media/wws-talk-valdez.pdf
        """
        x_1, y_1, x_2, y_2 = x_0[0], x_0[1], x_0[2], x_0[3]
        lambda_1, lambda_2, lambda_3 = lmbda[0], lmbda[1], lmbda[2]
        x_1_dot = lambda_2 * (x_1**2 - y_1**2) - (lambda_2 + lambda_3) * (x_1*x_2 - y_1*y_2)
        y_1_dot = 2 * lambda_2 * x_1 * y_1 - (lambda_2 + lambda_3) * (x_1*y_2 + y_1*x_2)
        x_2_dot = lambda_1 * (x_2**2 - y_2**2) - (lambda_1 + lambda_3) * (x_1*x_2 - y_1*y_2)
        y_2_dot = 2 * lambda_1 * x_2 * y_2 - (lambda_1 + lambda_3) * (x_1*y_2 + y_1*x_2)
        return [x_1_dot, y_1_dot, x_2_dot, y_2_dot]

    def f(x_0, lmbda, T = 1):
        """Integrate forward by T (explicit Euler, step dt); return end state."""
        ### TODO: refactor, make into array, then transpose
        stepCnt = math.ceil(T / dt)
        # Need one more for the initial values
        ws = np.empty((stepCnt + 1, ))
        xs = np.empty((stepCnt + 1, ))
        ys = np.empty((stepCnt + 1, ))
        zs = np.empty((stepCnt + 1, ))
        ws[0], xs[0], ys[0], zs[0] = x_0[0], x_0[1], x_0[2], x_0[3]
        for i in range(stepCnt):
            derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
            ws[i + 1] = ws[i] + (derivs[0] * dt)
            xs[i + 1] = xs[i] + (derivs[1] * dt)
            ys[i + 1] = ys[i] + (derivs[2] * dt)
            zs[i + 1] = zs[i] + (derivs[3] * dt)
        return [ ws[-1], xs[-1], ys[-1], zs[-1] ]

    def f_integrand(t, x_0, lmbda, T = 1):
        """Squared distance between the states at times t + T and t."""
        return quad_sq_distance(f(x_0, lmbda, t + T), f(x_0, lmbda, t))

    def phi(t, x_0, lmbda):
        """What we want to minimize: integral of the squared T-gap over [0, T].

        NOTE(review): the t parameter is unused -- quad integrates over the
        first argument of f_integrand from 0 to T, so phi is constant in t
        and scipy.optimize.newton on phi_instance cannot make progress.
        This likely explains why the searches below were unfruitful.
        """
        return quad(f_integrand, 0, T, args=(x_0, lmbda))[0]

    def phi_instance(t):
        """phi with the start point and parameters bound."""
        return phi(t, start_pt, lmbda)

    def newton_search(t, T = 1, N = 25):
        """Run scipy.optimize.newton on phi_instance starting from t."""
        newton(phi_instance, t)

    def plot_sim_path(x_0, T):
        """Simulate from x_0 for time T and display with plot_quad."""
        stepCnt = math.ceil(T / dt)
        ws = np.empty((stepCnt + 1,))
        xs = np.empty((stepCnt + 1,))
        ys = np.empty((stepCnt + 1,))
        zs = np.empty((stepCnt + 1,))
        ws[0], xs[0], ys[0], zs[0] = x_0[0], x_0[1], x_0[2], x_0[3]
        for i in range(stepCnt):
            derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
            ws[i + 1] = ws[i] + (derivs[0] * dt)
            xs[i + 1] = xs[i] + (derivs[1] * dt)
            ys[i + 1] = ys[i] + (derivs[2] * dt)
            zs[i + 1] = zs[i] + (derivs[3] * dt)
        plot_quad(ws, xs, ys, zs, 0)

    if expmt == 'search':
        newton_search(t = 100)
    if expmt == 'plot':
        # BUG FIX: the original called plot_sim_path(x_0, T), but x_0 is not
        # defined in this scope; the intended start point is start_pt.
        plot_sim_path(start_pt, T)
In [29]:
# experiment_2((4.2, 3.3, 4.4, 2.2),
# T = 1,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'search')
# experiment_2(default_start,
# T = 1,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'search')
It appears that the computation takes too long or is too expensive to be interesting.
In [30]:
# experiment_2((-4.2, 3.3, -4.4, 2.2),
# T = 100,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'search')
Same here: It appears that the computation takes too long or is too expensive to be interesting.
Can we consider other optimization algorithms, such as:
In [31]:
class HyperPlane:
    """A hyperplane a*x_1 + b*y_1 + c*x_2 + d*y_2 = e in R^4."""

    def __init__(self, a, b, c, d, e):
        self.a = a
        self.b = b
        self.c = c
        self.d = d
        self.e = e

    def __call__(self, xywz):
        """Determines which side of the hyperplane that xywz is on"""
        weighted = (self.a * xywz[0] + self.b * xywz[1]
                    + self.c * xywz[2] + self.d * xywz[3])
        return weighted - self.e

    def whichSide(self, pt):
        """Sign of the offset: 1 (positive side), -1 (negative side), 0 (on plane)."""
        val = self(pt)
        if val > 0:
            return 1
        if val < 0:
            return -1
        return 0

    def __str__(self):
        return "Hyperplane: " + str(self.a) + "*x_1 + " + \
               str(self.b) + "*y_1 + " + \
               str(self.c) + "*x_2 + " + \
               str(self.d) + "*y_2" + \
               " = " + str(self.e)
In [32]:
## Testing the HyperPlane class
testplane = HyperPlane(3,2,1,3,-4)
print(testplane([2,2,2,4,1]))
print(testplane([-10,-10,-10,-10,-10]))
print(testplane.whichSide([2,2,2.4,4,1]))
print(testplane.whichSide([0, 0.0, -4.0, 0, 0]))
print(testplane.whichSide([-10.3,-10,-10,-10,-10]))
print(testplane)
In [40]:
class IntersectChecker:
    """Stateful detector of trajectory crossings through a hyperplane.

    Feed successive trajectory points to __call__; it returns the sign of a
    crossing, or 0 when the point stayed on the same side as before.
    """

    def __init__(self, hyperplane = HyperPlane(1, 1, 1, 1, 4) ):
        self.hyperplane = hyperplane
        self.flip = 0  # last observed side; 0 until a first off-plane point is seen

    def __call__(self, xywz):
        """
        Checks if we crossed the hyperplane given by abcde.
        Returns 0 if no crossing.
        Return -1 if crossed from positive to negative.
        Return 1 if crossed from negative to positive
        """
        side = self.hyperplane.whichSide(xywz)
        if self.flip == 0:
            # First pass: just record which side we start on.
            self.flip = side
            return 0
        if side == self.flip:
            # Unchanged: still on the same side.
            return 0
        # Side changed: record the new side and report the crossing.
        self.flip = side
        return side
In [44]:
def poincarePlot(ws, xs, ys, zs, crossings, txt = " "):
    """3-D plot of the Poincare-section samples (negative crossings only).

    NOTE(review): only ws, xs, ys are sliced and plotted; zs is accepted but
    never used -- presumably the section collapses one coordinate. Confirm.
    """
    ## Plot setup
    fig = plt.figure()
    # FIX: fig.gca(projection='3d') was removed in Matplotlib 3.6+.
    ax = fig.add_subplot(projection='3d')
    plt.subplots_adjust(top=0.85)
    plt.suptitle(txt, fontsize=14)
    ## keep only the samples where the trajectory crossed positive -> negative
    crossings_array = np.array(crossings)
    indices = list(np.where(crossings_array < 0)[0])
    ws = list(np.array(ws)[indices])
    xs = list(np.array(xs)[indices])
    ys = list(np.array(ys)[indices])
    ## execute
    ax.plot(ws, xs, ys)
    ax.set_xlabel("X Axis")
    ax.set_ylabel("Y Axis")
    ax.set_zlabel("Z Axis")
    ax.set_title(txt)
    plt.show()
In [48]:
def poincareExtract(ws, xs, ys, zs, crossings):
    """Return the subsequences of ws, xs, ys, zs at negative crossings,
    printing how many such crossings were found."""
    ## select indices where the crossing indicator is negative
    negative = np.array(crossings) < 0
    indices = list(np.where(negative)[0])
    print("crossings: " + str(len(indices)))
    selected = [list(np.array(seq)[indices]) for seq in (ws, xs, ys, zs)]
    return selected[0], selected[1], selected[2], selected[3]
In [62]:
def experiment_3(start_pt = default_start,
                 T = 1,
                 lmbda = [default_lambda_1, default_lambda_2, default_lambda_3],
                 res = 0.001,
                 hyperplane = HyperPlane(0.2, 0.4, 1.2, -2.1, -1.1),
                 expmt = "search"):
    """Variant of experiment_2 that also records Poincare-section crossings
    of the simulated path through the given hyperplane.

    start_pt   -- initial state (x_1, y_1, x_2, y_2)
    T          -- simulation horizon
    lmbda      -- [lambda_1, lambda_2, lambda_3] system parameters
    res        -- NOTE(review): accepted but unused; integration uses global dt
    hyperplane -- HyperPlane defining the Poincare section
    expmt      -- 'plot' simulates/plots; 'print' is not yet implemented
    """
    ## evaluation function: time derivatives of the state
    def dots(x_0, lmbda):
        """
        dz1/dt = lambda_2 * z1^2 - (lambda_2 + lambda_3) * z1 * z2
        dz2/dt = lambda_1 * z2^2 - (lambda_1 + lambda_3) * z1 * z2
        http://www.math.kit.edu/iag3/~herrlich/seite/wws-11/media/wws-talk-valdez.pdf
        """
        x_1, y_1, x_2, y_2 = x_0[0], x_0[1], x_0[2], x_0[3]
        lambda_1, lambda_2, lambda_3 = lmbda[0], lmbda[1], lmbda[2]
        x_1_dot = lambda_2 * (x_1**2 - y_1**2) - (lambda_2 + lambda_3) * (x_1*x_2 - y_1*y_2)
        y_1_dot = 2 * lambda_2 * x_1 * y_1 - (lambda_2 + lambda_3) * (x_1*y_2 + y_1*x_2)
        x_2_dot = lambda_1 * (x_2**2 - y_2**2) - (lambda_1 + lambda_3) * (x_1*x_2 - y_1*y_2)
        y_2_dot = 2 * lambda_1 * x_2 * y_2 - (lambda_1 + lambda_3) * (x_1*y_2 + y_1*x_2)
        return [x_1_dot, y_1_dot, x_2_dot, y_2_dot]
        ## if reversing time
        #return [-x_1_dot, -y_1_dot, -x_2_dot, -y_2_dot]

    def f(x_0, lmbda, T = 1):
        """Integrate forward by T (explicit Euler, step dt); return end state."""
        ### TODO: refactor, make into array, then transpose
        stepCnt = math.ceil(T / dt)
        # Need one more for the initial values
        ws = np.empty((stepCnt + 1, ))
        xs = np.empty((stepCnt + 1, ))
        ys = np.empty((stepCnt + 1, ))
        zs = np.empty((stepCnt + 1, ))
        ws[0], xs[0], ys[0], zs[0] = x_0[0], x_0[1], x_0[2], x_0[3]
        for i in range(stepCnt):
            derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
            ws[i + 1] = ws[i] + (derivs[0] * dt)
            xs[i + 1] = xs[i] + (derivs[1] * dt)
            ys[i + 1] = ys[i] + (derivs[2] * dt)
            zs[i + 1] = zs[i] + (derivs[3] * dt)
        return [ ws[-1], xs[-1], ys[-1], zs[-1] ]

    def f_integrand(t, x_0, lmbda, T = 1):
        """Squared distance between the states at times t + T and t."""
        return quad_sq_distance(f(x_0, lmbda, t + T), f(x_0, lmbda, t))

    def phi(t, x_0, lmbda):
        """What we want to minimize.

        NOTE(review): t is unused -- phi is constant in t (see experiment_2).
        """
        return quad(f_integrand, 0, T, args=(x_0, lmbda))[0]

    def phi_instance(t):
        """phi with the start point and parameters bound."""
        return phi(t, start_pt, lmbda)

    def newton_search(t, T = 1, N = 25):
        """Run scipy.optimize.newton on phi_instance starting from t."""
        newton(phi_instance, t)

    def plot_sim_path(x_0, T):
        """Simulate path, collecting Poincare crossings, then plot them."""
        stepCnt = math.ceil(T / dt)
        ws = np.empty((stepCnt + 1,))
        xs = np.empty((stepCnt + 1,))
        ys = np.empty((stepCnt + 1,))
        zs = np.empty((stepCnt + 1,))
        crossings = np.empty((stepCnt + 1,))
        ws[0], xs[0], ys[0], zs[0] = x_0[0], x_0[1], x_0[2], x_0[3]
        crossings[0] = 0
        intersect_checker = IntersectChecker(hyperplane)
        ## for tracking min/max/mean of the path, relative to the hyperplane
        pts = np.empty((stepCnt,))
        for i in range(stepCnt):
            derivs = dots([ ws[i], xs[i], ys[i], zs[i] ], lmbda )
            ws[i + 1] = ws[i] + (derivs[0] * dt)
            xs[i + 1] = xs[i] + (derivs[1] * dt)
            ys[i + 1] = ys[i] + (derivs[2] * dt)
            zs[i + 1] = zs[i] + (derivs[3] * dt)
            pt = (ws[i + 1], xs[i + 1], ys[i + 1], zs[i + 1])
            pts[i] = hyperplane(pt)
            crossings[i + 1] = intersect_checker((ws[i + 1], xs[i + 1], ys[i + 1], zs[i + 1]))
        print(max(pts))
        print(min(pts))
        print(sum(pts) / len(pts))
        # NOTE(review): the extracted section is printed (count) but the
        # return value is discarded; poincarePlot re-derives the slice.
        poincareExtract(ws, xs, ys, zs, crossings)
        poincarePlot(ws, xs, ys, zs, crossings, str(hyperplane))

    if expmt == 'print':
        print("not yet implemented")
    if expmt == 'plot':
        # BUG FIX: the original called plot_sim_path(x_0, T), but x_0 is not
        # defined in this scope; the intended start point is start_pt.
        plot_sim_path(start_pt, T)
In [ ]:
# experiment_3((4.2, 3.3, 4.4, 2.2),
# T = 1000,
# lmbda = [0.086, 0.141, 0.773],
# expmt = 'search')
# Live run: simulate experiment_1 from a hand-picked start point and plot.
experiment_1( (0.032, 0.308, -0.1, -0.5) ,
              T = 10000,
              lmbda = [0.086, 0.141, 0.773],
              expmt = 'plot')
# experiment_3(default_start,
# T = 10000,
# lmbda = [0.086, 0.141, 0.773],
# hyperplane = HyperPlane(4, -3, -1, -4, 0),
# expmt = 'plot')
Playing around with various parameters has not been fruitful. We may need to increase the search volume.
x1' = -x1 + x1*x2 + x1*x3
x2' = 3*x2*x4 - 2*x1*x2
x3' = x3*x4 - x3*x1 + x2*x1
x4' = -x3*x4 - 3*x2*x4 + x1
It should have a periodic orbit. The change in the code should be minimal but this can test the Poincare code.
In [ ]: