In [1]:
import pyaudi as pa
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
seaborn.set_style("whitegrid")
from Utils import DA as da
from Utils.RK4 import RK4
Define some nonlinear dynamics (the Van der Pol oscillator) with a parameter to include in the expansion.
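The damping parameter is $p$, and multiplying the right-hand side by $t_f$ lets us integrate over normalized time $\tau = t/t_f \in [0, 1]$, so that the final time can itself become an expansion variable:

$$\frac{dx_1}{d\tau} = t_f\, x_2, \qquad \frac{dx_2}{d\tau} = t_f\left[-x_1 + p\,(1 - x_1^2)\,x_2\right]$$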
In [2]:
def dyn(x, t, p, tf):
    dx1 = x[1]
    dx2 = -x[0] + p*(1-x[0]**2)*x[1]
    return np.array([dx1, dx2])*tf
Let's look at a few sample paths, holding the initial condition fixed and varying the parameter p.
In [27]:
tf = 10
N = 50
x0 = 2*np.random.random((2,N))
x0 = np.ones_like(x0)  # overwrite with a deterministic initial condition
p = np.random.random((N,))
t = np.linspace(0,1,1000) # Idea - could we make the entire t array DA variables and expand that way? Then move the points optimally?!
x = RK4(dyn, x0, t, (p,tf))
print(x.shape)
for xi in np.transpose(x, (2, 1, 0)):
    plt.figure(1)
    plt.plot(t*tf, xi[0])
    plt.figure(2)
    plt.plot(t*tf, xi[1])
plt.show()
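Now expand the flow around a nominal point. Declaring the initial state, the parameter, and the final time as second-order DA variables turns each integrated state into a second-order Taylor polynomial in the perturbation $\delta = (\delta x, \delta y, \delta p, \delta t_f)$, which can be evaluated at all $N$ sampled perturbations at once instead of re-integrating:

$$x(t;\, z_0 + \delta) \;\approx\; x(t;\, z_0) \;+\; \frac{\partial x}{\partial z_0}\,\delta \;+\; \frac{1}{2}\,\delta^{\mathsf T}\,\frac{\partial^2 x}{\partial z_0^2}\,\delta$$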
In [32]:
names = ['x','y','p','tf']
x0_nom = np.array([1,1])
p_nom = np.mean(p)
tf_nom = tf
x0_da = da.make([x0_nom[0],x0_nom[1],p_nom,tf_nom], names, 2)
x_da = RK4(dyn, x0_da[0:2], t, (x0_da[2], x0_da[3]))
delta = np.concatenate((x0.T-x0_nom, p[:,None]-p_nom, tf*np.ones_like(p[:,None])-tf_nom),axis=1)
# Verify we're evaluating at the correct points - these should match perfectly
# print x0.T[0]
# print da.evaluate(x_da[0], names, delta[:1])[0]
x_eval = np.array([da.evaluate(x_da_i, names, delta).T for x_da_i in x_da])
print(x_eval.shape)
error = np.abs(x-x_eval)
err_mean = np.mean(error, axis=2)
err_var = np.var(error, axis=2)
plt.figure(1,figsize=(10,10))
plt.plot(t, err_mean)
plt.plot(t, err_mean+3*err_var, 'k--')  # upper band: mean plus three times the variance of the error
# plt.plot(t, err_mean-3*err_var, 'k--')
plt.show()
In [33]:
colors = np.linalg.norm(delta, axis=1)
colors = 1-(colors - colors.min())/(colors.max()-colors.min()) # Largest perturbations are black
# plot = plt.plot
# for err,color in zip(np.transpose(error,(2,0,1)),colors):
# plt.figure(1)
# plot(t*tf, err[:,0], color='{}'.format(color))
# plt.figure(2)
# plot(t*tf, err[:,1], color='{}'.format(color))
plt.figure(figsize=(10,10))
for xi, color in zip(np.transpose(x, (2, 1, 0)), colors):
    # plt.plot(t*tf, xi.T)
    plt.plot(xi[0], xi[1], color='{}'.format(color))
for xi, color in zip(np.transpose(x_eval, (2, 1, 0)), colors):
    plt.plot(xi[0], xi[1], 'o--', color='{}'.format(color))
plt.show()
In [2]:
def dyn_sat(x, t, tf=5):
    dx1 = x[1]
    dx2 = np.clip((-x[0] + 1*(1-x[0]**2)*x[1]), -1, 1)
    return np.array([dx1, dx2])*tf

# from Utils.DA import clip
def dyn_tanh(x, t, tf=5):
    dx1 = x[1]
    dx2 = da.clip((-x[0] + 1*(1-x[0]**2)*x[1]), -1, 1)
    return np.array([dx1, dx2])*tf
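The hard saturation in dyn_sat is non-smooth at the clip limits, which is hostile to a polynomial expansion, so the DA version uses da.clip from Utils. Its implementation isn't shown here, but (as the name dyn_tanh hints) a minimal sketch of a tanh-based smooth surrogate might look like the following; smooth_clip, sharpness, and the type dispatch are illustrative assumptions, not part of Utils.DA:
In [ ]:
import numpy as np
import pyaudi as pa

def smooth_clip(x, lo, hi, sharpness=5.0):
    # Illustrative smooth, everywhere-differentiable stand-in for np.clip(x, lo, hi):
    # maps the midpoint to itself, saturates toward lo/hi, and approaches
    # the hard clip as `sharpness` grows.
    tanh = pa.tanh if isinstance(x, (pa.gdual_double, pa.gdual_vdouble)) else np.tanh
    mid = 0.5*(lo + hi)
    half = 0.5*(hi - lo)
    return mid + half*tanh(sharpness*(x - mid)/half)
Because such a surrogate is smooth everywhere, the order-3 expansion below stays valid across the saturation boundary, at the price of a modelling mismatch between the dyn_sat trajectories and the dyn_tanh map.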
In [3]:
N = 5
x0 = (np.array([1,1]) + np.random.normal(size=(N,2))/30).T
t = np.linspace(0,1)
x = RK4(dyn_sat, x0, t)
In [4]:
for xi in np.transpose(x, (2, 1, 0)):
    # plt.figure(1)
    # plt.plot(t, xi[0])
    # plt.figure(2)
    # plt.plot(t, xi[1])
    plt.figure(3)
    plt.plot(xi[0], xi[1])
plt.show()
In [7]:
names = ['x','y']
x0_nom = np.array([1.,1.])
x0_da = da.make(x0_nom, names, 3)
x_da = RK4(dyn_tanh, x0_da, t)
delta = x0.T-x0_nom
# Verify we're evaluating at the correct points - these should match perfectly
# print x0.T[0]
# print da.evaluate(x_da[0], names, delta[:1])[0]
x_eval = np.array([da.evaluate(x_da_i, names, delta).T for x_da_i in x_da])
print(x_eval.shape)
error = np.abs(x-x_eval)
err_mean = np.mean(error, axis=2)
err_var = np.var(error, axis=2)
plt.figure(1,figsize=(10,10))
plt.plot(t, err_mean)
plt.plot(t, err_mean+3*err_var, 'k--')
plt.show()
In [10]:
colors = np.linalg.norm(delta, axis=1)
colors = 1-(colors - colors.min())/(colors.max()-colors.min()) # Largest perturbations are black
tf = 1
plot = plt.plot
for err, color in zip(np.transpose(error, (2, 0, 1)), colors):
    plt.figure(1)
    plot(t*tf, err[:, 0], color='{}'.format(color))
    plt.figure(2)
    plot(t*tf, err[:, 1], color='{}'.format(color))
plt.figure(figsize=(10,10))
for xi, color in zip(np.transpose(x, (2, 1, 0)), colors):
    # plt.plot(t*tf, xi.T)
    plt.plot(xi[0], xi[1], color='{}'.format(color))
for xi, color in zip(np.transpose(x_eval, (2, 1, 0)), colors):
    plt.plot(xi[0], xi[1], 'o', color='{}'.format(color))
plt.show()
In [2]:
def dyn(x, t, p=1.):
    dx1 = x[1]
    dx2 = -x[0] + p*(1-x[0]**2)*x[1]
    return np.array([dx1, dx2])
In [4]:
x0 = np.array([1, 1.5])
t = np.linspace(0,3)
x = RK4(dyn, x0, t)
In [13]:
X0 = da.make(x0, 'xy', 2)
X = RK4(dyn, X0, t)
Xf = X[-1]
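The Jacobian of the final state with respect to the initial state is the state-transition matrix $\Phi = \partial x_f/\partial x_0$. To first order it carries an initial covariance through the flow, and the trace of the final covariance gives a scalar dispersion cost whose own gradient and Hessian with respect to $x_0$ fall out of the higher orders of the same expansion:

$$P_f = \Phi\, P_0\, \Phi^{\mathsf T}, \qquad J = \operatorname{tr}(P_f)$$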
In [20]:
# print(da.jacobian(Xf, 'xy'))
STM = np.array([da.differentiate(xf, 'xy') for xf in Xf])
# print(STM)
P0 = np.diag([0.1, 0.1])
Pf = STM.dot(P0).dot(STM.T)
# print(Pf)
J = Pf[0,0] + Pf[1,1]
dJ = da.differentiate(J, 'xy')
# print(dJ)
print("J_x0 = {}".format(da.const(dJ)))
print("J_xx = {}".format(da.jacobian(dJ, 'xy')))
In [2]:
def dyn(x, t, a, u):
    return a*x + u  # - 0.5*a**2*x**3

a = 0.1
f1 = lambda x, t: dyn(x, t, a, -1)
f2 = lambda x, t: dyn(x, t, a, 0)
f3 = lambda x, t: dyn(x, t, 2*a, 1)

def switched_dyn(x, t, f, s, *args):
    i = np.searchsorted(s, t, side='right')  # binary search, O(log n); s must be sorted
    return f[i](x, t, *args)
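To see the segment selection concretely: with switch times s = [1.5, 2.0], np.searchsorted with side='right' returns 0, 1, or 2 depending on where t falls, indexing f1, f2, or f3 respectively:
In [ ]:
s_demo = [1.5, 2.0]
for t_query in [0.7, 1.7, 2.5]:
    # 0 -> f1 active, 1 -> f2 active, 2 -> f3 active
    print(t_query, np.searchsorted(s_demo, t_query, side='right'))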
In [3]:
tf = 3
s = [1.5, 2.]
t = np.linspace(0, tf, 500)
x0 = [1]
x = RK4(switched_dyn, x0, t, ([f1,f2,f3], s))
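The next cell uses a classical sensitivity result for switched systems: perturbing a switch time $s_i$ changes the final state through the mismatch of the two vector fields at the switch, propagated forward by the state-transition matrix,

$$\frac{\partial x(t_f)}{\partial s_i} = \Phi(t_f, s_i)\,\bigl[f_i\bigl(x(s_i)\bigr) - f_{i+1}\bigl(x(s_i)\bigr)\bigr], \qquad \Phi(t_f, s_i) = \Phi(t_f, t_0)\,\Phi(s_i, t_0)^{-1},$$

where the $\Phi$ ratios are the scalar STMf/STMi and STMf/STMj below. A gradient step on $J = \tfrac{1}{2}x_f^2$ then shifts the switch times, and the STM prediction of the new final state is checked against re-integration and against a Nelder-Mead solution.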
In [4]:
# Compute the derivative of the final state with respect to a switch time,
# then show that x_f(s_new) ≈ x_f(s_old) + dx_f/ds_i * (s_new - s_old)
y0 = [pa.gdual_double(x0[0], 'x0', 1)]
y = RK4(switched_dyn, y0, t, ([f1,f2,f3], s))
J = (0.5 * y[-1]**2)[0]
print(J)
i = np.argmin(np.abs(t-s[0]))
j = np.argmin(np.abs(t-s[1]))
STMf = da.jacobian(y[-1], ['x0'])
STMi = da.jacobian(y[i], ['x0'])
STMj = da.jacobian(y[j], ['x0'])
STM1 = STMf/STMi
STM2 = STMf/STMj
alpha = 0.165  # step size for the gradient step on the switch times
ds = -alpha*2*x[-1]*np.array([STM1*(f1(x[i], s[0]) - f2(x[i], s[0])),
                              STM2*(f2(x[j], s[1]) - f3(x[j], s[1]))]).squeeze()
# ds = np.array([0., -0.25])
xnew_stm = x[-1] + STM1*(f1(x[i], s[0]) - f2(x[i], s[0]))*ds[0] \
                 + STM2*(f2(x[j], s[1]) - f3(x[j], s[1]))*ds[1]
snew = s + ds
xnew = RK4(switched_dyn, x0, t, ([f1,f2,f3], snew))
xnm = RK4(switched_dyn, x0, t, ([f1,f2,f3], [1.43703461, 2.54234009])) # Using Nelder-Mead method
plt.plot(t, x, label='original')
plt.plot(t, xnew, label='new')
plt.plot(t, xnm, label='opt')
plt.plot(tf, xnew_stm, 'o', label='STM prediction')
plt.legend()
plt.show()
# dJdx = da.gradient(J, ['x0'])/STMf
dJdx = x[-1]  # dJ/dx_f for J = 0.5*x_f^2
print("Original cost: {}".format(J.constant_cf))
print("New cost: {}".format(0.5*xnew[-1]**2))
print("Estimated cost: {}".format(J.constant_cf + dJdx*(xnew_stm - x[-1]) + 0*0.5*(xnew_stm - x[-1])**2))  # second-order term zeroed out
In [7]:
def obj(s):
    tf = 3
    t = np.linspace(0, tf, 500)
    x0 = [1]
    y0 = [pa.gdual_double(x0[0], 'x0', 1)]
    y = RK4(switched_dyn, y0, t, ([f1, f2, f3], s)).squeeze()
    alpha = 1e-3
    J = 0.5*(y[-1].constant_cf)**2 + alpha*(s[0] - s[1])
    dJdx = y[-1].constant_cf  # da.gradient(J, ['xf'])
    i = np.argmin(np.abs(t - s[0]))
    j = np.argmin(np.abs(t - s[1]))
    STMf = da.gradient(y[-1], ['x0'])
    STMi = da.gradient(y[i], ['x0'])
    STMj = da.gradient(y[j], ['x0'])
    STM1 = STMf/STMi
    STM2 = STMf/STMj
    dJds = alpha*np.array([1, -1])
    # Evaluate the vector-field mismatch on the current trajectory (constant
    # part of y) rather than the stale global x, so the gradient stays valid
    # as the optimizer moves s
    xi, xj = y[i].constant_cf, y[j].constant_cf
    dxds = np.array([STM1*(f1(xi, s[0]) - f2(xi, s[0])), STM2*(f2(xj, s[1]) - f3(xj, s[1]))])
    return J, np.squeeze(dJdx*dxds) + dJds

J, dJds = obj([1.5, 2])
In [111]:
print(J)
print(dJds)
print(x[-1]*STM1*(f1(x[i], s[0]) - f2(x[i], s[0])),
      x[-1]*STM2*(f2(x[j], s[1]) - f3(x[j], s[1])))
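Finally, hand obj to scipy.optimize.minimize. Passing jac=True tells SciPy that the objective returns a (cost, gradient) pair, so the gradient-based methods (SLSQP, BFGS, trust-constr) use the analytic switch-time derivatives, while Nelder-Mead simply ignores them.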
In [ ]:
from scipy.optimize import minimize

for method in ["SLSQP", "Nelder-Mead", "BFGS"]:
    sol = minimize(obj, s, jac=True, method=method)
    print(sol)
    print("\n")

sol = minimize(obj, s, jac=True, method="trust-constr", hess="2-point")
print(sol)