In [1]:
from pyaudi import gdual_double, gdual_vdouble
import pyaudi as pa
import numpy as np
import chaospy as cp
import seaborn
# seaborn.set_style('whitegrid')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
print(plt.style.available)
plt.style.use("seaborn-whitegrid")
from Utils.boxgrid import boxgrid
import Utils.DA as da
from Utils.RK4 import RK4
# %matplotlib ipympl
In [2]:
class Interval:
    def __init__(self, lower, upper):
        assert upper > lower, "Not a valid interval"
        self.interval = (lower, upper)
        self.midpoint = 0.5*(lower + upper)
        self.size = upper - lower

    def split(self):
        I1 = Interval(self.interval[0], self.midpoint)
        I2 = Interval(self.midpoint, self.interval[1])
        return I1, I2

    def __str__(self):
        return "I[{}, {}]".format(*self.interval)

    def contains(self, x):
        """ Vectorized method to check if an array of variables is in the interval """
        x = np.asarray(x)
        return np.logical_and(x <= self.interval[1], x >= self.interval[0])

    def __contains__(self, x):
        """Allows use of 'value in Interval' syntax,
        but only works for scalar x. Use the contains method
        for vectorized membership checking.
        """
        return x <= self.interval[1] and x >= self.interval[0]

# class History:
#     """ A collection of intervals """
#     def __init__(self):
#         self._history = []
#     def find(self, val):
#         """ Determines the interval in which value lies """

def Construct(interval, nsubintervals):
    """ Decomposes an interval into n sub-intervals of equal length """
    a, b = interval.interval
    ends = np.linspace(a, b, nsubintervals + 1)
    return [Interval(a, b) for a, b in zip(ends, ends[1:])]
def find_nearest(array, values):
    """ Finds the deltas at which to evaluate the DAs, and
        returns a collection of indices referring to which results to keep
        Inputs:
            array  - array of evaluation points
            values - array of expansion points [N_points, n_dims]
    """
    array = np.asarray(array)
    values = np.asarray(values)
    big_array = np.array([array - value for value in values])  # array of all possible deltas
    # print(big_array)
    # print(np.linalg.norm(big_array, axis=2))
    if array.squeeze().ndim == 1:
        idx = np.abs(big_array).argmin(axis=0)  # index of the nearest expansion point per evaluation point
    else:
        idx = np.linalg.norm(big_array, axis=2).argmin(axis=0)
    midx = [(a, b) for a, b in zip(idx, range(len(idx)))]
    return [big_array[tup] for tup in midx], midx
# print(find_nearest(np.linspace(0,2,11),[0.33,0.66, 1, 1.33, 1.66]))
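A quick illustration of these helpers (the values here are made up for demonstration, not from the original run):
I = Interval(0.0, 2.0)
print(0.5 in I, I.contains([0.5, 2.5]))    # True [ True False]
print([str(J) for J in I.split()])         # ['I[0.0, 1.0]', 'I[1.0, 2.0]']
print([str(J) for J in Construct(I, 4)])   # four equal sub-intervals of I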
In [3]:
# 1-D Test
d,midx = find_nearest(np.linspace(0,2,3),[0.5,1.5])
print(d)
# Same thing with a useless second element
evals = np.zeros((2,3))
evals[0,:] = np.linspace(0,2,3)
print(evals)
In [4]:
exp = [[0.5,0.2],[0.2,0.]]
ev = [[0.25,-0.1],[0.5,0.1]]
print(find_nearest(ev,exp))
In [5]:
# Another 2-D Test
exp = [[0.25,0.25],[0.75,0.75]]
ev = [[0.25,0.3],[0.9,0],[0.6,0.4]]
print(find_nearest(ev,exp))
In [69]:
domain = (-1,1)
def fun(x):
    if isinstance(x, (gdual_double, gdual_vdouble)):
        return pa.sin(5*x) + pa.exp(pa.abs(x))
    else:
        return np.sin(5*x) + np.exp(np.abs(x))
x = np.linspace(*domain)
y = fun(x)
plt.figure(1)
plt.plot(x,y)
plt.xlabel('Domain')
plt.ylabel('F(x)')
plt.show()
First, try a single expansion point with polynomials of different orders.
In [76]:
I = Interval(*domain)
plt.figure()
plt.plot(x,y,'o',label='f(x)')
plt.xlabel('Domain')
plt.ylabel('F(x)')
for order in [2, 3, 4, 7, 13]:
    xda = gdual_double(I.midpoint, 'x', order)
    yda = fun(xda)
    print(yda)
    ye = da.evaluate([yda], 'x', x)
    plt.plot(x, ye, label='O={}'.format(order))
plt.legend()
plt.axis([-1,1,-1,4])
Out[76]:
In general, the vectorized duals scale very favorably with the number of evaluation points. This means we should favor more expansion points over higher expansion orders: a low-order (2-5) expansion should be used with as many expansion points as needed to capture the reachable set to sufficient accuracy. However, this benefit may be hampered by the curse of dimensionality: covering an n-dimensional volume can require a substantial number of points.
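To put a rough number on the dimensionality concern (an illustrative count, not a measurement): a tensor grid with $p$ expansion points per axis needs $p^n$ points in $n$ dimensions.
p = 5  # assumed points per axis
for n in range(1, 7):
    print("{} dims: {} expansion points".format(n, p**n))
# grows from 5 points in 1-D to 15625 in 6-D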
In [43]:
I0 = Construct(I,4)
expansion_points = [Ii.midpoint for Ii in I0]
deltas, keep = find_nearest(x, expansion_points)
xda = gdual_vdouble(expansion_points, 'x', 4)
yda = fun(xda)
ye = da.evaluate([yda], 'x', deltas).squeeze() # This is all expansion points evaluated at x points
sol = [ye.T[pair] for pair in keep]
y_expansion = yda.constant_cf
In [46]:
plt.figure(1, figsize=(14,8))
plt.plot(x,y,'b-',label='f(x)')
plt.plot(x,sol,'r--',label='Approximation')
plt.plot(expansion_points,y_expansion,'ko',label='Expansion Points')
plt.xlabel('Domain')
plt.ylabel('F(x)')
plt.legend()
Out[46]:
In [49]:
domain = [(-1,1)]*2
def fun(x):
    if isinstance(x[0], (gdual_double, gdual_vdouble)):
        return pa.sin(5*x[0]) + pa.cos(x[1] + x[0]**2)*pa.exp(-pa.abs(x[1])) + x[0]*x[1]**3 - x[1]**2
    else:
        return np.sin(5*x[0]) + np.cos(x[1] + x[0]**2)*np.exp(-np.abs(x[1])) + x[0]*x[1]**3 - x[1]**2
x = boxgrid(domain, 60, True).T
print(x.shape)
y = fun(x)
# plt.figure(figsize=(12,12))
# plt.scatter(x[0],x[1],c=y)
# plt.colorbar()
# plt.xlabel('Domain X')
# plt.ylabel('Domain Y')
X,Y = np.meshgrid(x[0],x[1])
Z = fun([X,Y])
fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) is removed in newer Matplotlib
surf = ax.plot_surface(X, Y, Z, cmap='hot')
In [50]:
order = 5
reduced_domain = [(-0.98,0.98)]*2
x_exp = boxgrid(reduced_domain, [20,12], True).T # One way to get expansion points. Can also use sobol or other LD sequences
# u1 = cp.Uniform(*domain[0])
# u2 = cp.Uniform(*domain[1])
# U = cp.J(u1,u2)
# x_exp = U.sample(64,'S')
print(x_exp.shape)
deltas, keep = find_nearest(x.T, x_exp.T)
print(np.array(deltas).shape)
xda = [gdual_vdouble(x_exp[0], 'x', order), gdual_vdouble(x_exp[1], 'y', order)]
yda = fun(xda)
ye = da.evaluate([yda], ['x','y'], deltas).squeeze() # This is all expansion points evaluated at x points
print(ye.shape)
sol = [ye.T[pair] for pair in keep]
print(np.shape(sol))
y_expansion = yda.constant_cf
In [51]:
plt.figure(figsize=(16,6))
plt.subplot(121)
plt.scatter(x[0],x[1],c=y)
plt.colorbar()
plt.xlabel('Domain X')
plt.ylabel('Domain Y')
plt.title('True f(x,y)')
plt.subplot(122)
plt.scatter(x[0],x[1],c=np.array(sol))
plt.title('Taylor Map Composition')
plt.colorbar()
plt.xlabel('Domain X')
plt.ylabel('Domain Y')
err = np.abs(np.array(sol)-y)
tol = np.mean(err)
print("Peak error = {:.3g}".format(err.max()))
print("Average error = {:.3g}".format(err.mean()))
plt.figure()
plt.scatter(x[0],x[1],c=err>tol)
plt.plot(x_exp[0], x_exp[1],'wo')
# plt.colorbar()
plt.xlabel('Domain X')
plt.ylabel('Domain Y')
plt.title('Error Map\n |true-expansion| < average error = {:.4f} in black'.format(tol))
plt.figure()
plt.scatter(x[0],x[1],c=err)
plt.colorbar()
plt.xlabel('Domain X')
plt.ylabel('Domain Y')
plt.title('Error Map\n |true-expansion|')
Out[51]:
In [30]:
fig = plt.figure(figsize=(12,12))
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) is removed in newer Matplotlib
surf = ax.scatter(x[0],x[1],err,c=err, cmap='hot')
Start with a simple task. The ODE below describes a controlled double integrator with an uncertain drag-like term.
\begin{align}
\dot{x}_1 &= x_2 \\
\dot{x}_2 &= -ax_2^2 + u
\end{align}
where $a\in[0.5,3]$ and we arbitrarily close the loop with $u = -kx_2$. The goal is to find the smallest value of $k$ such that $\mathbb{P}[x_1<2]\geq\eta$ at a fixed time, i.e., a chance-constrained problem. $\eta=1$ is equivalent to requiring that the entire distribution meet the constraint. The solution for $t_f=5$, $\eta=1$ is approximately $k=2.7$, while for $\eta=0.95$ it is approximately $k=2.52$. Naturally, accepting more constraint violation allows a lower gain to be used.
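As a cross-check of the quoted gains, here is a minimal Monte Carlo sketch of the chance constraint. It is not the Taylor-map approach developed below; the helper name success_prob is ours, and we assume $a$ is uniformly distributed on $[0.5, 3]$ with the initial state $[1, 3.5]$ used in the next cell.
import numpy as np
from scipy.integrate import odeint

def closed_loop(x, t, a, k):
    return [x[1], -a*x[1]**2 - k*x[1]]  # u = -k*x2 substituted into the dynamics

def success_prob(k, tf=5.0, n=500, seed=0):
    rng = np.random.default_rng(seed)
    xf = [odeint(closed_loop, [1, 3.5], [0, tf], args=(a, k))[-1, 0]
          for a in rng.uniform(0.5, 3.0, n)]
    return np.mean(np.array(xf) < 2.0)  # estimate of P[x1(tf) < 2]

# success_prob(2.7) should be near 1, and success_prob(2.52) near 0.95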
In [52]:
def dynamics(x, t, a, k):
    # dtype=type(a) lets the same routine propagate floats, arrays, or gduals
    dx = np.zeros_like(x, dtype=type(a))
    dx[0] = x[1]
    dx[1] = -a*x[1]**2 - k*x[1]  # u = -k*x2 already substituted
    return dx
tf = 3
t = np.linspace(0, tf, 30)
x0 = [1,3.5]
AK = boxgrid([(0.5, 3),(1, 3)], 70, True).T
# a = np.linspace(0.5,3,200)
a,k = AK
# k = 2.52
X0 = [np.ones_like(a)*x0[0], np.ones_like(a)*x0[1]]
x = RK4(dynamics, X0, t, args=(a, k))
# print(x.shape)
# %matplotlib qt
plt.figure(figsize=(8,8))
# ax = plt.gca(projection='3d')
# print("Max value of x[0] = {} (constraint < 2)".format(x[-1, 0].max()))
# print("95%-ile of x[0] = {} (constraint < 2)".format(np.percentile(x[-1, 0],95)))
plt.title(r'Reachable set at $t$ = {} s for $a\in[{},{}]$'.format(tf, a[0], a[-1]))
plt.scatter(x[-1, 0], x[-1, 1], c=a)
plt.colorbar()
Out[52]:
Learn the Taylor map from the parameters $(a, k)$ to the final state.
In [53]:
ak_exp = boxgrid([(0.6, 2.9),(1.1, 2.9)], 10, True, True).T
a,k = da.make(ak_exp, ['a','k'], 3, array=True, vectorized=True)
X0 = [x0[0]*np.ones_like(ak_exp[0]),x0[1]*np.ones_like(ak_exp[0])]
# X0 = da.make(X0, ['x1','x2'], orders=0, array=True, vectorized=True) # May not need to do this
X0 = x0
xda = RK4(dynamics, X0, t, args=(a, k))
xf = xda[-1]
deltas, keep = find_nearest(AK.T, ak_exp.T)
ye = da.evaluate(xf, ['a','k'], deltas)  # all expansion points evaluated at all evaluation points
# keep, for each evaluation point, the result from its nearest expansion point
sol = np.array([[ye[:,i].T[pair] for pair in keep] for i in range(2)])
y_expansion = da.const(xf)
perim = boxgrid([(0.5, 3),(1, 3)], 2, False).T
print(perim.shape)
deltas, keep = find_nearest(perim.T, ak_exp.T)
yperim = da.evaluate(xf, ['a','k'], deltas)
sol_perim = np.array([[yperim[:,i].T[pair] for pair in keep] for i in range(2)])
In [56]:
err = np.abs(x[-1]-sol)
errnorm = (err[0]**2 + err[1]**2)**0.5
plt.figure(figsize=(16,8))
plt.subplot(121)
# plt.gca().set_yscale('log')
plt.scatter(AK[0], errnorm, c=AK[1])
plt.colorbar()
plt.xlabel('Value of a')
plt.ylabel('Norm of error in both states')
plt.title('Color = feedback gain value')
# plt.axis([0.4, 3, -0.0001, 0.001])
# plt.figure()
plt.subplot(122)
plt.scatter(sol[0],sol[1],c=AK[0], cmap='hot')
plt.plot(y_expansion[0],y_expansion[1],'w^')
plt.title('Taylor Map Composition')
plt.colorbar()
plt.xlabel('Reachable X')
plt.ylabel('Reachable Y')
plt.figure(figsize=(8,8))
plt.scatter(sol[0],sol[1],c=errnorm)
plt.colorbar()
plt.xlabel('Reachable X')
plt.ylabel('Reachable Y')
plt.title('Error')
# plt.figure(figsize=(8,8))
plt.scatter(sol_perim[0], sol_perim[1], marker='*',color='g')
x1min = sol_perim[0].min()
x1max = sol_perim[0].max()
x2min = sol_perim[1].min()
x2max = sol_perim[1].max()
plt.vlines([x1min,x1max],x2min,x2max)
plt.hlines([x2min,x2max],x1min,x1max)
plt.legend(('Norm of Error','Rectangle edges mapped forward','Enclosure'))
Out[56]:
In [57]:
# k = np.floor(t)
# plt.plot(t,k)
def dyn(x, t, u):
    dx = np.zeros_like(x, dtype=type(u[0]))
    dx[0] = x[1]
    dx[1] = -0.1*x[1]**2 + u[int(t)]/x[2]  # drag-like term + control/mass; u is piecewise constant in t
    dx[2] = -0.1*u[int(t)]**2  # mass-like term
    return dx

def fun(u, N):
    one = np.ones_like(u[0])
    x0 = [1*one, -0.5*one, 5*one]
    t = np.linspace(0, N*0.98, 50)
    x = RK4(dyn, x0, t, args=(u,))
    return t, x
# test
N = 4
# u = np.zeros((N+1, ))
u_bounds = (-3,3)
u_eval = boxgrid([u_bounds]*N, 8, True, False).T
t, xtrue = fun(u_eval,N)
print(xtrue.shape)
# print(x[1])
Build a Taylor map from the control points to the final state.
In [58]:
u_int = (-2.6,2.6)
u_exp = boxgrid([u_int]*N, 4, True).T
names = ["u{}".format(i) for i in range(N)]
u = da.make(u_exp, names, 2, array=True, vectorized=True)
print(u.shape)
u = np.append(u, u[-1])
In [59]:
t, x = fun(u, N)
xf = x[-1]
In [60]:
# u_eval = boxgrid([u_bounds]*N, 6, True, False).T
deltas, keep = find_nearest(u_eval.T, u_exp.T)
ye = da.evaluate(xf, names, deltas) # This is all expansion points evaluated at x points
sol = np.array([[ye[:,i].T[pair] for pair in keep] for i in range(3)])
y_expansion = da.const(xf)
plt.figure(figsize=(16,8))
plt.subplot(122)
plt.scatter(sol[0], sol[1], c=sol[2])
# plt.plot(y_expansion[0],y_expansion[1],'w^')
plt.title('Taylor Map Composition')
plt.colorbar()
plt.xlabel('Reachable X')
plt.ylabel('Reachable V')
plt.subplot(121)
plt.scatter(xtrue[-1, 0], xtrue[-1, 1],c=xtrue[-1,2])
plt.colorbar()
plt.title('True values from integration')
err = sol - xtrue[-1]
errnorm = (err[0]**2 + err[1]**2 + err[2]**2)**0.5
plt.figure(figsize=(8,8))
plt.scatter(sol[0],sol[1],c=errnorm)
plt.colorbar()
plt.xlabel('Reachable X')
plt.ylabel('Reachable Y')
plt.title('Error')
Out[60]:
In [61]:
def dyn(x, t, k):
    dx = np.zeros(np.shape(x), dtype=type(k[0]))
    dx[0] = x[1]
    dx[1] = -da.clip(k[0]*x[0] + k[1]*x[1], -2, 2)  # saturated linear feedback
    return dx
In [62]:
tf = 10
t = np.linspace(0, tf, 300)
x0 = [3.5, 1]
K = boxgrid([(0.1, 5)]*2, 10, True).T
X0 = [np.ones_like(K[0])*x0[0], np.ones_like(K[0])*x0[1]]
x = RK4(dyn, X0, t, args=(K,))
print(x.shape)
plt.figure()
lines = plt.plot(t, x[:,0,:])
plt.figure()
plt.scatter(x[-1,0,:], x[-1,1,:])
Out[62]:
In [ ]:
In [234]:
import numpy as np
def dynamics(x, t):
    dx = np.zeros_like(x)
    dx[0] = x[1]
    dx[1] = -np.sin(x[0])
    return dx
tf = 23.
t = np.linspace(0, tf, 1000)
X0 = boxgrid([(1-0.035, 1+0.035), (-0.035, 0.035)], 50, True).T
x = RK4(dynamics, X0, t, args=())
plt.figure(1, figsize=(8,8))
plt.title('Reachable set at t = {} s'.format(tf))
plt.scatter(x[-1, 0], x[-1, 1])
P0 = np.cov(X0)
P = np.cov(x[-1])
m = np.mean(x[-1], axis=1)
from Utils import draw
draw.cov(m, P, ci=[0.95], fignum=1, legend=False)
# plt.colorbar()
# sanity
from scipy.integrate import odeint
y2 = odeint(dynamics, [1,0], t)[-1]
print(y2)
In [242]:
def dynamics_da(x, t, dtype=pa.gdual_vdouble):
    dx = np.zeros_like(x, dtype=dtype)
    dx[0] = x[1]
    dx[1] = -pa.sin(x[0])
    return dx
names = ['x0','y0']
X0_exp = boxgrid([(1-0.035, 1+0.035), (-0.035, 0.035)], 5, True, chebyshev=False).T
X0_da = da.make(X0_exp, names, 3, vectorized=True, array=True)
X_da = RK4(dynamics_da, X0_da, t, args=())
xf = X_da[-1]
In [250]:
x0_scalar = da.make([1., 0.], names, 3, array=True)
xs = RK4(dynamics_da, x0_scalar, t, args=(pa.gdual_double,))
xfs = xs[-1]
G = da.jacobian(xfs, names)
Pf = G.dot(P0).dot(G.T)
# print(Pf) # Linear covariance prop
print(np.linalg.eig(Pf))
# print(P) # Sampling
print(np.linalg.eig(P))
vals, vecs = np.linalg.eig(Pf)
va, ve = np.linalg.eig(P)
print(np.abs(va - vals))
# eigenvectors are the columns of the matrix returned by np.linalg.eig
print(np.arccos(np.dot(vecs[:,0], ve[:,0]))*180/np.pi)
print(np.arccos(np.dot(vecs[:,1], ve[:,1]))*180/np.pi)
In [ ]:
It may be a good idea to use both the magnitudes and directions to trigger splitting: if either the angle between corresponding eigenvectors is too large, or the difference between a given pair of eigenvalues exceeds a tolerance, the initial set should be split and new expansion points added.
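A minimal sketch of such a trigger, comparing the linearly propagated covariance Pf against the sampled covariance P. The function name and tolerances are placeholders, not part of the notebook's utilities, and we assume the sorted eigenvalue ordering from eigh matches between the two matrices.
import numpy as np

def should_split(Pf, P, angle_tol_deg=5.0, eig_rel_tol=0.1):
    # Compare eigenvalue magnitudes and principal directions of two covariances.
    va, Va = np.linalg.eigh(Pf)   # eigh sorts eigenvalues; columns are eigenvectors
    vb, Vb = np.linalg.eigh(P)
    rel_err = np.abs(va - vb) / np.abs(vb)
    cosang = np.abs(np.sum(Va * Vb, axis=0))  # |cos| between matching eigenvectors
    angles = np.degrees(np.arccos(np.clip(cosang, 0.0, 1.0)))
    return np.any(rel_err > eig_rel_tol) or np.any(angles > angle_tol_deg)

# e.g. should_split(Pf, P) -> True would signal that the initial set needs refinement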
In [244]:
Xeval = X0
deltas, keep = find_nearest(Xeval.T, X0_exp.T)
print(np.shape(deltas))
ye = da.evaluate(xf, names, deltas) # This is all expansion points evaluated at x points
sol = np.array([[ye[:,i].T[pair] for pair in keep] for i in range(2)])
y_expansion = da.const(xf)
ys = da.evaluate(xfs, names, Xeval.T - np.array([1, 0])[None, :]).T  # deltas from the single central expansion point [1, 0]
err = np.linalg.norm(sol - x[-1], axis=0)
print("Average error = {}".format(np.mean(err)))
print("Maximum error = {}".format(np.max(err)))
err_scalar = np.linalg.norm(ys - x[-1], axis=0)
print("\nUsing a single central expansion point:")
print("Average error = {}".format(np.mean(err_scalar)))
print("Maximum error = {}".format(np.max(err_scalar)))
In [245]:
plt.style.use("seaborn-whitegrid")
cmap = "jet"
plt.figure(1, figsize=(17,8))
# plt.title('Reachable set at $t$ = {} s $'.format(tf))
plt.subplot(1,2,1)
# plt.scatter(sol[0], sol[1], c=err, cmap='hot')
plt.scatter(x[-1, 0], x[-1, 1], c=err, cmap=cmap)
plt.colorbar()
plt.plot(y_expansion[0], y_expansion[1], 'wo', markersize=12, mfc='k')
plt.subplot(1,2,2)
# plt.scatter(ys[0], ys[1], c=err_scalar, cmap='hot')
plt.scatter(x[-1, 0], x[-1, 1], c=err_scalar, cmap=cmap)
plt.plot(*y2, 'wo', markersize=12, mfc='k')
plt.colorbar()
plt.figure(2, figsize=(17,8))
# plt.title('Initial set')
plt.subplot(1,2,1)
plt.title("Multiple Expansion Points")
plt.scatter(X0[0], X0[1], c=err, cmap=cmap)
plt.colorbar()
plt.plot(X0_exp[0], X0_exp[1], 'wo', markersize=12, mfc='k')
plt.subplot(1,2,2)
plt.title("Single Expansion Point")
plt.scatter(X0[0], X0[1], c=err_scalar, cmap=cmap)
plt.colorbar()
Out[245]:
In [49]:
xmin = np.min(x[:,0,:])
xmax = np.max(x[:,0,:])
ymin = np.min(x[:,1,:])
ymax = np.max(x[:,1,:])
for i, y in enumerate(x):
    plt.figure(figsize=(8, 8))
    plt.title('Reachable set at t = {} s '.format(t[i]))
    plt.scatter(y[0], y[1])
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)
    plt.savefig("./data/gif/Simulation-{}.png".format(i))  # match the filenames read in the next cell
    plt.close()
In [50]:
import imageio
files = ['./data/gif/Simulation-{}.png'.format(i) for i, _ in enumerate(t)]
output = './data/gif/Simulation.gif'
with imageio.get_writer(output, mode='I', duration=0.25) as writer:
    for filename in files:
        image = imageio.imread(filename)
        writer.append_data(image)
In [ ]: