In [1]:
from casadi import *
from casadi.tools import * # for dotdraw
from matplotlib.pyplot import *
%matplotlib inline
In [2]:
x = SX.sym("x") # scalar symbolic primitives
y = SX.sym("y")
z = x*sin(x+y) # common mathematical operators
In [3]:
print z
In [4]:
dotdraw(z,direction="BT")
In [5]:
J = jacobian(z,x)
print J
In [6]:
dotdraw(J,direction="BT")
Note 1: subexpressions are shared, so the expression is stored as a graph rather than a tree.
This is different from Maple, the MATLAB Symbolic Toolbox, SymPy, ...
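As a small illustration (a sketch using only the SX and dotdraw calls introduced above): the subexpression sin(x+y) below is written twice, yet the graph stores it only once, so dotdraw shows a single sin node feeding both terms.
In [ ]:
e = sin(x + y)              # common subexpression
w = e + x*e                 # 'e' is referenced twice in the formula ...
dotdraw(w, direction="BT")  # ... but appears once as a shared node in the graph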
A (very) little bit of Computer Algebra
In [7]:
print x*y/x-y
In [8]:
H = hessian(z,x)
print H
Sorting the graph into an evaluation algorithm
In [9]:
f = SXFunction('f',[x,y],[z])
f.init()
print f
Note 2: work ("tape") variables are re-used once they are no longer needed (live-variable analysis)
In [10]:
f.setInput(1.2,0)
f.setInput(3.4,1)
f.evaluate()
print f.getOutput(0)
In [11]:
print f([1.2,3.4])
In [12]:
print f([1.2,x+y])
In [13]:
f.generate("f")
print file("f.c").read()
In [14]:
A = SX.sym("A",3,3)
B = SX.sym("B",3)
print A
In [15]:
print solve(A,B)
In [16]:
print trace(A) # Trace
In [17]:
print mul(A,B) # Matrix multiplication
In [18]:
print norm_F(A) # Frobenius norm
In [19]:
print A[2,:] # Slicing
Rule 1: Everything is a matrix
In [20]:
print A.shape, z.shape
In [21]:
I = SX.eye(3)
print I
In [22]:
Ak = kron(I,A)
print Ak
Rule 1: Everything is a sparse matrix
In [23]:
with nice_stdout():
    Ak.sparsity().spy()
In [24]:
with nice_stdout():
    A.sparsity().spy()
In [25]:
with nice_stdout():
    z.sparsity().spy()
Consider an ODE:
\begin{equation} \dot{p} = (1 - q^2)\,p - q + u \end{equation}
\begin{equation} \dot{q} = p \end{equation}
\begin{equation} \dot{c} = p^2 + q^2 + u^2 \end{equation}
In [26]:
t = SX.sym("t") # time
u = SX.sym("u") # control
x = SX.sym("[p,q,c]") # state
p,q,c = x
In [27]:
ode = vertcat([(1 - q**2)*p - q + u, p, p**2+q**2+u**2])
print ode, ode.shape
In [28]:
J = jacobian(ode,x)
print J
In [29]:
f = SXFunction('f',[t,u,x],[ode])
f.init()
ffwd = f.derivative(1,0)
ffwd.init()
fadj = f.derivative(0,1)
fadj.init()
# side-by-side printing
print '{:*^24} || {:*^30} || {:*^30}'.format("f","ffwd","fadj")
for l in zip(str(f).split("\n"),str(ffwd).split("\n"),str(fadj).split("\n")):
    print '{:<24} || {:<30} || {:<30}'.format(*l)
Performing forward sweeps gives the columns of J
In [30]:
print I
In [31]:
for i in range(3):
    print ffwd([ t,u,x, 0,0,I[:,i] ])[1]
In [32]:
print J
Performing adjoint sweeps gives the rows of J
In [33]:
for i in range(3):
    print fadj([ t,u,x, I[:,i] ])[3]
Often, you can do better than seeding with unit vectors one at a time.
Note 3: CasADi uses graph coloring to compute sparse Jacobians efficiently.
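As a sketch of what that looks like in code (assuming the same CasADi 2.x Function API used elsewhere in this notebook; input index 2 is the state x and output index 0 is the ODE right-hand side), the full sparse Jacobian can be requested in one call instead of seeding by hand:
In [ ]:
Jf = f.jacobian(2, 0)    # d(ode)/dx, with sparsity-exploiting seeding under the hood
Jf.init()
print Jf([t, u, x])[0]   # the first output of the generated function is the Jacobian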
$\dot{x}=f(x,u,t)$ with $x = [p,q,c]^T$
In [34]:
f = SXFunction('f',daeIn(x=x,t=t,p=u),daeOut(ode=ode))
f.init()
print str(f)[:327]
Construct an integrating block $x_{k+1} = \Phi(f;\Delta t;x_k,u_k)$
In [35]:
tf = 10.0
N = 20
dt = tf/N
In [36]:
Phi = Integrator('integrator',"cvodes",f)
Phi.setOption("name","Phi")
Phi.setOption("tf",dt)
Phi.init()
x0 = vertcat([0,1,0])
Phi.setInput(x0,"x0")
Phi.evaluate()
print Phi.getOutput()
In [37]:
x = x0
xs = [x]
for i in range(N):
    Phi.setInput(x,"x0")
    Phi.evaluate()
    x = Phi.getOutput()
    xs.append(x)
In [38]:
plot(horzcat(xs).T)
legend(["p","q","c"])
Out[38]:
Rule 2: Everything is a Function (see http://docs.casadi.org)
Note 4: this is what makes CasADi stand out among AD tools
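For example (using only calls that already appear above), the CVODES integrator constructed earlier is itself a Function object with the same print/evaluate/derivative interface as f:
In [ ]:
print Phi   # the integrator block is an ordinary CasADi Function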
Recall
In [39]:
n = 3
A = SX.sym("A",n,n)
B = SX.sym("B",n,n)
C = mul(A,B)
print C
dotdraw(C,direction='BT')
What if you don't want to expand into scalar operations? (avoid $O(n^3)$ storage)
In [40]:
A = MX.sym("A",n,n)
B = MX.sym("B",n,n)
C = mul(A,B)
print C
dotdraw(C,direction='BT')
What if you cannot expand into matrix operations? (a numerical algorithm)
In [41]:
C = solve(A,B)
print C
dotdraw(C,direction='BT')
In [43]:
X0 = MX.sym("x",3)
XF = Phi({'x0': X0})['xf']
In [45]:
expr = sin(XF)+X0
dotdraw(expr,direction='BT')
In [47]:
F = MXFunction('F',[X0],[ expr ])
F.init()
print F
In [48]:
F.setInput(x0)
F.evaluate()
print F.getOutput()
In [49]:
J = F.jacobian()
J.init()
print J([ x0 ])
This shows how an integrator call can be embedded in a matrix graph.
More possibilities: a call into an external compiled library, a call to MATLAB/SciPy, ...
Remember, $\dot{x}=f(x,u,t)$ with $x = [p,q,c]^T$
\begin{equation} \begin{array}{cl} \underset{x(\cdot),u(\cdot)}{\text{minimize}} & c(T) \\\\ \text{subject to} & \dot{x} = f(x,u) \\\\ & p(0) = 0, \; q(0) = 1, \; c(0) = 0 \\\\ & -1 \le u(t) \le 1 \end{array} \end{equation}
Discretization with multiple shooting:
\begin{equation} \begin{array}{cl} \underset{x_{\bullet},u_{\bullet}}{\text{minimize}} & c_N \\\\ \text{subject to} & x_{k+1} - \Phi(x_k,u_k) = 0, \quad k = 0,1,\ldots,N-1 \\\\ & p_0 = 0, \; q_0 = 1, \; c_0 = 0 \\\\ & -1 \le u_k \le 1, \quad k = 0,1,\ldots,N-1 \end{array} \end{equation}
Cast as an NLP:
\begin{equation} \begin{array}{cl} \underset{X}{\text{minimize}} & F(X,P) \\\\ \text{subject to} & \text{lbx} \le X \le \text{ubx} \\\\ & \text{lbg} \le G(X,P) \le \text{ubg} \\\\ \end{array} \end{equation}
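To make the correspondence explicit (a restatement of the multiple-shooting problem above in the generic NLP form): $X$ collects all shooting variables, $X = (x_0,\ldots,x_N,\,u_0,\ldots,u_{N-1})$; the objective is $F(X) = c_N$; the constraint function stacks the shooting gaps, $G(X) = \big(x_{k+1} - \Phi(x_k,u_k)\big)_{k=0,\ldots,N-1}$, with $\text{lbg} = \text{ubg} = 0$; and the simple bounds lbx, ubx encode both the initial condition and $-1 \le u_k \le 1$.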
In [50]:
X = struct_symMX([
    (
        entry("x", repeat=N+1, struct=struct(["p","q","c"])),
        entry("u", repeat=N)
    )
])
X is a symbolic matrix primitive, but with fancier indexing
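A small sketch of that indexing (only forms that are used later in this notebook):
In [ ]:
print X["x", 0]        # the full state vector at shooting node 0 (3-by-1)
print X["x", 0, "p"]   # a single named component, p at node 0
print X["u", :]        # the list of all N control variables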
In [51]:
print X.shape
print (N+1)*3+N
Demo: $\Phi(x_0,u_0)$
In [53]:
Phi({'x0': X["x",0], 'p': X["u",0]})['xf']
Out[53]:
$ x_{k+1} - \Phi(x_k,u_k) = 0 , \quad \quad k = 0,1,\ldots, (N-1)$
In [55]:
g = [] # List of constraint expressions
for k in range(N):
    Xf = Phi({'x0': X["x",k], 'p': X["u",k]})['xf']
    g.append( X["x",k+1] - Xf )
In [57]:
obj = X["x",N,"c"] # c_N
nlp = MXFunction( 'nlp', nlpIn(x=X), nlpOut(g=vertcat(g),f=obj) )
nlp.init()
print nlp
Block structure in the constraint Jacobian
In [58]:
jacG = nlp.jacobian("x","g")
jacG.init()
print jacG.getOutput().shape
with nice_stdout():
    jacG.getOutput()[:20,:20].sparsity().spy()
Recall
\begin{equation} \begin{array}{cl} \underset{X}{\text{minimize}} & F(X,P) \\\\ \text{subject to} & \text{lbx} \le X \le \text{ubx} \\\\ & \text{lbg} \le G(X,P) \le \text{ubg} \\\\ \end{array} \end{equation}
In [60]:
solver = NlpSolver('solver',"ipopt",nlp)
solver.init()
solver.setInput(0,"lbg") # Equality constraints for shooting constraints
solver.setInput(0,"ubg") # 0 <= g <= 0
lbx = X(-inf)
ubx = X(inf)
lbx["u",:] = -1; ubx["u",:] = 1 # -1 <= u(t) <= 1
lbx["x",0] = ubx["x",0] = x0 # Initial condition
solver.setInput(lbx,"lbx")
solver.setInput(ubx,"ubx")
In [61]:
with nice_stdout():
    solver.evaluate()
In [62]:
print solver.getOutput()
In [63]:
sol = X(solver.getOutput())
In [64]:
plot(horzcat(sol["x",:]).T)
Out[64]:
In [65]:
step(range(N),sol["u",:])
Out[65]:
Showcase: kite-power optimization by Greg Horn, using the CasADi backend
In [ ]:
from IPython.display import YouTubeVideo
YouTubeVideo('tmjIBpb43j0')
In [ ]:
YouTubeVideo('SW6ZJzcMWAk')
Distinction with other software:
| ACADOtoolkit | CasADi |
|---|---|

| Other operator-overloading AD tools | CasADi |
|---|---|
Closest similarity: AMPL