In [ ]:
import numpy as np
import control as con
from numpy import linalg as LA
from scipy.special import comb as nchoosek # n Choose k (n ueber k)
import cvxpy
import optim_tools  # own helper module
In [ ]:
###########################
#   Hydraulic actuator    #
###########################
A0 = np.matrix([[0, 1, 0],
                [-10, -1.167, 25],
                [0, 0, -0.8]])
print "Eigenvalues: {}".format(LA.eigvals(A0))
#a = -A[-1,:].T ### !!!!!
#print a
b0 = np.matrix([[0],[0],[2.4]])
c0 = np.matrix([1, 0, 0])
d0 = np.matrix([0])
u_max = 10.5
n = 3
X00 = [np.matrix([-20.0, -10.0, -10.0]).T,
       np.matrix([-20.0, -10.0, 10.0]).T,
       np.matrix([-20.0, 10.0, -10.0]).T,
       np.matrix([-20.0, 10.0, 10.0]).T,
       np.matrix([20.0, -10.0, -10.0]).T,
       np.matrix([20.0, -10.0, 10.0]).T,
       np.matrix([20.0, 10.0, -10.0]).T,
       np.matrix([20.0, 10.0, 10.0]).T,
       ]
#print "A:\n", A
#print "a:\n", a
#print "b:\n", b
#print "c:\n", c
# Convert to controllable canonical form (Steuerungsnormalform)
(A1, b1, c1, d1), T1, Q1 = optim_tools.get_Steuerungsnormalform(A0, b0, c0.T, d0)
a1 = -A1[-1, :].T  # negated last row = characteristic polynomial coefficients
print "T1:\n", T1
# Cross-check: same conversion via python-control
ss, T2 = con.canonical_form(con.ss(A0, b0, c0, d0), form='reachable')
assert np.allclose(T1*X00[1],
                   optim_tools.reverse_x_order(T2*X00[1])), \
    "own Steuerungsnormalform transformation does not match the python-control version"
#print "x_r1:\n", T1*X00[1]
#print "x_r2(backwards):\n", optim_tools.reverse_x_order(T2*X00[1])
A = optim_tools.reverse_x_order(np.matrix(ss.A))
a = -A[-1, :].T  # negated last row = characteristic polynomial coefficients
b = optim_tools.reverse_x_order(np.matrix(ss.B))
c = optim_tools.reverse_x_order(np.matrix(ss.C))
d = optim_tools.reverse_x_order(np.matrix(ss.D)) # == 0!
print "A:\n", A
assert np.allclose(A, A1)
print "a:\n", a
assert np.allclose(a, a1)
print "b:\n", b
assert np.allclose(b, b1)
print "c:\n", c
assert np.allclose(c, c1.T)
X0 = [T1.dot(x0) for x0 in X00]
#print "X0:\n"
#for x0 in X0:
# print x0
#print "A1:\n", A1
#print "a1:\n", a1
#print "b1:\n", b1
#print "c1:\n", c1
In [ ]:
pmin = 0.1
In [ ]:
######################################################
# Helper for Constraint variant (4.65) -> Q_sum #
######################################################
# TODO: What is m? Can it be computed, or is it simply chosen? m <= 2n-1, usually n+1?
from scipy.special import comb as nchoosek # n Choose k (n ueber k)
def get_a_list(a, Q, z, m):
    n = len(a)  # dimension of a, z and Q (n x n)
    # m: degree of the matrix polynomial, chosen by the caller (see TODO above)
    l = m+1  # length of the list holding all a_i, i in {0, ..., m}
    H = lambda k: optim_tools._H(k, n)  # convenience wrapper fixing n -> specialized H(k)
    N = optim_tools._N(n)
    a_list = [np.zeros((n, n)) for _ in range(l)]  # independent zero matrices (avoid aliasing with += below)
    for i in range(0, l):  # including m
        if i <= (m-1)/2.0:  # 0 <= i <= (m-1)/2
            for k in range(1, i+1):
                a_list[i] += a.T * H(n+k-i)*Q*N*H(n-k+1)*a - z.T*N*H(n-i)*a
        elif i <= m:  # (m-1)/2 < i <= m
            for k in range(1, 2*n-i+1):
                a_list[i] += a.T * H(k)*Q*N*H(2*n-i-k+1)*a
        else:
            # this branch is currently not reachable (i only runs up to m)
            print "i={} > m={}".format(i, m)
            a_list[i] = 0
    return a_list
def trans_a_list(a_list, eps):
    l = len(a_list)
    m = l-1  # largest index in a_list
    a1_list = [np.zeros(a_list[0].shape) for _ in range(l)]  # independent zero matrices
    for j in range(0, l):  # for each entry of a1_list
        for i in range(j, l):  # for each coefficient up to and including m
            a1_list[j] += nchoosek(i, i-j) * ((1.0+eps)/(1.0-eps))**(i-j) * ((1.0-eps)/2.0)**i * a_list[i]
    return a1_list
def calc_a_Sum(a_list):
    l = len(a_list)  # number of matrices in a_list
    m = l-1  # index of Q_m
    n = a_list[0].size[0]  # row dimension of each n x n block (cvxpy: .size is a (rows, cols) tuple)
    if m == 0:
        a_sum = cvxpy.bmat([[2*a_list[0], np.zeros((n, n))],   # n x n zero blocks
                            [np.zeros((n, n)), np.zeros((n, n))]])
    elif m == 1:
        a_sum = cvxpy.bmat([[2*a_list[0], a_list[1]],
                            [a_list[1], np.zeros((n, n))]])
    else:  # m >= 2
        a_sum = cvxpy.bmat([[2*a_list[0], a_list[1]],
                            [a_list[1], 2*a_list[2]]])
        for i1 in range(3, l, 2):
            S_new_col = cvxpy.vstack(np.zeros((((i1+1)/2-1)*n, n)), a_list[i1])
            if i1 == m:
                S_new_row = cvxpy.hstack(np.zeros((n, ((i1+1)/2-1)*n)), a_list[i1], np.zeros((n, n)))
            else:
                S_new_row = cvxpy.hstack(np.zeros((n, ((i1+1)/2-1)*n)), a_list[i1], 2*a_list[i1+1])
            a_sum = cvxpy.bmat([[a_sum, S_new_col],
                                [S_new_row]])
    a_sum = -0.5*a_sum
    return a_sum
def calc_lmi_cond(a_sum, n):
    k = a_sum.size[1] / n  # number of n x n blocks; this dimension follows Boris (Dilyana does not specify it)
    J = np.hstack([np.zeros((n*(k-1), n)), np.eye(n*(k-1))])
    C = np.hstack([np.eye(n*(k-1)), np.zeros((n*(k-1), n))])
    return np.vstack([C, J]), n*(k-1)
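To illustrate the shape of the selection matrices built by calc_lmi_cond, a standalone numpy example (not part of the original derivation) with block size n = 1 and k = 3 blocks:
In [ ]:
# Standalone illustration of the selection matrices (pure numpy, independent
# of the cvxpy variables above): C picks blocks 1..k-1, J picks blocks 2..k.
n_demo, k_demo = 1, 3
J_demo = np.hstack([np.zeros((n_demo*(k_demo-1), n_demo)), np.eye(n_demo*(k_demo-1))])
C_demo = np.hstack([np.eye(n_demo*(k_demo-1)), np.zeros((n_demo*(k_demo-1), n_demo))])
print "C:\n", C_demo
print "J:\n", J_demo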
In [ ]:
##############################
# Convex Problem (4.58) #
##############################
# Variables
Q = cvxpy.Semidef(n)  # implies (4.59) (semidefinite for numerical reasons?)
z = cvxpy.Variable(n)
# Constants
N = optim_tools._N(n)
# Constraints
constraint_459 = Q >> 0
constraint_460 = Q*(A.T + a*b.T) + (A + b*a.T)*Q - z*b.T - b*z.T << 0
constraint_461 = Q*N + N*Q << 0
constraint_462 = [cvxpy.bmat([[1, X0[i].T],
                              [X0[i], Q]]) >> 0
                  for i in range(0, len(X0))]
constraint_463 = cvxpy.bmat([[u_max**2 - a.T*Q*a + 2*a.T*z, z.T],
                             [z, Q]]) >> 0
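By the Schur complement (together with Q > 0 from (4.59)), constraint (4.62) is equivalent to x0^T Q^-1 x0 <= 1 for each transformed initial state, i.e. every x0 in X0 lies inside the invariant ellipsoid {x : x^T Q^-1 x <= 1}. Constraint (4.63) should analogously bound the control input by u_max on that ellipsoid.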
In [ ]:
###########################################
# Constraint variant (4.64) #
###########################################
#vectorize with cvxpy.sum_entries(X) ??
m = n
## BEWARE OF THE INNER RANGE (k runs from 0 to i-1) ##
constraint_464 = [sum([nchoosek(i, k) * a.T * optim_tools._P(0, i-k, n) * Q * N * optim_tools._P(0, k, n) * a - z.T * N * optim_tools._P(n, i, n) * a
                       for k in range(0, i)]) >= 0
                  for i in range(1, m+1)]
In [ ]:
###########################################
# Constraint variant (4.65) -> Q_sum #
###########################################
m = n
# Preparation for constraint (4.65)
a_list = get_a_list(a, Q, z, m)       # matrices of polynomial coefficients: S(p) = S_0 + S_1*p + ... + S_m*p^m
a1_list = trans_a_list(a_list, pmin)  # interval transformation p: [0,1] -> p1: [-1,1] (S -> S^tilde)
a1_sum = calc_a_Sum(a1_list)          # S^tilde_sum matrix (30)
CJ, l = calc_lmi_cond(a1_sum, n)      # "selection matrices" of (31), l = dimension of P and G
# Further variables of optimization
S = cvxpy.Semidef(l)      # symmetric, positive semidefinite
G = cvxpy.Variable(l, l)  # skew-symmetric (enforced by constraint_G below)
# Constraints on new variables
constraint_G = G + G.T == 0 # skew symmetry
constraint_S = S == S.T # symmetry
# Actual constraint
constraint_465 = a1_sum << CJ.T * cvxpy.bmat([[-S, G],
                                              [G.T, S]]) * CJ
In [ ]:
print "TO FUTURE-ME!"
print "STOP! Please rethink and choose the following cells wisely!"
print "AND NOT ALL OF THEM!"
In [ ]:
###########################################
# Objective variant (4.66) -> max(det(Q)) #
# Volume maximization -> geomean(Q)        #
###########################################
# Collection of all constraints
constraints_451 = []
constraints_451.append(constraint_459)  # implicitly included by Q = Semidef
constraints_451.append(constraint_460)  # for some objectives this constraint is replaced, therefore constraints[0] == constraint_460
constraints_451.append(constraint_461)
constraints_451.extend(constraint_462)  ##!! note the "extend": constraint_462 is a list
constraints_451.append(constraint_463)
constraints_451.extend(constraint_464)
#constraints_451.append(constraint_G)
#constraints_451.append(constraint_S)
#constraints_451.append(constraint_465)
# Objective representation
obj_451 = cvxpy.Maximize(cvxpy.log_det(Q))  # equivalent to geo_mean (in terms of convexity and result); maximizes the ellipsoid volume since vol ~ sqrt(det(Q))
#obj = cvxpy.Minimize(-cvxpy.geo_mean(Q)) # Not yet implemented
prob_451 = cvxpy.Problem(obj_451, constraints_451)
prob_451_alt = cvxpy.Problem(cvxpy.Minimize(0), constraints_451)
In [ ]:
#https://cvxpy.readthedocs.io/en/latest/tutorial/advanced/index.html#getting-the-standard-form
# Compile SCS with gpu support (https://askubuntu.com/questions/799184/how-can-i-install-cuda-on-ubuntu-16-04)
#https://groups.google.com/forum/#!topic/cvxpy/qyiA94ojb_A
In [ ]:
%%time
# Solve the problem with SCS
#prob_451.solve(solver=cvxpy.SCS, verbose=True, max_iters=5000)
#prob_451 = optim_tools.accurate_solve(prob_451, cvxpy.SCS, warm_start=True, max_iters=5000)
prob_451_alt = optim_tools.checked_solve(prob_451_alt, cvxpy.CVXOPT) # Feasibility only!
#prob_451 = optim_tools.checked_solve(prob_451, cvxpy.SCS, max_iters=10000)
In [ ]:
print "Objective variant (4.5.1) -> Max. Volume: max(det(Q))"
print prob_451.status
Q_451 = Q.value
R1_451 = LA.inv(Q.value)
a_hat_451 = R1_451.dot(z.value)
print "Q:\n", Q_451
print "R1:\n", R1_451
print "z:\n", z.value
print "a_hat:\n", a_hat_451
In [ ]:
##
#
# @ipa-lth this is a known bug. You can't do the "X >> 0" constraints with MOSEK. sorry! just do "X == Semidef(n)". It's basically the same
In [ ]:
###########################################
# Objective variant (4.70) #
# Max. decay rate (Abklingrate)            #
###########################################
# Bisection parameter
beta = cvxpy.Parameter(sign='positive')
constraint_471 = Q*(A.T + a*b.T) + (A + b*a.T)*Q - z*b.T - b*z.T + 2*beta*Q << 0
# Collection of all constraints
constraints_452 = []
#constraints_452.append(constraint_459)  # implicitly included by Q = Semidef
constraints_452.append(constraint_461)
constraints_452.extend(constraint_462)  ##!! note the "extend": constraint_462 is a list
constraints_452.append(constraint_463)
constraints_452.extend(constraint_464)
#constraints_452.append(constraint_G)
#constraints_452.append(constraint_S)
#constraints_452.append(constraint_465)
constraints_452.append(constraint_471)
#obj_452 = cvxpy.Maximize(cvxpy.log_det(Q))  # equivalent to geo_mean, but somehow gives wrong bisection results here
obj_452 = cvxpy.Minimize(0)  # feasibility problem for the bisection
prob_452 = cvxpy.Problem(obj_452, constraints_452)
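For reference, a minimal sketch of a feasibility bisection over beta. This is only an assumption of what optim_tools.bisect_max roughly does (its implementation is not shown in this notebook), and bisect_beta / beta_hi are hypothetical names introduced here:
In [ ]:
# Minimal feasibility-bisection sketch (assumption: roughly what
# optim_tools.bisect_max implements; beta_hi is a hypothetical upper bound).
def bisect_beta(prob, beta_param, beta_lo=0.0, beta_hi=10.0, tol=1e-3, **solve_kwargs):
    while beta_hi - beta_lo > tol:
        beta_mid = 0.5*(beta_lo + beta_hi)
        beta_param.value = beta_mid
        prob.solve(**solve_kwargs)
        if prob.status in ('optimal', 'optimal_inaccurate'):
            beta_lo = beta_mid  # feasible -> try a larger decay rate
        else:
            beta_hi = beta_mid  # infeasible -> shrink the search interval
    return beta_lo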
In [ ]:
#optim_tools.checked_solve(prob_452, solver=cvxpy.SCS, max_tol=0.1, verbose=True, max_iters=5000)
#prob_452.solve(solver=cvxpy.SCS, verbose=True, max_iters=500)
In [ ]:
%%time
# Solve with bisection (and SCS)
[[Q_o, z_o], beta_o] = optim_tools.bisect_max(0, None, prob_452, beta, [Q, z], bisect_verbose=True,
                                              solver=cvxpy.CVXOPT, warm_start=False, max_iters=160000)
print "\n"
print "Objective variant (4.5.2) -> Bisection(Max. Abklingrate)"
print "Bisection Param:", beta_o
# Output
print "Status:", prob_452.status
#print problem_opt
Q_452 = Q_o
R1_452 = LA.inv(Q_o)
a_hat_452 = R1_452.dot(z_o)
print "Q:\n", Q_452
print "R1:\n", R1_452
print "z:\n", z_o
print "a_hat:\n", a_hat_452
In [ ]:
a_hat_x = np.matrix([[14.345160], [14.837268], [3.598276]])
R1_x = np.matrix([[1.677977, 0.543373, 0.140299],
                  [0.543373, 0.32290, 0.062725],
                  [0.140298, 0.06272, 0.026148]])
Some checks for validity of the solution
In [ ]:
import numpy as np
print "Checking constraints (no output is ok!)"
eig_R1_451 = np.linalg.eigvals(R1_451)
if np.any(eig_R1_451 <= 0): print("R1_451 (= Q^-1) not positive definite: {}".format(eig_R1_451))
eig_R1_452 = np.linalg.eigvals(R1_452)
if np.any(eig_R1_452 <= 0): print("R1_452 (= Q^-1) not positive definite: {}".format(eig_R1_452))
#eig_460 = np.linalg.eigvals((Q*(A.T+a*b.T)+(A+b*a.T)*Q-z*b.T-b*z.T).value)
#if np.any(eig_460 >= 0): print("4.60 not neg definit: {}".format(eig_460))
#eig_461 = np.linalg.eigvals((Q*N+N*Q).value)
#if np.any(eig_461 >= 0): print("4.61 not neg definit: {}".format(eig_461))
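Two further plausibility checks for the (4.5.1) solution (assumptions: (4.62) encodes x0^T Q^-1 x0 <= 1, and with z = Q*a_hat constraint (4.63) reduces to (a_hat - a)^T Q (a_hat - a) <= u_max^2; k_chk is just a name introduced here):
In [ ]:
# Additional plausibility checks (no output is ok)
# (4.62): every transformed initial state inside the ellipsoid x^T R1 x <= 1
for i, x0 in enumerate(X0):
    val = float(x0.T.dot(R1_451).dot(x0))
    if val > 1.0 + 1e-6:
        print("x0[{}] outside ellipsoid: x0^T R1 x0 = {}".format(i, val))
# (4.63), assuming z = Q*a_hat: input bound for the linear feedback k = a_hat - a
k_chk = a_hat_451 - a
val = float(k_chk.T.dot(Q_451).dot(k_chk))
if val > u_max**2 + 1e-6:
    print("input bound possibly violated: k^T Q k = {} > u_max^2 = {}".format(val, u_max**2))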
In [ ]:
T = np.arange(0, 5, 1e-2)
# s: reference input (here zero; e.g. a step with amplitude 0.2 could be used instead)
s = np.zeros(len(T))
p_init = pmin
# Initial state
x0 = np.matrix([[20.],[10.],[10.]])
p_t = []  # recorded scaling parameter p over time (re-initialized again before the simulation runs)
In [ ]:
# Scaled version, from Yankulova
# k(v) (4.5)
def get_k(v, a, a_hat):
    try:
        v = v.squeeze()  # scipy.optimize.minimize passes a 1-element array; reduce it to a scalar
    except:
        pass
    D_inv = optim_tools._D_inv(v, len(a))
    #print D_inv
    k = D_inv.dot(a_hat) - a
    return k
#print "Test k\n", get_k(0.2, np.matrix([124, 48, 12]).T, np.matrix([1, 2, 3]).T)
#assert(np.allclose(-a+a_hat, get_k(1, a, a_hat)))
# Fixing a and a_hat for convenience
# func_k = lambda v: get_k(v, a, a_hat)
# G(v) (4.4)
def get_g(v, x, R1, u_max, a, a_hat):
    try:
        v = v.squeeze()  # scipy.optimize.minimize passes a 1-element array; reduce it to a scalar
    except:
        pass
    D_inv = optim_tools._D_inv(v, len(x))
    R = D_inv.dot(R1).dot(D_inv)  # R(v) = D^-1 * R1 * D^-1
    k = get_k(v, a, a_hat)  # k(v)
    e = (u_max**(-2)) * (k.T.dot(LA.inv(R)).dot(k))
    #assert e < 1.0
    g = e*(x.T.dot(R).dot(x)) - 1.0
    #assert g <= 0, "g = {} > 0".format(g)
    # Since Python 3.5 there is a matrix-multiplication operator @:
    # g = x' @ D^-1 @ R1 @ D^-1 @ x - 1.0
    return g
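The commented assert above states the intended special case; as a standalone check (assumption: optim_tools._D_inv(1, n) is the identity, so the scaling drops out at v = 1):
In [ ]:
# At v = 1 the scaling should drop out, so k(1) = a_hat - a
# (mirrors the commented assert above; assumes _D_inv(1, n) == I).
assert np.allclose(get_k(1.0, a, a_hat_451), a_hat_451 - a), \
    "k(1) != a_hat - a -- check the _D_inv convention"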
In [ ]:
from scipy.optimize import minimize
last_p = p_init
def contr_func(y, s, x, u_max, a, a_hat, R1, T1=None):
    global last_p, pmin
    # Transformation to controllable canonical form (Regelungsnormalform)
    if T1 is None:
        T1 = np.eye(len(x))
    x_R = T1*x
    func_g = lambda p: np.absolute(get_g(p, x_R, R1, u_max, a, a_hat))
    res = minimize(func_g, last_p, method='Nelder-Mead')  # find the zero of g (fzero-like, via |g|)
    # Saturate p if it leaves [pmin, 1]
    if res.x < pmin:
        p = pmin
    elif res.x > 1.0:
        p = 1.0
        print "WARNING: p=({})>1! -> p is saturated to 1.0".format(res.x)
    else:
        p = res.x
    p_t.append(p)
    p2_t.append(res.x)
    last_p = p
    # Calculate K according to p
    K = get_k(p, a, a_hat)
    # Calculate u
    u = s - K.T.dot(x_R)
    # Saturate u
    u = optim_tools.sat(u, u_max)
    #print "u", u
    return u
In [ ]:
p_t = []
p2_t = []
y, u, u_sat = optim_tools.simulate(A0, b0, c0, d0, lambda y, s, x: contr_func(y, s, x, u_max, a, a_hat_451, R1_451, T1), s, T, umax=u_max, x0=x0)
y1, u1, u1_sat = optim_tools.simulate(A0, b0, c0, d0, lambda y, s, x: contr_func(y, s, x, u_max, a, a_hat_x, R1_x, T1), s, T, umax=u_max, x0=x0)
y2, u2, u2_sat = optim_tools.simulate(A0, b0, c0, d0, lambda y, s, x: contr_func(y, s, x, u_max, a, a_hat_452, R1_452, T1), s, T, umax=u_max, x0=x0)
#y1x, u1x, u1x_sat = optim_tools.simulate(A0, b0, c0, d0, lambda y, s, x: contr_func(y, s, x, u_max, a, a_hat_x, R1_x, T1), s, T, umax=u_max, x0=x0)
#y1, u1, u1_sat = optim_tools.simulate(A1, b1, c1.T, d1, lambda y, s, x: contr_func(y, s, x, u_max, a, a_hat), s, T, umax=u_max, x0=T1*x0)
#y2, u2, u2_sat = optim_tools.simulate(A0, b0, c0, d0, lambda y, s, x: s-get_k(1, a, a_hat_451).T.dot(T1*x), s, T, umax=u_max, x0=x0)
#y3, u3, u3_sat = optim_tools.simulate(A0, b0, c0, d0, lambda y, s, x: s-np.matrix([-0.0833, 0.0828, 0.76068]).dot(x), s, T, umax= u_max, x0=x0)
y4, u4, u4_sat = optim_tools.simulate(A0, b0, c0, d0, lambda y, s, x: s-np.matrix([13.25659, 7.097648, 4.0502]).dot(x), s, T, umax= u_max, x0=x0)
In [ ]:
%pylab inline
pylab.rcParams['figure.figsize'] = (10, 6)
import matplotlib.pyplot as plt
#plt.figure()
line0, = plt.plot(T[:], np.array(y[0,:].T), 'b', label='WSVC 451')
line1, = plt.plot(T[:], np.array(y1[0,:].T), 'y*-', label='WSVC x')
line2, = plt.plot(T[:], np.array(y2[0,:].T), 'r-', label='WSVC 452')
#line2, = plt.plot(T[:], np.array(y2[0,:].T), 'r-', label='linear(v=1.0)')
#line3, = plt.plot(T[:], np.array(y3[0,:].T), 'g-', label='linear')
line4, = plt.plot(T[:], np.array(y4[0,:].T), 'g-', label='y4 lin, apx evo')
#first_legend = plt.legend(handles=[line1], loc=1)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel('T')
plt.ylabel('y')
plt.title('Closed Loop Step Response')
plt.show()
#plt.figure()
#line0, = plt.plot(T, u_sat, 'b', label='u')
#line0, = plt.plot(T, u1x_sat, 'b', label='WSVC x')
line0, = plt.plot(T, u, 'b.-', label='WSVC 451')
line1, = plt.plot(T, u1_sat, 'y*-', label='WSVC x')
line2, = plt.plot(T, u2_sat, 'r-', label='WSVC 452')
#line2b, = plt.plot(T, u2, 'r.-', label='u fixed')
#line3, = plt.plot(T, u3_sat, 'g', label='u, linear')
#line3b, = plt.plot(T, u2, 'g.-', label='u lin')
line4, = plt.plot(T, u4_sat, 'g-', label='u lin, apx evo')
#first_legend = plt.legend(handles=[line1, line2, line1b, line2b], loc=1)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.xlabel('T')
plt.ylabel('u')
plt.title('Control input values')
plt.show()
line6, = plt.plot(p_t, 'r', label='p (saturated)')
line7, = plt.plot(p2_t, 'r.', label='p (optimizer result)')
plt.show()
In [ ]: