In [1]:
import numpy as np
import pandas as pd
import accpm
from IPython.display import display
%load_ext autoreload
%autoreload 1
%aimport accpm
$\DeclareMathOperator{\domain}{dom} \newcommand{\transpose}{\text{T}} \newcommand{\vec}[1]{\begin{pmatrix}#1\end{pmatrix}}$
To test the $\texttt{analytic_center}$ function we consider the following example. Suppose we want to find the analytic center $x_{ac} \in \mathbb{R}^2$ of the inequalities $x_1 \leq c_1, x_1 \geq 0, x_2 \leq c_2, x_2 \geq 0$. This region is a rectangle with dimensions $c_1 \times c_2$ centered at $(\frac{c_1}{2}, \frac{c_2}{2})$, so we should have $x_{ac} = (\frac{c_1}{2}, \frac{c_2}{2})$. Now, $x_{ac}$ is the solution of the minimization problem \begin{equation*} \min_{\domain \phi} \phi(x) = - \sum_{i=1}^{4}{\log{(b_i - a_i^\transpose x)}} \end{equation*} where \begin{equation*} \domain \phi = \{x \;|\; a_i^\transpose x < b_i, i = 1, 2, 3, 4\} \end{equation*} with \begin{align*} &a_1 = \begin{bmatrix}1\\0\end{bmatrix}, &&b_1 = c_1, \\ &a_2 = \begin{bmatrix}-1\\0\end{bmatrix}, &&b_2 = 0, \\ &a_3 = \begin{bmatrix}0\\1\end{bmatrix}, &&b_3 = c_2, \\ &a_4 = \begin{bmatrix}0\\-1\end{bmatrix}, &&b_4 = 0. \end{align*} So we solve \begin{align*} &\phantom{\iff}\nabla \phi(x) = \sum_{i=1}^{4} \frac{1}{b_i - a_i^\transpose x}a_i = 0 \\ &\iff \frac{1}{c_1-x_1}\begin{bmatrix}1\\0\end{bmatrix} + \frac{1}{x_1}\begin{bmatrix}-1\\0\end{bmatrix} + \frac{1}{c_2-x_2}\begin{bmatrix}0\\1\end{bmatrix} + \frac{1}{x_2}\begin{bmatrix}0\\-1\end{bmatrix} = 0 \\ &\iff \frac{1}{c_1-x_1} - \frac{1}{x_1} = 0, \frac{1}{c_2-x_2} - \frac{1}{x_2} = 0 \\ &\iff x_1 = \frac{c_1}{2}, x_2 = \frac{c_2}{2}, \end{align*} as expected.
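As a quick sanity check on this derivation, the log-barrier $\phi$ can also be minimized with a general-purpose optimizer; a minimal sketch, assuming $\texttt{scipy}$ is available and using the illustrative values $c_1 = 4$, $c_2 = 2$:

import numpy as np
from scipy.optimize import minimize

c1, c2 = 4.0, 2.0
A = np.array([[1, 0], [-1, 0], [0, 1], [0, -1]])
b = np.array([c1, 0, c2, 0])

def phi(x):
    # Log-barrier for the box 0 <= x_1 <= c_1, 0 <= x_2 <= c_2;
    # return +inf outside the (open) feasible region.
    slack = b - A @ x
    if np.any(slack <= 0):
        return np.inf
    return -np.sum(np.log(slack))

x_ac = minimize(phi, x0=np.array([0.1, 0.1]), method='Nelder-Mead').x
print(x_ac)  # should be close to (c1/2, c2/2) = (2, 1)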
We test $\texttt{analytic_center}$ for varying values of $c_1, c_2$ and algorithm parameters $\texttt{alpha, beta}$:
In [2]:
def get_results(A, test_input, alpha, beta, tol=10e-8):
    expected = []
    actual = []
    result = []
    for (c1, c2) in test_input:
        b = np.array([c1, 0, c2, 0])
        ac_expected = np.asarray((c1/2, c2/2))
        ac_actual = accpm.analytic_center(A, b, alpha=alpha, beta=beta)
        expected.append(ac_expected)
        actual.append(ac_actual)
        # Compare within a tolerance rather than testing exact equality.
        if np.linalg.norm(ac_expected - ac_actual) <= tol:
            result.append(True)
        else:
            result.append(False)
    results = pd.DataFrame([test_input, expected, actual, result])
    results = results.transpose()
    results.columns = ['test_input', 'expected', 'actual', 'result']
    print('alpha =', alpha, 'beta =', beta)
    display(results)
Here we have results for squares of varying sizes and for varying values of $\texttt{alpha}$ and $\texttt{beta}$. In general, the algorithm performs worse on large starting polyhedra than on small ones (see the error-norm sketch after these result tables). This seems acceptable given that we are most concerned with smaller polyhedra.
In [3]:
A = np.array([[1, 0], [-1, 0], [0, 1], [0, -1]])
test_input = [(1, 1), (5, 5), (20, 20), (10e2, 10e2), (10e4, 10e4),
              (10e6, 10e6), (10e8, 10e8), (10e10, 10e10),
              (0.5, 0.5), (0.1, 0.1), (0.01, 0.01),
              (0.005, 0.005), (0.001, 0.001), (0.0005, 0.0005), (0.0001, 0.0001),
              (0.00005, 0.00005), (0.00001, 0.00001), (0.00001, 0.00001)]
In [4]:
get_results(A, test_input, alpha=0.01, beta=0.7)
In [5]:
get_results(A, test_input, alpha=0.01, beta=0.99)
In [6]:
get_results(A, test_input, alpha=0.49, beta=0.7)
In [7]:
get_results(A, test_input, alpha=0.25, beta=0.7)
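To make the size dependence visible more directly, the pass/fail check can be replaced by the error norm itself; a minimal sketch modeled on $\texttt{get_results}$ above, where $\texttt{get_errors}$ is a hypothetical helper and only the $\texttt{accpm.analytic_center}$ call is as used in the tests:

def get_errors(A, test_input, alpha, beta):
    # Record the distance between the computed and the expected analytic
    # center, so the growth of the error with the box size is visible.
    errors = []
    for (c1, c2) in test_input:
        b = np.array([c1, 0, c2, 0])
        ac_expected = np.asarray((c1/2, c2/2))
        ac_actual = accpm.analytic_center(A, b, alpha=alpha, beta=beta)
        errors.append(np.linalg.norm(ac_expected - ac_actual))
    return pd.DataFrame({'test_input': test_input, 'error': errors})

display(get_errors(A, test_input, alpha=0.01, beta=0.7))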