In [ ]:
import numpy
import scipy.stats
from scipy.integrate import quad, dblquad

In [ ]:
from timeit import default_timer as timer

In [ ]:
CHEB_NODES_NB = 15  # not used in this Monte Carlo version
LOW_BOUND = -10.
UPP_BOUND = 10.

In [ ]:
N = 30       # dimension of the Gaussian vector
M = 1000000  # number of Monte Carlo draws

# Gaussian expectation vector
Mu = numpy.zeros(N)

# Gaussian variance-covariance matrix
Sigma = numpy.array([[ 4.09, -0.89, -1.12, -0.03,  0.21, -0.88,  0.15,  0.04, -1.01,
         0.88,  0.98,  0.26, -0.31,  0.94,  0.28, -0.05, -0.89, -1.09,
         0.14, -1.83, -0.1 , -0.76,  0.36,  0.24, -0.04, -0.18,  0.94,
         0.95,  1.01,  0.45],
       [-0.89,  3.17, -0.78, -0.32,  0.6 ,  0.34,  0.1 , -0.08,  0.6 ,
        -0.1 ,  0.59,  0.24, -0.22,  0.55, -0.29,  1.04,  0.03,  0.34,
         0.55,  0.9 , -0.39, -0.64, -1.  , -0.02, -0.64, -0.01, -0.11,
        -0.89,  1.11, -0.67],
       [-1.12, -0.78,  4.73,  0.01,  0.2 ,  1.36, -0.19, -0.05, -0.2 ,
        -1.22, -0.82, -0.01,  0.08, -0.55,  0.6 ,  0.14, -0.51, -1.24,
         1.05,  1.28,  0.95, -0.23,  2.24, -0.61,  0.64, -0.  , -0.52,
        -1.92, -1.38,  0.45],
       [-0.03, -0.32,  0.01,  3.83,  0.02, -1.1 , -0.22,  0.07, -0.59,
        -0.2 , -0.77,  0.1 ,  0.26, -0.25,  0.34,  1.38, -0.62, -0.29,
         0.33, -1.24,  0.42,  1.08,  1.14,  0.22, -1.03, -0.23,  0.2 ,
         1.36, -0.23, -0.91],
       [ 0.21,  0.6 ,  0.2 ,  0.02,  1.15, -0.1 ,  0.05,  0.05, -0.08,
        -0.6 , -0.09,  0.22, -0.07,  0.15, -0.06, -0.07, -0.46, -0.18,
         0.29,  0.12,  0.21,  0.1 , -0.21, -0.26, -0.05,  0.08, -0.26,
        -0.99, -0.23,  0.04],
       [-0.88,  0.34,  1.36, -1.1 , -0.1 ,  5.78,  0.2 , -0.04, -0.64,
        -0.31,  1.06,  0.32, -0.16, -0.71,  0.96,  0.88, -1.24, -0.18,
        -1.17, -1.07, -0.82, -0.51,  1.69, -1.13,  0.82,  0.49, -0.43,
        -0.5 , -0.04, -1.21],
       [ 0.15,  0.1 , -0.19, -0.22,  0.05,  0.2 ,  0.2 ,  0.02,  0.03,
        -0.  ,  0.02,  0.05, -0.03,  0.14,  0.01, -0.03, -0.06, -0.04,
        -0.12, -0.17, -0.04, -0.14, -0.01, -0.08,  0.07,  0.06, -0.01,
        -0.03,  0.14,  0.09],
       [ 0.04, -0.08, -0.05,  0.07,  0.05, -0.04,  0.02,  0.17,  0.04,
         0.04, -0.  , -0.05, -0.  , -0.01, -0.01, -0.14,  0.04, -0.08,
        -0.1 , -0.04,  0.03,  0.01,  0.06, -0.06,  0.01,  0.1 ,  0.01,
         0.12, -0.03,  0.05],
       [-1.01,  0.6 , -0.2 , -0.59, -0.08, -0.64,  0.03,  0.04,  2.68,
         0.36, -0.12, -0.44, -0.24, -0.57, -0.41, -0.69, -0.33,  0.46,
        -0.38,  0.72,  0.15, -0.38, -1.56,  0.09,  0.53, -0.17,  0.52,
         0.31, -0.77,  0.71],
       [ 0.88, -0.1 , -1.22, -0.2 , -0.6 , -0.31, -0.  ,  0.04,  0.36,
         3.09,  1.42, -0.15, -0.14,  0.29,  0.39, -0.54,  0.92,  0.22,
        -0.41,  0.32, -0.61, -0.3 , -0.14,  0.72, -0.13, -0.57,  0.01,
         2.19,  1.69, -0.38],
       [ 0.98,  0.59, -0.82, -0.77, -0.09,  1.06,  0.02, -0.  , -0.12,
         1.42,  2.2 ,  0.27, -0.42, -0.12,  0.09, -0.41,  0.15, -0.31,
        -0.3 ,  0.09, -0.71, -0.43,  0.33,  0.3 , -0.07, -0.14,  0.23,
         1.4 ,  1.33, -0.  ],
       [ 0.26,  0.24, -0.01,  0.1 ,  0.22,  0.32,  0.05, -0.05, -0.44,
        -0.15,  0.27,  0.55,  0.01,  0.29,  0.1 ,  0.12,  0.01, -0.21,
        -0.09, -0.13,  0.1 ,  0.12,  0.46, -0.16, -0.03, -0.11, -0.11,
        -0.11,  0.49,  0.08],
       [-0.31, -0.22,  0.08,  0.26, -0.07, -0.16, -0.03, -0.  , -0.24,
        -0.14, -0.42,  0.01,  0.55,  0.02,  0.18,  0.18,  0.25,  0.09,
        -0.16,  0.1 ,  0.06,  0.1 , -0.01, -0.03, -0.07, -0.11, -0.2 ,
        -0.14,  0.07, -0.02],
       [ 0.94,  0.55, -0.55, -0.25,  0.15, -0.71,  0.14, -0.01, -0.57,
         0.29, -0.12,  0.29,  0.02,  1.97,  0.1 ,  0.23,  0.81, -0.21,
        -0.1 , -0.1 ,  0.5 , -0.25,  0.02, -0.33,  0.23, -0.04,  0.27,
        -0.7 ,  1.22, -0.26],
       [ 0.28, -0.29,  0.6 ,  0.34, -0.06,  0.96,  0.01, -0.01, -0.41,
         0.39,  0.09,  0.1 ,  0.18,  0.1 ,  0.96,  0.42, -0.15, -0.69,
        -0.19, -0.62, -0.12, -0.22,  0.61, -0.03,  0.13, -0.02,  0.04,
         0.69, -0.02, -0.55],
       [-0.05,  1.04,  0.14,  1.38, -0.07,  0.88, -0.03, -0.14, -0.69,
        -0.54, -0.41,  0.12,  0.18,  0.23,  0.42,  2.59, -1.24, -0.2 ,
        -0.09, -0.53, -0.52, -0.14,  0.6 ,  0.03, -0.73, -0.14,  0.08,
        -0.74,  0.88, -1.  ],
       [-0.89,  0.03, -0.51, -0.62, -0.46, -1.24, -0.06,  0.04, -0.33,
         0.92,  0.15,  0.01,  0.25,  0.81, -0.15, -1.24,  2.97,  0.23,
         0.28,  1.14,  0.26,  0.27, -0.62,  0.1 ,  0.06, -0.01, -0.42,
         0.95,  0.27, -0.37],
       [-1.09,  0.34, -1.24, -0.29, -0.18, -0.18, -0.04, -0.08,  0.46,
         0.22, -0.31, -0.21,  0.09, -0.21, -0.69, -0.2 ,  0.23,  2.77,
         0.5 ,  0.19,  0.15,  0.6 , -1.25,  0.01, -0.1 , -0.12, -0.5 ,
        -1.12, -0.11, -0.67],
       [ 0.14,  0.55,  1.05,  0.33,  0.29, -1.17, -0.12, -0.1 , -0.38,
        -0.41, -0.3 , -0.09, -0.16, -0.1 , -0.19, -0.09,  0.28,  0.5 ,
         3.37, -0.11,  0.71, -0.4 , -0.32,  0.4 , -0.67, -0.01, -0.24,
        -0.48, -1.39, -0.66],
       [-1.83,  0.9 ,  1.28, -1.24,  0.12, -1.07, -0.17, -0.04,  0.72,
         0.32,  0.09, -0.13,  0.1 , -0.1 , -0.62, -0.53,  1.14,  0.19,
        -0.11,  4.13, -0.05,  0.13,  0.23,  0.44, -0.2 , -0.31, -0.97,
        -1.92,  1.56,  1.45],
       [-0.1 , -0.39,  0.95,  0.42,  0.21, -0.82, -0.04,  0.03,  0.15,
        -0.61, -0.71,  0.1 ,  0.06,  0.5 , -0.12, -0.52,  0.26,  0.15,
         0.71, -0.05,  1.37,  0.26,  0.45, -0.39,  0.42,  0.01,  0.18,
        -0.77, -0.7 ,  0.33],
       [-0.76, -0.64, -0.23,  1.08,  0.1 , -0.51, -0.14,  0.01, -0.38,
        -0.3 , -0.43,  0.12,  0.1 , -0.25, -0.22, -0.14,  0.27,  0.6 ,
        -0.4 ,  0.13,  0.26,  1.49,  0.41,  0.11, -0.12, -0.03, -0.33,
         0.03, -0.18, -0.08],
       [ 0.36, -1.  ,  2.24,  1.14, -0.21,  1.69, -0.01,  0.06, -1.56,
        -0.14,  0.33,  0.46, -0.01,  0.02,  0.61,  0.6 , -0.62, -1.25,
        -0.32,  0.23,  0.45,  0.41,  4.4 , -0.03, -0.13,  0.11, -0.31,
        -0.29,  1.69,  0.95],
       [ 0.24, -0.02, -0.61,  0.22, -0.26, -1.13, -0.08, -0.06,  0.09,
         0.72,  0.3 , -0.16, -0.03, -0.33, -0.03,  0.03,  0.1 ,  0.01,
         0.4 ,  0.44, -0.39,  0.11, -0.03,  1.36, -0.56, -0.23,  0.14,
         1.07,  0.65,  0.51],
       [-0.04, -0.64,  0.64, -1.03, -0.05,  0.82,  0.07,  0.01,  0.53,
        -0.13, -0.07, -0.03, -0.07,  0.23,  0.13, -0.73,  0.06, -0.1 ,
        -0.67, -0.2 ,  0.42, -0.12, -0.13, -0.56,  1.27,  0.05,  0.34,
        -0.34, -0.73,  0.23],
       [-0.18, -0.01, -0.  , -0.23,  0.08,  0.49,  0.06,  0.1 , -0.17,
        -0.57, -0.14, -0.11, -0.11, -0.04, -0.02, -0.14, -0.01, -0.12,
        -0.01, -0.31,  0.01, -0.03,  0.11, -0.23,  0.05,  0.69,  0.06,
        -0.16, -0.48, -0.09],
       [ 0.94, -0.11, -0.52,  0.2 , -0.26, -0.43, -0.01,  0.01,  0.52,
         0.01,  0.23, -0.11, -0.2 ,  0.27,  0.04,  0.08, -0.42, -0.5 ,
        -0.24, -0.97,  0.18, -0.33, -0.31,  0.14,  0.34,  0.06,  1.29,
         0.96, -0.28,  0.17],
       [ 0.95, -0.89, -1.92,  1.36, -0.99, -0.5 , -0.03,  0.12,  0.31,
         2.19,  1.4 , -0.11, -0.14, -0.7 ,  0.69, -0.74,  0.95, -1.12,
        -0.48, -1.92, -0.77,  0.03, -0.29,  1.07, -0.34, -0.16,  0.96,
         5.81, -0.35, -0.59],
       [ 1.01,  1.11, -1.38, -0.23, -0.23, -0.04,  0.14, -0.03, -0.77,
         1.69,  1.33,  0.49,  0.07,  1.22, -0.02,  0.88,  0.27, -0.11,
        -1.39,  1.56, -0.7 , -0.18,  1.69,  0.65, -0.73, -0.48, -0.28,
        -0.35,  4.81,  1.05],
       [ 0.45, -0.67,  0.45, -0.91,  0.04, -1.21,  0.09,  0.05,  0.71,
        -0.38, -0.  ,  0.08, -0.02, -0.26, -0.55, -1.  , -0.37, -0.67,
        -0.66,  1.45,  0.33, -0.08,  0.95,  0.51,  0.23, -0.09,  0.17,
        -0.59,  1.05,  3.02]])
print "Mu = %s" % Mu
print "Sigma = \n %s" % Sigma

ts_rv = timer()
rv = scipy.stats.multivariate_normal(mean=Mu, cov=Sigma, allow_singular=True)
X = rv.rvs(size=M)
te_rv = timer()

ctp = te_rv - ts_rv

Link between paper and code

\begin{equation*} \ell(x) = \sum_k x_k + \frac{1}{2} \sum_k (x_k^+)^2 + \sum_{j < k} x_j^+ x_k^+ \end{equation*}
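
For reference, here is a direct numpy transcription of this loss (a sketch; the helper name `loss` is ours, not from the paper). It uses the identity $\sum_{j<k} a_j a_k = \frac{1}{2}\left[\left(\sum_k a_k\right)^2 - \sum_k a_k^2\right]$:

In [ ]:
def loss(x):
    # l(x) = sum_k x_k + 1/2 sum_k (x_k^+)^2 + sum_{j<k} x_j^+ x_k^+
    xp = numpy.maximum(x, 0.)
    sq = numpy.square(xp).sum()
    return x.sum() + .5 * sq + .5 * (xp.sum()**2 - sq)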

We need to solve the following system:

\begin{equation*} \begin{cases} \lambda \left[ 1 + \mathbb{E} \left( \left(X_k - m_k^* \right)^+ \right) + \sum_{j \neq k} \mathbb{E} \left( \left(X_j - m_j^* \right)^+ 1_{X_k > m_k^*} \right) \right] = 1 \text{ for all $k$} \\ \sum_k \mathbb{E} \left( X_k - m_k^* \right) + \sum_k \frac{1}{2} \mathbb{E} \left( \left[ \left( X_k - m_k^* \right)^+ \right]^2 \right) + \sum_{j < k} \mathbb{E} \left( \left( X_j - m_j^* \right)^+ \left( X_k - m_k^* \right)^+ \right) = c \end{cases} \end{equation*}
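
This gives $N + 1$ equations for the $N + 1$ unknowns $m_1^*, \ldots, m_N^*$ and $\lambda$; below, the system is solved numerically with scipy.optimize.fsolve applied to the squared residuals.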

Definitions from the paper:

\begin{equation*} \begin{cases} \lambda \left[ 1 + f_k \left( m_k^* \right) + \sum_{j \neq k} l_{j, k} \left( m_j^*, m_k^* \right) \right] = 1 \text{ for all $k$} \\ \sum_k \left( e_k \left( m_k^* \right) + \frac{1}{2} g_k \left( m_k^* \right) + \sum_{j < k} h \left( m_j^*, m_k^* \right) \right) = c \end{cases} \end{equation*}
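
The functions e, f, g, h and l defined below are the sample counterparts of $e_k$, $f_k$, $g_k$, $h$ and $l_{j,k}$: e is exact (the mean vector Mu is known), while the others are Monte Carlo estimates over the M draws stored in X.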

In [ ]:
def e(k, x):
    # e_k(m) = E(X_k - m) = mu_k - m; exact, since the mean vector Mu is known
    return Mu[k] - x

In [ ]:
def f(k, x):
    # Monte Carlo estimate of f_k(m) = E[(X_k - m)^+] over the sample X
    Xkmxp = numpy.maximum(X[:, k] - x, 0.)
    return numpy.mean(Xkmxp)
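
A quick sanity check (ours, not from the original notebook): for a standard normal variable, $\mathbb{E}(Z - m)^+ = \varphi(m) - m(1 - \Phi(m))$, so the Monte Carlo estimator can be compared against the closed form.

In [ ]:
# Illustrative check on a fresh standard-normal sample (independent of X)
Z = numpy.random.standard_normal(M)
m_test = 0.5
mc = numpy.mean(numpy.maximum(Z - m_test, 0.))
exact = scipy.stats.norm.pdf(m_test) - m_test * (1. - scipy.stats.norm.cdf(m_test))
print("MC = %.4f, closed form = %.4f" % (mc, exact))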

In [ ]:
def g(k, x):
    # Monte Carlo estimate of g_k(m) = E[((X_k - m)^+)^2]
    Xkmxp = numpy.maximum(X[:, k] - x, 0.)
    return numpy.mean(numpy.square(Xkmxp))

In [ ]:
def h(j, k, x, y):
    # Monte Carlo estimate of h(m_j, m_k) = E[(X_j - m_j)^+ (X_k - m_k)^+]
    Xmmp = numpy.maximum(X[:, [j, k]] - numpy.array([x, y]), 0.)
    return numpy.mean(numpy.prod(Xmmp, axis=1))

In [ ]:
def l(j, k, x, y):
    # Monte Carlo estimate of l_{j,k}(m_j, m_k) = E[(X_j - m_j)^+ 1_{X_k > m_k}]
    Xmmp = numpy.maximum(X[:, [j, k]] - numpy.array([x, y]), 0.)

    # sign turns the positive part into the indicator 1_{X_k > m_k},
    # using the fact that sign(0) = 0
    Xmmp[:, 1] = numpy.sign(Xmmp[:, 1])

    return numpy.mean(numpy.prod(Xmmp, axis=1))
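
The sign trick can be checked in isolation: since numpy.sign(0) = 0, the sign of the positive part is exactly the indicator of strict positivity.

In [ ]:
t = numpy.array([-1.5, 0., 2.3])
print(numpy.sign(numpy.maximum(t, 0.)))  # [0. 0. 1.], i.e. the indicator of t > 0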

In [ ]:
class WrapperFunc(object):
    """Pre-binds the leading index arguments of `func`; the remaining
    positional arguments (the m-values) are supplied at call time."""
    def __init__(self, func, indexes):
        self._i = indexes
        self._f = func

    def __call__(self, *x):
        return self._f(*(self._i + list(x)))
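
WrapperFunc simply pre-binds the index arguments, much like functools.partial. For instance (an illustrative call; both lines evaluate h(0, 1, 0.2, -0.3)):

In [ ]:
from functools import partial

h01 = WrapperFunc(h, [0, 1])
print(h01(0.2, -0.3))
print(partial(h, 0, 1)(0.2, -0.3))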

In the parallel version of this notebook the variables and functions are pushed to the remote engines; in this local version we only need to make the helper library importable.


In [ ]:
import sys

sys.path.append("../../lib")

In [ ]:
from tchebychev import TchebychevFun  # not used in this Monte Carlo version

def init_funcs():
    # Build, for each k, the single-index estimators e_k, f_k, g_k and the
    # pairwise estimators h(j, k) for j < k and l_{j,k} for j != k
    funcs = dict()

    for k in range(N):
        ek = WrapperFunc(e, [k])
        fk = WrapperFunc(f, [k])
        gk = WrapperFunc(g, [k])

        hk = dict()
        lk = dict()
        for j in range(N):
            if j < k:
                hk[j] = WrapperFunc(h, [j, k])
            if j != k:
                lk[j] = WrapperFunc(l, [j, k])

        funcs[k] = {"e": ek, "f": fk, "g": gk,
                    "hk": hk, "lk": lk}

    return funcs

In [ ]:
def prepare_parameterized_funcs(dict_funcs):
    # Flatten the nested dict into {"f_k": (func, k), "h_j_k": (func, j, k), ...}
    res = dict()

    for k, v in dict_funcs.items():
        for kk, vv in v.items():
            if isinstance(vv, dict):
                for j, func in vv.items():
                    # "hk"/"lk" -> "h"/"l"
                    res["%s_%s_%s" % (kk[:-1], j, k)] = (func, j, k)
            else:
                res["%s_%s" % (kk, k)] = (vv, k)

    return res

In [ ]:
def run_par_func(param):
    # param = (func, k) or (func, j, k); evaluate the estimator at the
    # current point m_current (set in `objective` below)
    func = param[0]
    return func(*[m_current[i] for i in param[1:]])

In [ ]:
funcs = init_funcs()
flatten_funcs = prepare_parameterized_funcs(funcs)
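
The flattened keys encode the estimator name and its indexes, e.g. "f_3" maps to (wrapper around f, 3) and "h_1_4" to (wrapper around h, 1, 4). A quick look (illustrative):

In [ ]:
print(sorted(flatten_funcs)[:5])
print(len(flatten_funcs))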

In [ ]:
from scipy.optimize import fsolve

def optimize(c, **kwargs):
    __results = {key: dict() for key in "efghl"}

    def fill_results(lbl, res):
        # Parse labels of the form "f_k" or "h_j_k" and store the value
        m_lbl = lbl.split("_")

        fname = m_lbl[0]
        k = int(m_lbl[-1])

        if len(m_lbl) == 3:
            j = int(m_lbl[1])
            if k not in __results[fname]:
                __results[fname][k] = dict()

            __results[fname][k][j] = res
        else:
            __results[fname][k] = res

    def objective(v_x):
        _lambda = v_x[-1]

        # Local version: evaluate every estimator at the clipped current point
        global m_current
        m_current = numpy.clip(v_x, LOW_BOUND, UPP_BOUND)
        for lbl, spec in flatten_funcs.items():
            fill_results(lbl, run_par_func(spec))

        last = 0.
        res = numpy.zeros(N + 1)

        for k in range(N):
            # first-order conditions: lambda * [1 + f_k + sum_{j != k} l_{j,k}] = 1
            res_k = 1 + __results["f"][k]
            for j, v in __results["l"][k].items():
                res_k += v

            res[k] = (_lambda * res_k - 1.)**2

            # budget equation: sum_k [e_k + g_k / 2 + sum_{j < k} h(j, k)] = c
            last += __results["e"][k] + .5 * __results["g"][k]
            if k in __results["h"]:
                for j, v in __results["h"][k].items():
                    last += v

        res[-1] = (last - c)**2
        return res

    guess = kwargs.pop('guess', None)
    if guess is None:
        guess = numpy.zeros(N + 1)

    return fsolve(objective, guess, **kwargs)
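
Note that fsolve searches for a root of the residual vector, and `objective` returns squared residuals, whose Jacobian vanishes at the solution; the solver can therefore stop with ier != 1 before full convergence, which is why the cell below restarts it from the last iterate until ier == 1.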

In [ ]:
c = 1.

guess = numpy.zeros(N+1)

In [ ]:
tic = timer()

continue_ = True
__i = 1
l_N = 0
while continue_:
    print "iteration nb %i" % __i
    optim_result = optimize(c, guess=guess, maxfev=1000, full_output=True)
    continue_ = optim_result[2] != 1
    if continue_:
        guess = optim_result[0]
        __i += 1
        l_N += optim_result[1]["nfev"]
        print
    
toc_m_tic = timer() - tic

print optim_result[-1]

In [ ]:
m_star = optim_result[0][:-1]
lb_star = optim_result[0][-1]
cto = toc_m_tic

print "Time of optimization: %.2f sec" % (cto)
print "m star = %s" % m_star
print "lambda star = %s" % lb_star

In [ ]:
l_N += optim_result[1]["nfev"]

print "l(%i) = %i" % (N, l_N)
print "function evaluated at the output = %s" % optim_result[1]["fvec"]

Now just copy the results into Excel:


In [ ]:
print(m_star)
print()
print("%f \n %i \n %.2f \n %.2f" % (lb_star, l_N, ctp, cto))