In [1]:
import sys

sys.path.append('../../..')

In [2]:
import numpy
import scipy
import scipy.stats

from lib.msra_analytical import AnalyticalLossFunctionAbs

In [3]:
class AnalyticalGaussLossFunction(AnalyticalLossFunctionAbs):
    """Closed-form shortfall risk for losses with Gaussian marginals
    X_i ~ N(mu_i, sigma_i^2).

    Only the diagonal of the supplied covariance matrix enters the
    formulas below, i.e. cross-correlations are ignored by this class.

    Parameters
    ----------
    v_mu : sequence of float
        Mean vector (length d).
    v_sigma : array-like
        Covariance matrix, flat or nested; reshaped to (d, d).
    c : float, optional
        Threshold forwarded to the base class constructor.
    """

    def __init__(self, v_mu, v_sigma, c=None):
        self.__v_mu = numpy.array(v_mu)
        # Accept the covariance flat or nested and force a (d, d) matrix.
        self.__v_sigma = numpy.array(v_sigma).reshape((len(v_mu), len(v_mu)))

        # 1/sqrt(2*pi): normalizing constant of the standard normal pdf,
        # precomputed once since __g and __f evaluate it per coordinate.
        self.__sqrt_1_over_2pi = 1. / numpy.sqrt(2 * numpy.pi)

        super(AnalyticalGaussLossFunction, self).__init__(len(v_mu), c)

    def __e(self, m):
        # sum_i (mu_i - m_i) = E[sum_i (X_i - m_i)] (linearity of E).
        return (self.__v_mu - m).sum()

    def __g(self, m):
        """Return sum_i E[((X_i - m_i)^+)^2] via the standard closed form

            E[((X-m)^+)^2] = ((mu-m)^2 + sigma^2) * Phi(z)
                             + (mu-m) * sigma * phi(z),

        where z = (mu - m)/sigma and Phi/phi are the standard normal
        cdf/pdf.
        """
        res = 0.
        for i, m_i in enumerate(m):
            mu = self.__v_mu[i]
            sigma2 = self.__v_sigma[i, i]
            sigma = numpy.sqrt(sigma2)
            # Standardized distance; hoisted so it is computed once.
            z = (mu - m_i) / sigma

            first_term = ((mu - m_i)**2 + sigma2) * scipy.stats.norm.cdf(z)
            sec_term = (mu - m_i) * sigma * numpy.exp(-.5 * z**2) * self.__sqrt_1_over_2pi

            res += first_term + sec_term
        return res

    def __f(self, m):
        """Return the list [E[(X_i - m_i)^+] for each i] via the closed form

            E[(X-m)^+] = (mu-m) * Phi(z) + sigma * phi(z),

        with z = (mu - m)/sigma.
        """
        res = []
        for i, m_i in enumerate(m):
            mu = self.__v_mu[i]
            sigma = numpy.sqrt(self.__v_sigma[i, i])
            # Standardized distance; hoisted so it is computed once.
            z = (mu - m_i) / sigma

            first_term = (mu - m_i) * scipy.stats.norm.cdf(z)
            sec_term = self.__sqrt_1_over_2pi * sigma * numpy.exp(-.5 * z**2)

            res.append(first_term + sec_term)

        return res

    def shortfall_risk(self, m=None):
        """Analytical shortfall risk

            E[ sum_i (X_i - m_i) + 0.5 * ((X_i - m_i)^+)^2 ],

        evaluated as __e(m) + 0.5 * __g(m).

        m is validated/defaulted by the base class's _check_argument.
        """
        m = self._check_argument(m)

        sum_e = self.__e(m)
        sum_g = self.__g(m)

        return sum_e + 0.5 * sum_g

    def shortfall_risk_jac(self, m):
        """Per-coordinate values 1 + E[(X_i - m_i)^+], as a numpy array.

        NOTE(review): the analytic derivative of shortfall_risk w.r.t.
        m_i is -(1 + f_i); this method returns the positive magnitude,
        matching the sign convention the base class's constraint wrapper
        evidently expects (the saved run converges) -- confirm against
        lib.msra_analytical before changing.
        """
        m = self._check_argument(m)

        # Vectorized replacement of the former Python-2-only `xrange`
        # loop; result is unchanged (element-wise 1 + f).
        return 1. + numpy.asarray(self.__f(m))

In [4]:
# Two uncorrelated standard-normal losses: zero means, identity covariance.
mu = [0.] * 2
sigma = numpy.eye(2).tolist()

# Threshold passed to the loss function.
c = 1.

loss = AnalyticalGaussLossFunction(mu, sigma, c)

In [5]:
from scipy.optimize import minimize

maxiter = 3500

In [6]:
guess = 1000. * numpy.ones(loss.dim)

In [7]:
# Inequality constraint for SLSQP. The bound methods are passed
# directly -- wrapping them in `lambda x: ...` (as before) added a
# needless call layer with no change in behavior (PEP 8 discourages
# lambda wrappers around existing callables).
cons = ({'type': 'ineq',
         'fun' : loss.ineq_constraint,
         'jac' : loss.ineq_constraint_jac})

In [8]:
# Solve the constrained problem with SLSQP: minimize loss.objective
# subject to the inequality constraint defined in `cons`, starting
# from `guess`, with analytic jacobians supplied for both.
res = minimize(loss.objective, guess, 
               jac=loss.objective_jac, 
               constraints=cons, 
               method='SLSQP',
               options={'maxiter': maxiter})

In [9]:
%%timeit

res = minimize(loss.objective, guess, 
               jac=loss.objective_jac, 
               constraints=cons, 
               method='SLSQP',
               options={'maxiter': maxiter})


100 loops, best of 3: 4.57 ms per loop

In [10]:
# Display the full optimizer result, then the inequality-constraint
# value at the solution (the saved output shows ~ -3.7e-09, i.e. the
# constraint is numerically active/binding at the optimum).
# Python 2 print statements -- kept as-is for the Py2 kernel.
print res
print
print loss.ineq_constraint(res.x)


     fun: -0.34621051382304013
     jac: array([ 1.,  1.,  0.])
 message: 'Optimization terminated successfully.'
    nfev: 10
     nit: 10
    njev: 10
  status: 0
 success: True
       x: array([-0.17310526, -0.17310526])

-3.66895003268e-09