In [1]:
SEED = sum(map(ord, "FOURIER_MSRA_2D"))
In [2]:
import sys
sys.path.append('../../..')
In [3]:
from lib.msra_analytical import AnalyticalLossFunctionAbs
from lib.tchebychev import ChebyshevFun
In [4]:
import warnings
# Escalate warnings to exceptions so that scipy's IntegrationWarning can be caught
# and retried in the Fourier quadrature routines below.
warnings.filterwarnings('error')
In [5]:
import numpy
import scipy
import scipy.integrate
import scipy.stats
from scipy.integrate import quad
In [6]:
def fourier_integral1d(cplx_integrand, j, x, alpha):
    # Integrate the real part of the complex-valued integrand over the whole real line.
    def real_integrand(u):
        return numpy.real(cplx_integrand(u, j, x, alpha))
    return quad(real_integrand, -numpy.inf, numpy.inf, epsrel=1e-4)[0]
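As a quick sanity check of the quadrature helper (an added sketch, not in the original notebook; the test integrand below is hypothetical and ignores j, x and alpha), the real part of a standard Gaussian kernel should integrate to sqrt(2*pi) ≈ 2.5066.
In [ ]:
# Hypothetical test integrand: complex-valued, independent of j, x and alpha.
def _gauss_kernel(u, j, x, alpha):
    return numpy.exp(-0.5 * u**2) + 0j

print(fourier_integral1d(_gauss_kernel, 0, 0., 0.))   # ~2.5066
print(numpy.sqrt(2. * numpy.pi))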
In [7]:
class FourierGaussLossFunction(AnalyticalLossFunctionAbs):
def __init__(self, v_mu, v_sigma, c=None):
self.__v_mu = numpy.array(v_mu)
self.__v_sigma = numpy.array(v_sigma).reshape((len(v_mu), len(v_mu)))
self.__minus_1_half_over_pi = -0.5 / numpy.pi
self.__1_over_pi = 1. / numpy.pi
super(FourierGaussLossFunction, self).__init__(len(v_mu), c)
    def moment_generating(self, t, i):
        # Gaussian moment generating function of X_i evaluated at (possibly complex) t:
        # E[exp(t X_i)] = exp(mu_i t + 0.5 sigma_ii t^2).
        mu = self.__v_mu[i]
        sigma2 = self.__v_sigma[i, i]
        return numpy.exp(mu * t + 0.5 * sigma2 * t**2)
def e(self, m):
return (self.__v_mu - m).sum()
    def g_fourier_integrand(self, u, j, m_j, eta):
        # Damped Fourier integrand used by g, with damping parameter eta > 0.
        i = 1j
        eta_m_iu = eta - i * u
        res = numpy.exp(-eta_m_iu * m_j)
        res *= self.moment_generating(eta_m_iu, j)
        res /= i * (u + i * eta)**3
        return res
    def g(self, i, m_i):
        # Retry with a fresh random damping parameter eta until the quadrature converges.
        while True:
            eta = 1.5 * numpy.random.rand()
            try:
                integral = fourier_integral1d(self.g_fourier_integrand, i, m_i, eta)
                return self.__1_over_pi * integral
            except scipy.integrate.IntegrationWarning:
                # print("g not converging for x = %s, alpha = %s" % (m_i, eta))
                pass
    def f_fourier_integrand(self, u, j, m_j, eta):
        # Damped Fourier integrand used by f, with damping parameter eta > 0.
        i = 1j
        eta_m_iu = eta - i * u
        res = numpy.exp(-eta_m_iu * m_j)
        res *= self.moment_generating(eta_m_iu, j)
        res /= (u + i * eta)**2
        return res
    def f(self, i, m_i):
        # Retry with a fresh random damping parameter eta until the quadrature converges.
        while True:
            eta = 1.5 * numpy.random.rand()
            try:
                integral = fourier_integral1d(self.f_fourier_integrand, i, m_i, eta)
                return self.__minus_1_half_over_pi * integral
            except scipy.integrate.IntegrationWarning:
                # print("f not converging for x = %s, alpha = %s" % (m_i, eta))
                pass
def shortfall_risk(self, m=None):
m = self._check_argument(m)
sum_e = self.e(m)
sum_g = 0.
for i, m_i in enumerate(m):
sum_g += self.g(i, m_i)
return sum_e + 0.5 * sum_g
def shortfall_risk_jac(self, m):
m = self._check_argument(m)
res = []
for i, m_i in enumerate(m):
partial_der = 1 + self.f(i, m_i)
res.append(partial_der)
return numpy.array(res)
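The damped Fourier integrals above appear to recover f(i, m) = E[(X_i - m)^+] and g(i, m) = E[((X_i - m)^+)^2] for Gaussian X_i; the cell below is a minimal Monte Carlo cross-check of that reading. It assumes FourierGaussLossFunction can be instantiated directly with the same arguments used for the Chebyshev variant later on; the test point m_test and the sample size are arbitrary.
In [ ]:
# Monte Carlo cross-check (added): compare the Fourier-based f and g with sample
# averages of the positive part and its square for X_0 ~ N(0, 1).
check_loss = FourierGaussLossFunction([0., 0.], [[1., 0.], [0., 1.]], 1.)
m_test = 0.5
samples = numpy.random.RandomState(SEED).normal(0., 1., size=1000000)
pos_part = numpy.maximum(samples - m_test, 0.)
print(check_loss.f(0, m_test), pos_part.mean())
print(check_loss.g(0, m_test), (pos_part**2).mean())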
In [8]:
class ChebyshevGaussLossFunction(FourierGaussLossFunction):
def __init__(self, low_bounds, upp_bounds, nb_nodes, v_mu, v_sigma, c=None):
super(ChebyshevGaussLossFunction, self).__init__(v_mu, v_sigma, c)
self.__init_cheby_interpol(low_bounds, upp_bounds, nb_nodes)
    def __init_cheby_interpol(self, low_bounds, upp_bounds, nb_nodes):
        # Build one-dimensional Chebyshev interpolants of f and g for each component.
        # Binding i as a default argument avoids the late-binding pitfall of lambdas
        # created in a loop.
        self.f_interpol, self.g_interpol = [], []
        for i in range(self.dim):
            tmp_f = lambda x, i=i: self.f(i, x)
            f_cheb = ChebyshevFun(tmp_f, [low_bounds[0]], [upp_bounds[0]], [nb_nodes[0]])
            self.f_interpol.append(f_cheb)
            tmp_g = lambda x, i=i: self.g(i, x)
            g_cheb = ChebyshevFun(tmp_g, [low_bounds[1]], [upp_bounds[1]], [nb_nodes[1]])
            self.g_interpol.append(g_cheb)
def shortfall_risk(self, m=None):
m = self._check_argument(m)
sum_e = self.e(m)
sum_g = 0.
for i, m_i in enumerate(m):
sum_g += self.g_interpol[i](m_i)
return sum_e + 0.5 * sum_g
def shortfall_risk_jac(self, m):
m = self._check_argument(m)
res = []
for i, m_i in enumerate(m):
partial_der = 1 + self.f_interpol[i](m_i)
res.append(partial_der)
return numpy.array(res)
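The cell below illustrates ChebyshevFun on its own, assuming only the constructor and call signature used above: interpolate numpy.exp on [-1, 1] with 20 nodes (arbitrary choices) and compare the interpolant with the exact value at a test point.
In [ ]:
# Standalone illustration (added): 1-D Chebyshev interpolation of a known function.
exp_cheb = ChebyshevFun(numpy.exp, [-1.], [1.], [20])
print(exp_cheb(0.3), numpy.exp(0.3))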
In [9]:
l_bounds = [-10., -10.]
u_bounds = [10., 10.]
nodes = [30, 30]
In [10]:
mu = [0., 0.]
sigma = [[1., 0.], [0., 1.]]
c = 1.
loss = ChebyshevGaussLossFunction(l_bounds, u_bounds, nodes, mu, sigma, c)
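A spot check (added, at an arbitrary test point) that the Chebyshev interpolants closely reproduce the direct Fourier evaluations of f and g inside the interpolation domain:
In [ ]:
# Interpolated vs. direct evaluation; the pairs should agree to interpolation accuracy.
m_test = 0.3
print(loss.f_interpol[0](m_test), loss.f(0, m_test))
print(loss.g_interpol[0](m_test), loss.g(0, m_test))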
In [11]:
from scipy.optimize import minimize
maxiter = 10000
In [12]:
guess = numpy.zeros(loss.dim)
In [13]:
cons = ({'type': 'ineq',
         'fun': loss.ineq_constraint,
         'jac': loss.ineq_constraint_jac})
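A small added check before running the optimizer: SLSQP treats 'ineq' constraints as fun(x) >= 0, so a non-negative value at the starting point means the initial guess is feasible.
In [ ]:
# Constraint value and gradient at the starting point (added check).
print(loss.ineq_constraint(guess))
print(loss.ineq_constraint_jac(guess))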
In [14]:
numpy.random.seed(SEED)
res = minimize(loss.objective, guess,
jac=loss.objective_jac,
constraints=cons,
method='SLSQP',
options={'maxiter': maxiter, 'disp': True})
In [15]:
%%timeit
numpy.random.seed(SEED)
res = minimize(loss.objective, guess,
jac=loss.objective_jac,
constraints=cons,
method='SLSQP',
options={'maxiter': maxiter})
In [16]:
print(res)
print()
print(loss.ineq_constraint(res.x))