In [3]:
# Time each part of the LP approach
%matplotlib inline
import matplotlib.pyplot as plt
import quantecon as qe
import numpy as np
from gurobipy import *
import time

class SimpleOG(object):

    def __init__(self, B=10, M=5, alpha=0.5, beta=0.9):

        self.B, self.M, self.alpha, self.beta  = B, M, alpha, beta
        self.n = B + M + 1
        self.m = M + 1

        self.R = np.empty((self.n, self.m))
        self.Q = np.zeros((self.n, self.m, self.n))

        self.populate_Q()
        self.populate_R()

    def u(self, c):
        return c**self.alpha

    def populate_R(self):
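        # R[s, a] = u(s - a), the utility of consuming c = s - a;
        # infeasible actions (a > s) get -inf so they are never chosen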

        for s in range(self.n):
            for a in range(self.m):
                self.R[s, a] = self.u(s - a) if a <= s else -np.inf

    def populate_Q(self):
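        # under action a the next state is drawn uniformly from {a, a + 1, ..., a + B},
        # so row (s, a) of Q puts probability 1 / (B + 1) on each of those B + 1 states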

        for a in range(self.m):
            self.Q[:, a, a:(a + self.B + 1)] = 1.0 / (self.B + 1)

# Exact LP (GUROBI setting)
def LP_approach(setting):
    num_state = range(1, setting.num_states + 1)
    num_action = range(1, setting.num_actions + 1)
    LP = Model()
    
    # decision variables: one v(s) per state
    x = {}
    for i in num_state:
        x[i] = LP.addVar(vtype = "C", name = "x(%s)" %(i))
    LP.update()
    
    for i in num_state:
        for j in num_action:
            LP.addConstr((x[i] - setting.beta * quicksum(setting.Q[i-1, j-1, k-1] * x[k] for k in num_state)) >= setting.R[i-1,j-1])
            
    LP.setObjective(quicksum(x[i] for i in num_state))
    LP.params.OutputFlag = 0
    LP.optimize()
    
    # extract the optimal value function from the LP solution
    v = np.empty(setting.num_states)
    count = 0
    for value in LP.getVars():
        v[count] = value.X
        count += 1
        
    # greedy policy with respect to v
    sigma = setting.compute_greedy(v)
    
    # result
    res = qe.markov.ddp.DPSolveResult(v=v, sigma = sigma,
                            mc=setting.controlled_mc(sigma),
                            method = 'Exact Linear Programming (GUROBI setting)',
                            )
    
    
    return res
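
For reference, LP_approach builds the standard exact-LP formulation of a discounted DP: minimize \sum_s v(s) subject to v(s) - \beta \sum_{s'} Q(s, a, s') v(s') \ge R(s, a) for every state-action pair (s, a). At the optimum, v equals the optimal value function, and compute_greedy then recovers an optimal policy from it.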

    
# linear programming time
def elapse_LP(setting):
    
    # setting
    start1 = time.time()
    num_state = range(1, setting.num_states + 1)
    num_action = range(1, setting.num_actions + 1)
    LP = Model()
    
    x = {}
    for i in num_state:
        x[i] = LP.addVar(vtype = "C", name = "x(%s)" %(i))
    LP.update()
    
    for i in num_state:
        for j in num_action:
            LP.addConstr((x[i] - setting.beta * quicksum(setting.Q[i-1, j-1, k-1] * x[k] for k in num_state)) >= setting.R[i-1,j-1])
            
    LP.setObjective(quicksum(x[i] for i in num_state))
    LP.params.OutputFlag = 0
    print "setting time", time.time() - start1
    
    # optimization time
    start2 = time.time()
    LP.optimize()
    print "optimization time", time.time() - start2
    
    # preparing for answer_ value function
    start3 = time.time()
    v = np.empty(setting.num_states)
    count = 0
    for value in LP.getVars():
        v[count] = value.X
        count += 1
    print "preparing for answer_ value function", time.time() - start3
        
    # preparing for answer_ policy function
    start4 = time.time()
    sigma = setting.compute_greedy(v)
    print "preparing for answer_ policy function", time.time() - start4
    
    # result
    res = qe.markov.ddp.DPSolveResult(v=v, sigma = sigma,
                            mc=setting.controlled_mc(sigma),
                            method = 'Exact Linear Programming (GUROBI setting)',
                            )

# value function time
def elapse_value(setting):
    start = time.time()
    setting.solve(method='value_iteration')
    elapsed_time = time.time() - start
    return elapsed_time

# plot LP and value-iteration solve times as M grows from 0 to n-1
def plot_graph(n, shock, discount):
    a = [None] * n
    b = [None] * n
    for i in range(n):
        h = SimpleOG(M = i, B = shock, beta = discount)
        d = qe.markov.DiscreteDP(h.R, h.Q, h.beta)
        # elapse_LP only prints its timings, so time the whole call here
        start = time.time()
        elapse_LP(d)
        a[i] = time.time() - start
        b[i] = elapse_value(d)

    plt.plot(a)
    plt.plot(b)
    
# LP solve time as beta varies
def plot_beta_LP(shock, state, betas):
    n = [None] * len(betas)
    for i, value in enumerate(betas):
        model = SimpleOG(B = shock, M = state, beta = value)
        ddp = qe.markov.DiscreteDP(model.R, model.Q, model.beta)
        # elapse_LP only prints its timings, so time the whole call here
        start = time.time()
        elapse_LP(ddp)
        n[i] = time.time() - start
    plt.plot(n)
    
# value-iteration time as beta varies
def plot_beta_value(shock, state, betas):
    n = [None] * len(betas)
    for i, value in enumerate(betas):
        model = SimpleOG(B = shock, M = state, beta = value)
        ddp = qe.markov.DiscreteDP(model.R, model.Q, model.beta)
        n[i] = elapse_value(ddp)
    plt.plot(n)
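
None of these plotting helpers are actually called in this notebook; a hypothetical invocation (parameter values chosen purely for illustration) would look like:

plot_graph(20, shock = 10, discount = 0.9)
plot_beta_LP(shock = 10, state = 50, betas = np.linspace(0.5, 0.99, 10))
plot_beta_value(shock = 10, state = 50, betas = np.linspace(0.5, 0.99, 10))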




In [4]:
# So the bottleneck is the setup ("setting") phase.
model = SimpleOG(B = 10, M = 50, beta = 0.99)
ddp = qe.markov.DiscreteDP(model.R, model.Q, model.beta)
elapse_LP(ddp)


setting time 7.82762122154
optimization time 0.031359910965
preparing for answer_ value function 0.000164985656738
preparing for answer_ policy function 0.000349998474121

In [5]:
# Break the setup time down in more detail
def elapse_LP_div(setting):
    
    # setting_model
    start1 = time.time()
    num_state = range(1, setting.num_states + 1)
    num_action = range(1, setting.num_actions + 1)
    LP = Model()
    print "setting_model", time.time() - start1
    
    # setting_vars
    start5 = time.time()
    x = {}
    for i in num_state:
        x[i] = LP.addVar(vtype = "C", name = "x(%s)" %(i))
    LP.update()
    print "setting_vars", time.time() - start5
    
    # setting const
    start6 = time.time()
    for i in num_state:
        for j in num_action:
            LP.addConstr((x[i] - setting.beta * quicksum(setting.Q[i-1, j-1, k-1] * x[k] for k in num_state)) >= setting.R[i-1,j-1])
    print "setting const", time.time() - start6
    
    
    # setting obj
    start7 = time.time()
    LP.setObjective(quicksum(x[i] for i in num_state))
    LP.params.OutputFlag = 0
    print "setting obj", time.time() - start7
    
    
    # optimization time
    start2 = time.time()
    LP.optimize()
    print "optimization time", time.time() - start2
    
    # preparing for answer_ value function
    start3 = time.time()
    v = np.empty(setting.num_states)
    count = 0
    for value in LP.getVars():
        v[count] = value.X
        count += 1
    print "preparing for answer_ value function", time.time() - start3
        
    # preparing for answer_ policy function
    start4 = time.time()
    sigma = setting.compute_greedy(v)
    print "preparing for answer_ policy function", time.time() - start4
    
    # result
    res = qe.markov.ddp.DPSolveResult(v=v, sigma = sigma,
                            mc=setting.controlled_mc(sigma),
                            method = 'Exact Linear Programming (GUROBI setting)',
                            )

In [6]:
# first run
elapse_LP_div(ddp)


setting_model 8.79764556885e-05
setting_vars 0.000349044799805
setting const 8.07694411278
setting obj 0.000422954559326
optimization time 0.0308258533478
preparing for answer_ value function 0.000264883041382
preparing for answer_ policy function 0.000499963760376

In [7]:
# second run
elapse_LP_div(ddp)


setting_model 0.00010085105896
setting_vars 0.000320911407471
setting const 7.98858618736
setting obj 0.000387907028198
optimization time 0.0295398235321
preparing for answer_ value function 0.000235080718994
preparing for answer_ policy function 0.000458955764771

In [8]:
# So, as suspected, adding the constraints is the slow part.
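
One possible speed-up for the constraint build (a sketch only, not timed in this notebook): Q has at most B + 1 nonzero entries per (state, action) pair, so summing only over the nonzero columns keeps each quicksum short. build_constraints_sparse below is a hypothetical helper reusing the names from elapse_LP above.

def build_constraints_sparse(LP, x, setting):
    # same Bellman-inequality constraints as above, but each quicksum runs only
    # over the states reachable from (i, j) instead of over all states
    num_state = range(1, setting.num_states + 1)
    num_action = range(1, setting.num_actions + 1)
    for i in num_state:
        for j in num_action:
            nz = np.nonzero(setting.Q[i-1, j-1])[0]  # 0-based indices with positive probability
            expr = quicksum(setting.Q[i-1, j-1, k] * x[k+1] for k in nz)
            LP.addConstr(x[i] - setting.beta * expr >= setting.R[i-1, j-1])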

In [12]:
# Can Gurobi constraints be stored in a NumPy array?
# No, they cannot (see the TypeError below)
from __future__ import division
from gurobipy import *
import numpy as np

model = Model("k")
x1 = model.addVar(ub=5, lb=0, name="x1")
x2 = model.addVar(ub=6, lb=0, name="x2")
x3 = model.addVar(ub=4, lb=0, name="x3")
model.update()

a = x1 + x2 + x3 == 7  # the constraint expression itself can be bound to a name
b = x1 + x3 <= 5
c = np.empty(2)
c[0] = a
c[1] = b
for i in c:
    model.addConstr(i)

model.setObjective(121 - x1 - 3*x2 - 4*x3)
model.params.OutputFlag = 0
model.optimize()
print "Opt. Value =", model.ObjVal


---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-12-4c23e53cfbd0> in <module>()
     12 b = x1 + x3 <= 5
     13 c = np.empty(2)
---> 14 c[0] = a
     15 c[1] = b
     16 for i in c:

TypeError: float() argument must be a string or a number
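
The failure comes from np.empty itself: a default (float) NumPy array cannot hold Gurobi constraint objects. A plain Python list (or a NumPy array created with dtype=object) can collect them instead. A minimal sketch, reusing the model and variables above:

constrs = [x1 + x2 + x3 == 7, x1 + x3 <= 5]  # plain list of constraint expressions
for con in constrs:
    model.addConstr(con)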
