In [51]:
#!/usr/bin/python
#-*- encoding: utf-8 -*-
"""
KMR (Kandori-Mailath-Rob) Model
Copyright (c) 2015 @myuuuuun
https://github.com/myuuuuun/KMR
Released under the MIT license.
"""
# error handling is left for later
# some parts are redundant, but cleaning that up is also left for later
%matplotlib inline
import numpy as np
import scipy as sc
import quantecon as qe
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from kmr import *
The transition matrix of the Markov chain is:
In [12]:
array = [[4, 0], [3, 2]]
kmr = KMR(array, 10, 0.1)
rst = ""
for v in kmr.transition_matrix:
    for j in v:
        rst += "{0:.3f} ".format(j)
    rst += "\n"
print(rst)
(Columns: number of players choosing action 1 before the transition; rows: number of players choosing action 1 after the transition.)
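For reference, the kind of transition matrix printed above can be built by hand for a symmetric 2-action game. The sketch below assumes a sequential-revision KMR dynamic (each period one randomly chosen player revises, best-responding with probability 1 - ε and picking an action uniformly at random with probability ε) and a row-stochastic layout (rows = current state); kmr.py may use a different revision protocol or orientation, so treat this as an illustration, not a reimplementation.
In [ ]:
import numpy as np

def kmr_transition_matrix(payoffs, N, epsilon):
    """Sequential-revision KMR chain over x = 0, ..., N, where x is the
    number of players currently on the first action (rows = current state)."""
    A = np.asarray(payoffs, dtype=float)

    def choose_first(k):
        # Probability the revising player ends up on the first action
        # when k of the other N - 1 players are playing it.
        u1 = (A[0, 0] * k + A[0, 1] * (N - 1 - k)) / (N - 1)
        u2 = (A[1, 0] * k + A[1, 1] * (N - 1 - k)) / (N - 1)
        best = 1.0 if u1 > u2 else (0.5 if u1 == u2 else 0.0)
        return (1 - epsilon) * best + epsilon * 0.5

    P = np.zeros((N + 1, N + 1))
    for x in range(N + 1):
        if x > 0:    # a first-action player is drawn and switches away
            P[x, x - 1] = (x / N) * (1 - choose_first(x - 1))
        if x < N:    # a second-action player is drawn and switches in
            P[x, x + 1] = ((N - x) / N) * choose_first(x)
        P[x, x] = 1.0 - P[x].sum()   # remaining mass: state unchanged
    return P

print(kmr_transition_matrix([[4, 0], [3, 2]], 10, 0.1))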
The stationary distribution is:
In [52]:
state_players = kmr.state_players
stationary_distribution = kmr.compute_stationary_distribution()[0]
for p, d in zip(state_players, stationary_distribution):
    print("{0}: {1:.3f}".format(p, d))
(Each line reads [number of players on action 1, number of players on action 2]: probability.) Interestingly, the process concentrates on the pure-strategy Nash equilibrium that is not Pareto efficient.
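The same distribution can also be read off directly with quantecon's MarkovChain, assuming transition_matrix is (or converts to) a row-stochastic array; if it follows the column convention of the note above, pass its transpose instead. A sketch:
In [ ]:
import numpy as np
import quantecon as qe

mc = qe.MarkovChain(np.asarray(kmr.transition_matrix))
# For epsilon > 0 the chain is irreducible, so there is a unique
# stationary distribution.
psi = mc.stationary_distributions[0]
for players, prob in zip(kmr.state_players, psi):
    print("{0}: {1:.3f}".format(players, prob))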
Simulating for 1000 periods, the sequence of visited states is:
In [48]:
simulated = kmr.from_stateindex_to_stateplayersnum(kmr.simulate(1000, 5, start_init=True))
print(simulated)
Plotting this as a graph:
In [26]:
kmr.plot_simulation(1000, 5)
The plot roughly agrees with the stationary distribution.
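One way to make "roughly agrees" concrete is to compare the empirical visit frequencies of a long simulated path with the stationary distribution. This sketch assumes, as the cell above suggests, that simulate() returns an array of state indices aligned with state_players:
In [ ]:
import numpy as np

path = np.asarray(kmr.simulate(100000, 5, start_init=True))
freq = np.bincount(path, minlength=len(kmr.state_players)) / len(path)
psi = kmr.compute_stationary_distribution()[0]
for players, f, p in zip(kmr.state_players, freq, psi):
    print("{0}: empirical {1:.3f}, stationary {2:.3f}".format(players, f, p))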
Trying other initial states:
In [27]:
kmr.plot_simulation(1000, 0)
kmr.plot_simulation(1000, 2)
kmr.plot_simulation(1000, 4)
kmr.plot_simulation(1000, 6)
kmr.plot_simulation(1000, 8)
kmr.plot_simulation(1000, 10)
With initial states 0 through 6 the outcome follows the stationary distribution, but with initial states 8 or 10 the (action 1, action 1) Nash equilibrium seems more likely to be realized.
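The same initial-state dependence can be checked without simulation by raising the transition matrix to the 1000th power and reading off each starting row. This assumes rows index the current state and that state index i means i players on action 1; transpose or relabel if kmr.py uses the opposite conventions:
In [ ]:
import numpy as np

P = np.asarray(kmr.transition_matrix)
P1000 = np.linalg.matrix_power(P, 1000)
for x0 in (0, 2, 4, 6, 8, 10):
    # probability of being near the (action 1, action 1) equilibrium
    print("start {0:>2}: P(state >= 8 after 1000 periods) = {1:.3f}".format(
        x0, P1000[x0, 8:].sum()))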
Increasing the time series beyond 1000 periods:
In [54]:
kmr.plot_simulation(10000, 8)
kmr.plot_simulation(10000, 10)
kmr.plot_simulation(100000, 10)
kmr.plot_simulation(500000, 10)
kmr.plot_simulation(1000000, 10)
If the process happens to drift toward the (action 2, action 2) Nash equilibrium along the way, it stays (almost) permanently stuck there afterwards.
In [35]:
array = [[4, 0], [3, 2]]
kmr2 = KMR(array, 10, 0.2)
kmr3 = KMR(array, 10, 0.01)

# transition matrix for epsilon = 0.2
rst = ""
for v in kmr2.transition_matrix:
    for j in v:
        rst += "{0:.3f} ".format(j)
    rst += "\n"
print("Transition matrix for epsilon = 0.2")
print(rst)

# transition matrix for epsilon = 0.01
rst = ""
for v in kmr3.transition_matrix:
    for j in v:
        rst += "{0:.3f} ".format(j)
    rst += "\n"
print("\nTransition matrix for epsilon = 0.01")
print(rst)
In [34]:
kmr2.plot_simulation(1000, 0)
kmr2.plot_simulation(1000, 2)
kmr2.plot_simulation(1000, 4)
kmr2.plot_simulation(1000, 6)
kmr2.plot_simulation(1000, 8)
kmr2.plot_simulation(1000, 10)
When the probability of "experimenting" rises, the process moves back and forth between the two (pure-strategy) Nash equilibria.
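To quantify the back-and-forth, one can count how often a long simulated path travels from one deep basin (state index 2 or below) to the other (8 or above). Same assumption as before: simulate() returns state indices, with index i meaning i players on action 1:
In [ ]:
import numpy as np

path = np.asarray(kmr2.simulate(100000, 5, start_init=True))
last_basin = None
switches = 0
for x in path:
    if x <= 2:
        basin = 0
    elif x >= 8:
        basin = 1
    else:
        continue                 # ignore the middle states
    if last_basin is not None and basin != last_basin:
        switches += 1
    last_basin = basin
print("basin-to-basin crossings over 100000 periods (epsilon = 0.2):", switches)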
In [55]:
kmr2.plot_simulation(10000, 10)
kmr2.plot_simulation(50000, 10)
kmr2.plot_simulation(100000, 10)
kmr2.plot_simulation(500000, 10)
kmr2.plot_simulation(1000000, 10)
In [36]:
kmr3.plot_simulation(1000, 0)
kmr3.plot_simulation(1000, 2)
kmr3.plot_simulation(1000, 4)
kmr3.plot_simulation(1000, 6)
kmr3.plot_simulation(1000, 8)
kmr3.plot_simulation(1000, 10)
When the probability of "experimenting" falls, once the process gets stuck at one pure-strategy Nash equilibrium, it rarely leaves.
In [53]:
kmr3.plot_simulation(10000, 10)
kmr3.plot_simulation(50000, 10)
kmr3.plot_simulation(100000, 10)
kmr3.plot_simulation(500000, 10)
kmr3.plot_simulation(1000000, 10)
Even over these longer horizons, it rarely leaves.
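Putting the three values of ε side by side, the share of periods spent near the (action 1, action 1) equilibrium from the same starting state gives a rough measure of this stickiness. Again a sketch under the assumption that simulate() returns state indices:
In [ ]:
import numpy as np

for model, eps in ((kmr, 0.1), (kmr2, 0.2), (kmr3, 0.01)):
    path = np.asarray(model.simulate(100000, 10, start_init=True))
    share = np.mean(path >= 8)
    print("epsilon = {0}: share of periods with state >= 8: {1:.3f}".format(eps, share))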
In [59]:
array = [[6, 0, 0],
         [5, 7, 5],
         [0, 5, 8]]
kmr = KMR(array, 10, 0.1)
state_players = kmr.state_players
stationary_distribution = kmr.compute_stationary_distribution()[0]
for p, d in zip(state_players, stationary_distribution):
    print("{0}: {1:.3f}".format(p, d))
For this 3-action game, the process seems most likely to settle on the (action 2, action 2) Nash equilibrium.
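That impression can be checked by summing the stationary probability mass near each pure-strategy equilibrium. The sketch assumes state_players is a list of [n1, n2, n3] counts aligned with the stationary distribution:
In [ ]:
import numpy as np

psi = np.asarray(kmr.compute_stationary_distribution()[0])
players = np.asarray(kmr.state_players)
for a in range(3):
    # mass on states where at least 8 of the 10 players take action a + 1
    mass = psi[players[:, a] >= 8].sum()
    print("action {0}: stationary mass with >= 8 players on it: {1:.3f}".format(a + 1, mass))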
Simulating with time_series=1000:
In [68]:
kmr.plot_simulation(1000, kmr.from_stateplayersnum_to_stateindex([0, 0, 10]))
kmr.plot_simulation(1000, kmr.from_stateplayersnum_to_stateindex([0, 5, 5]))
kmr.plot_simulation(1000, kmr.from_stateplayersnum_to_stateindex([0, 10, 0]))
kmr.plot_simulation(1000, kmr.from_stateplayersnum_to_stateindex([5, 0, 5]))
kmr.plot_simulation(1000, kmr.from_stateplayersnum_to_stateindex([5, 5, 0]))
kmr.plot_simulation(1000, kmr.from_stateplayersnum_to_stateindex([10, 0, 0]))
The (action 1, action 1) equilibrium does not last long.
With time_series=50000:
In [69]:
kmr.plot_simulation(50000, kmr.from_stateplayersnum_to_stateindex([0, 0, 10]))
kmr.plot_simulation(50000, kmr.from_stateplayersnum_to_stateindex([0, 5, 5]))
kmr.plot_simulation(50000, kmr.from_stateplayersnum_to_stateindex([0, 10, 0]))
kmr.plot_simulation(50000, kmr.from_stateplayersnum_to_stateindex([5, 0, 5]))
kmr.plot_simulation(50000, kmr.from_stateplayersnum_to_stateindex([5, 5, 0]))
kmr.plot_simulation(50000, kmr.from_stateplayersnum_to_stateindex([10, 0, 0]))
The (action 2, action 2) and (action 3, action 3) equilibria occur frequently.
In [70]:
array = [[6, 0, 0],
         [5, 7, 5],
         [0, 5, 8]]
kmr2 = KMR(array, 10, 0.2)
kmr2.plot_simulation(50000, kmr2.from_stateplayersnum_to_stateindex([0, 0, 10]))
kmr2.plot_simulation(50000, kmr2.from_stateplayersnum_to_stateindex([0, 5, 5]))
kmr2.plot_simulation(50000, kmr2.from_stateplayersnum_to_stateindex([0, 10, 0]))
kmr2.plot_simulation(50000, kmr2.from_stateplayersnum_to_stateindex([5, 0, 5]))
kmr2.plot_simulation(50000, kmr2.from_stateplayersnum_to_stateindex([5, 5, 0]))
kmr2.plot_simulation(50000, kmr2.from_stateplayersnum_to_stateindex([10, 0, 0]))
In [73]:
array = [[6, 0, 0],
         [5, 7, 5],
         [0, 5, 8]]
kmr3 = KMR(array, 10, 0.01)
kmr3.plot_simulation(500000, kmr3.from_stateplayersnum_to_stateindex([0, 0, 10]))
kmr3.plot_simulation(500000, kmr3.from_stateplayersnum_to_stateindex([0, 5, 5]))
kmr3.plot_simulation(500000, kmr3.from_stateplayersnum_to_stateindex([0, 10, 0]))
kmr3.plot_simulation(500000, kmr3.from_stateplayersnum_to_stateindex([5, 0, 5]))
kmr3.plot_simulation(500000, kmr3.from_stateplayersnum_to_stateindex([5, 5, 0]))
kmr3.plot_simulation(500000, kmr3.from_stateplayersnum_to_stateindex([10, 0, 0]))