In [23]:
# imports / display plots in cell output
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as ss
import pandas as pd
import seaborn as sns
import statsmodels
In [24]:
# Bayesian model selection for b1 (bor = .6240)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, positive learning rate, negative learning rate
model_labels = ('Model 1', 'Model 2')
pxp_vals = [0.6880, 0.3120]          # protected exceedance probability per model
bar_pos = list(range(len(model_labels)))
plt.bar(bar_pos, pxp_vals, align='center', alpha=0.5)
plt.xticks(bar_pos, model_labels)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
In [26]:
# Import post-mfit b1 (bandit_either) summary data.
# Previous network location kept for provenance:
#b1 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b1_best_table.csv')
b1 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b1_d100_table.csv')
b1 = b1.drop('subID', axis=1)  # drop subject-identifier column before summarizing
# read_csv already returns a DataFrame, so the pd.DataFrame(b1) wrap was redundant
data = b1
data.describe()
Out[26]:
In [27]:
# Bayesian model selection, four-model comparison (bor = .778e-21)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
model_labels = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
pxp_vals = [1, 0, 0, 0]              # protected exceedance probability per model
bar_pos = list(range(len(model_labels)))
plt.bar(bar_pos, pxp_vals, align='center', alpha=0.5)
plt.xticks(bar_pos, model_labels)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
In [33]:
# Import post-mfit b2 (bandit_either) summary data.
# Previous network location kept for provenance:
#b2 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b2_best_table.csv')
b2 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b2_best_table.csv')
b2 = b2.drop('subID', axis=1)  # drop subject-identifier column before summarizing
# read_csv already returns a DataFrame, so the pd.DataFrame(b2) wrap was redundant
data = b2
data.describe()
Out[33]:
In [35]:
# Heatmap of pairwise correlations among b2 fitted parameters and mean RT.
data = b2[['gems','bomb','it','lr','rt_mean']]
r = data.corr()  # pairwise correlation matrix
# NOTE(review): the export stripped indentation here — the heatmap call must be
# indented inside the `with` block for the "talk" plotting context to apply.
with sns.plotting_context("talk", font_scale=1):
    ax = sns.heatmap(r, annot=True)
    ax.figure.set_size_inches((14, 10))
In [36]:
# Bayesian model selection for b2 (bor = .7427)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
model_labels = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
pxp_vals = [0.3497, 0.1857, 0.1857, 0.2789]  # protected exceedance probabilities
bar_pos = list(range(len(model_labels)))
plt.bar(bar_pos, pxp_vals, align='center', alpha=0.5)
plt.xticks(bar_pos, model_labels)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
In [37]:
# Import post-mfit b3 (bandit_either) summary data.
# Previous network location kept for provenance:
#b3 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b3_best_table.csv')
b3 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b3_best_table.csv')
b3 = b3.drop('subID', axis=1)  # drop subject-identifier column before summarizing
# read_csv already returns a DataFrame, so the pd.DataFrame(b3) wrap was redundant
data = b3
data.describe()
Out[37]:
In [38]:
# Heatmap of pairwise correlations among b3 fitted parameters, intergroup bias, and mean RT.
data = b3[['igbias','gems','bomb','it','lr','pGems','rt_mean']]
r = data.corr()  # pairwise correlation matrix
# NOTE(review): the export stripped indentation here — the heatmap call must be
# indented inside the `with` block for the "talk" plotting context to apply.
with sns.plotting_context("talk", font_scale=1):
    ax = sns.heatmap(r, annot=True)
# ax.figure.set_size_inches((14, 10))  # size override left disabled, as in the original
In [39]:
# Bayesian model selection for b3 (bor = 9.7058e-11)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
model_labels = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
pxp_vals = [2.4264e-11, 2.4264e-11, 2.4264e-11, 1.0000]  # protected exceedance probabilities
bar_pos = list(range(len(model_labels)))
plt.bar(bar_pos, pxp_vals, align='center', alpha=0.5)
plt.xticks(bar_pos, model_labels)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
In [44]:
# Import post-mfit b4 (bandit_double) summary data.
b4 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b4_best_table.csv')
b4 = b4.drop('subID', axis=1)  # drop subject-identifier column before summarizing
# read_csv already returns a DataFrame, so the pd.DataFrame(b4) wrap was redundant
data = b4
data.describe()
Out[44]:
In [41]:
# Heatmap of pairwise correlations among b4 fitted parameters, intergroup bias, and mean RT.
data = b4[['igbias','gems','bomb','wGems','it','lr','rt_mean']]
r = data.corr()  # pairwise correlation matrix
# NOTE(review): the export stripped indentation here — the heatmap call must be
# indented inside the `with` block for the "talk" plotting context to apply.
with sns.plotting_context("talk", font_scale=1):
    ax = sns.heatmap(r, annot=True)
    ax.figure.set_size_inches((14, 10))
- participants receive either 'gems' or 'bomb' type reward on each trial
- reward type for each door determined by fixed probability [0.8 0.6 0.4 0.2]
- intergroup bias computed by subtracting outgroup ID from ingroup ID
- probability of reward twice as high as in Experiment 3
In [42]:
# Bayesian model selection for b4 (bor = .0052)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
model_labels = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
pxp_vals = [0.0013, 0.0013, 0.7480, 0.2494]  # protected exceedance probabilities
bar_pos = list(range(len(model_labels)))
plt.bar(bar_pos, pxp_vals, align='center', alpha=0.5)
plt.xticks(bar_pos, model_labels)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
In [43]:
# Import post-mfit b5 (bandit_either) summary data.
# Previous network location kept for provenance:
#b5 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b5_best_table.csv')
b5 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b5_best_table.csv')
# NOTE(review): unlike b1-b4, subID is not dropped here, so describe() will
# summarize it if the column exists — confirm this is intentional.
# read_csv already returns a DataFrame, so the pd.DataFrame(b5) wrap was redundant
data = b5
data.describe()
Out[43]:
In [45]:
# Heatmap of pairwise correlations among b5 fitted parameters, intergroup bias, and mean RT.
data = b5[['igbias','gems','bomb','pGems','it','lr_pos','lr_neg','rt_mean']]
r = data.corr()  # pairwise correlation matrix
# NOTE(review): the export stripped indentation here — the heatmap call must be
# indented inside the `with` block for the "talk" plotting context to apply.
with sns.plotting_context("talk", font_scale=1):
    ax = sns.heatmap(r, annot=True)
    ax.figure.set_size_inches((14, 10))
- participants receive either 'bill' or 'burning bill' type reward on each trial
- reward type for each door determined by fixed probability [0.8 0.6 0.4 0.2]
- intergroup bias computed by subtracting outgroup ID from ingroup ID
- probability of reward same as in Experiment 3 (mean = 0.25)
In [22]:
# Bayesian model selection for b5 (bor = 4.61e-37)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
model_labels = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
pxp_vals = [1, 0, 0, 0]              # protected exceedance probability per model
bar_pos = list(range(len(model_labels)))
plt.bar(bar_pos, pxp_vals, align='center', alpha=0.5)
plt.xticks(bar_pos, model_labels)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
In [46]:
# Import post-mfit b6 (bandit_either) summary data.
b6 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b6_best_table.csv')
# NOTE(review): unlike b1-b4, subID is not dropped here, so describe() will
# summarize it if the column exists — confirm this is intentional.
# read_csv already returns a DataFrame, so the pd.DataFrame(b6) wrap was redundant
data = b6
data.describe()
Out[46]:
In [52]:
# Heatmap of pairwise correlations among b6 fitted parameters, intergroup bias, and mean RT.
data = b6[['igbias','gems','bomb','pEarn','it','lr','rt_mean']]
r = data.corr()  # pairwise correlation matrix
# NOTE(review): the export stripped indentation here — the heatmap call must be
# indented inside the `with` block for the "talk" plotting context to apply.
with sns.plotting_context("talk", font_scale=1):
    ax = sns.heatmap(r, annot=True)
    ax.figure.set_size_inches((14, 10))
- participants receive either 'bill' or 'burning bill' type reward on each trial
- reward type for each door determined by fixed probability [0.8 0.6 0.4 0.2]
- intergroup bias computed by subtracting outgroup ID from ingroup ID
- probability of reward same as in Experiment 5 (mean = 0.5)
In [48]:
# Bayesian model selection for b6 (bor = 1.410e-7)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
model_labels = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
pxp_vals = [0.000001, 0.000001, 0.99999, 0.000001]  # protected exceedance probabilities
bar_pos = list(range(len(model_labels)))
plt.bar(bar_pos, pxp_vals, align='center', alpha=0.5)
plt.xticks(bar_pos, model_labels)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
In [49]:
# Import post-mfit b7 (bandit_either) summary data.
# (Original comment said "b5" — copy-paste error; this cell loads b7.)
b7 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b7_best_table.csv')
# NOTE(review): unlike b1-b4, subID is not dropped here, so describe() will
# summarize it if the column exists — confirm this is intentional.
# read_csv already returns a DataFrame, so the pd.DataFrame(b7) wrap was redundant
data = b7
data.describe()
Out[49]:
In [51]:
# Heatmap of pairwise correlations among b7 fitted parameters, intergroup bias, and mean RT.
data = b7[['igbias','gems','bomb','pEarn','it','lr_pos','lr_neg','rt_mean']]
r = data.corr()  # pairwise correlation matrix
# NOTE(review): the export stripped indentation here — the heatmap call must be
# indented inside the `with` block for the "talk" plotting context to apply.
with sns.plotting_context("talk", font_scale=1):
    ax = sns.heatmap(r, annot=True)
    ax.figure.set_size_inches((14, 10))
In [ ]: