In [5]:
# imports / display plots in cell output
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as ss
import pandas as pd
import seaborn as sns
import statsmodels
In [6]:
# Bayesian Model Selection (Bayes omnibus risk, bor = .6240)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, positive learning rate, negative learning rate
models = ('Model 1', 'Model 2')
y_pos = np.arange(len(models))
pxp = [0.6880, 0.3120]
plt.bar(y_pos, pxp, align='center', alpha=0.5)
plt.xticks(y_pos, models)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
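The bor and pxp numbers in these model-selection cells come from the fitting pipeline, not from this notebook. As a cross-check, protected exceedance probabilities follow from the raw exceedance probabilities and the Bayes omnibus risk via pxp_k = ep_k * (1 - bor) + bor / K (Rigoux et al., 2014); a minimal sketch, where ep = [1, 0] is the value implied by the pxp plotted above rather than a number taken from the fitting output:
In [ ]:
# cross-check: pxp_k = ep_k * (1 - bor) + bor / K (Rigoux et al., 2014)
# ep is inferred from the pxp above, not read from the fitting output
ep = np.array([1.0, 0.0])    # raw exceedance probabilities
bor = 0.6240                 # Bayes omnibus risk reported above
pxp = ep * (1 - bor) + bor / len(ep)
print(pxp)                   # [0.688, 0.312], matching the bar heights above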
In [7]:
# import post-mfit b1 (bandit_either) summary data
#b1 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b1_d100_table.csv')
b1 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b1_d100_table.csv')
b1 = b1.drop('subID', axis=1)
b1.describe()
Out[7]:
In [8]:
# plot differences in payout
with sns.plotting_context('talk', font_scale=1.4):
    sns.set_style("darkgrid")
    ax = sns.pointplot(data=b1, x='condition', y='payout', palette=['#FFD479', '#D783FF'])
    ax.figure.get_axes()[0].set_xticklabels(['bomb', 'gems'])
    #ax.figure.savefig('b1_pointplot')
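To quantify the difference the pointplot shows, the payouts could be compared directly with the scipy.stats import above; a minimal sketch, assuming 'condition' codes exactly two groups:
In [ ]:
# sketch: independent-samples t-test on payout by condition
# assumes 'condition' takes exactly two values
groups = [g['payout'].values for _, g in b1.groupby('condition')]
t, p = ss.ttest_ind(groups[0], groups[1])
print('t = %.3f, p = %.4f' % (t, p))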
In [9]:
# Bayesian Model Selection (bor = .778e-21)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
models = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
y_pos = np.arange(len(models))
pxp = [1, 0, 0, 0]
plt.bar(y_pos, pxp, align='center', alpha=0.5)
plt.xticks(y_pos, models)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
In [10]:
# import post-mfit b2 (bandit_either) summary data
#b2 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b2_d100_table.csv')
b2 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b2_d100_table.csv')
b2 = b2.drop('subID', axis=1)
b2.describe()
Out[10]:
In [11]:
# plot preference for gems in terms of door probability
pDoor = b2[['chose80','chose60','chose40','chose20']]
with sns.plotting_context('talk', font_scale=1.4):
    sns.set_style("darkgrid")
    ax = sns.pointplot(data=pDoor, palette=['#3DDA60', '#fde73f', '#62afea', '#EF5050'])
    ax.figure.get_axes()[0].set_xticklabels(['0.80', '0.60', '0.40', '0.20'])
    #ax.figure.savefig('b2_pointplot')
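This choice-by-door pointplot is repeated nearly verbatim for b3, b5, b6, and b7 below; a small helper is one way to keep those cells in sync (a refactoring sketch, not part of the original pipeline):
In [ ]:
# optional refactor: one helper for the repeated choice-by-door pointplots
def plot_choice_by_door(df, fname=None):
    pDoor = df[['chose80', 'chose60', 'chose40', 'chose20']]
    with sns.plotting_context('talk', font_scale=1.4):
        sns.set_style("darkgrid")
        ax = sns.pointplot(data=pDoor, palette=['#3DDA60', '#fde73f', '#62afea', '#EF5050'])
        ax.figure.get_axes()[0].set_xticklabels(['0.80', '0.60', '0.40', '0.20'])
        if fname is not None:
            ax.figure.savefig(fname)
    return ax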
In [12]:
# Bayesian Model Selection (bor = .7427)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
models = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
y_pos = np.arange(len(models))
pxp = [0.3497, 0.1857, 0.1857, 0.2789]
plt.bar(y_pos, pxp, align='center', alpha=0.5)
plt.xticks(y_pos, models)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
In [14]:
# import post-mfit b3 (bandit_either) summary data
#b3 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/clean/b3_d100_table.csv')
b3 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b3_d100_table.csv')
b3 = b3.drop('subID', axis=1)
b3.describe()
Out[14]:
In [15]:
# plot preference for gems in terms of door probability
pDoor = b3[['chose80','chose60','chose40','chose20']]
with sns.plotting_context('talk', font_scale=1.4):
    sns.set_style("darkgrid")
    ax = sns.pointplot(data=pDoor, palette=['#3DDA60', '#fde73f', '#62afea', '#EF5050'])
    ax.figure.get_axes()[0].set_xticklabels(['0.80', '0.60', '0.40', '0.20'])
    #ax.figure.savefig('b3_pointplot')
In [43]:
# regression of intergroup bias on model-based preference for gems
data = b3[['igbias','wGems']]
with sns.plotting_context('talk', font_scale=1.2):
    ax = (sns.jointplot(x='igbias', y='wGems', data=data, kind='reg', annot_kws=dict(stat='r'))
          .set_axis_labels('intergroup bias', 'weight on gems parameter'))
    ax.savefig('b3_igbias_wGems')
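Note that annot_kws=dict(stat='r') relies on the regression annotation in older seaborn releases; that annotation (and stat_func) was removed in later versions. If the r value no longer appears on the plot, it can be computed directly with the scipy.stats import above:
In [ ]:
# fallback if the seaborn version no longer annotates r on jointplots
r, p = ss.pearsonr(b3['igbias'], b3['wGems'])
print('r = %.3f, p = %.4f' % (r, p))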
In [17]:
# regression of intergroup bias on preference for gems
data = b3[['igbias','pGems']]
with sns.plotting_context('talk', font_scale=1.4):
    ax = (sns.jointplot(x='igbias', y='pGems', data=data, kind='reg', annot_kws=dict(stat='r'))
          .set_axis_labels('intergroup bias', 'preference for gems'))
    #ax.savefig('b3_igbias_pGems')
In [18]:
# Bayesian Model Selection (bor = 9.7058e-11)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
models = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
y_pos = np.arange(len(models))
pxp = [2.4264e-11, 2.4264e-11, 2.4264e-11, 1.0000]
plt.bar(y_pos, pxp, align='center', alpha=0.5)
plt.xticks(y_pos, models)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
In [21]:
# import post-mfit b4 (bandit_double) summary data
#b4 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b4_d100_table.csv')
b4 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b4_best_table.csv')
b4 = b4.drop('subID', axis=1)
b4.describe()
Out[21]:
- participants receive either a 'gems' or a 'bomb' type reward on each trial
- reward type for each door is determined by a fixed probability [0.8, 0.6, 0.4, 0.2]
- intergroup bias is computed by subtracting outgroup ID from ingroup ID (see the sketch after this list)
- probability of reward is twice as high as in Experiment 3
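The intergroup bias measure is a simple difference score; a minimal sketch, using hypothetical column names 'ingroupID' and 'outgroupID' for the raw identification scores (the actual column names in the raw data may differ):
In [ ]:
# sketch of the intergroup bias computation; 'ingroupID' and 'outgroupID'
# are hypothetical names for the raw identification scores
toy = pd.DataFrame({'ingroupID': [6, 4, 7], 'outgroupID': [2, 5, 3]})
toy['igbias'] = toy['ingroupID'] - toy['outgroupID']   # positive = ingroup favoritism
toy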
In [23]:
# Bayesian Model Selection (bor = .0052)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
models = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
y_pos = np.arange(len(models))
pxp = [0.0013, 0.0013, 0.7480, 0.2494]
plt.bar(y_pos, pxp, align='center', alpha=0.5)
plt.xticks(y_pos, models)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
In [25]:
# import post-mfit b5 (bandit_either) summary data
#b5 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b5_d100_table.csv')
b5 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b5_d100_table.csv')
b5.describe()
Out[25]:
In [26]:
# plot preference for gems in terms of door probability
pDoor = b5[['chose80','chose60','chose40','chose20']]
with sns.plotting_context('talk', font_scale=1.4):
    sns.set_style("darkgrid")
    ax = sns.pointplot(data=pDoor, palette=['#3DDA60', '#fde73f', '#62afea', '#EF5050'])
    ax.figure.get_axes()[0].set_xticklabels(['0.80', '0.60', '0.40', '0.20'])
    ax.figure.savefig('b5_pointplot')
In [35]:
# regression of intergroup bias on preference for gems
data = b5[['igbias','pGems']]
with sns.plotting_context('talk', font_scale=1.4):
    ax = (sns.jointplot(x='igbias', y='pGems', data=data, kind='reg', annot_kws=dict(stat='r'))
          .set_axis_labels('intergroup bias', 'preference for gems'))
    ax.savefig('b5_igbias_pGems')
- participants receive either a 'bill' or a 'burning bill' type reward on each trial
- reward type for each door is determined by a fixed probability [0.8, 0.6, 0.4, 0.2]
- intergroup bias is computed by subtracting outgroup ID from ingroup ID
- probability of reward is the same as in Experiment 3 (mean = 0.25)
In [28]:
# Bayesian Model Selection (bor = 4.61e-37)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
models = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
y_pos = np.arange(len(models))
pxp = [1, 0, 0, 0]
plt.bar(y_pos, pxp, align='center', alpha=0.5)
plt.xticks(y_pos, models)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
In [30]:
# import post-mfit b6 (bandit_either) summary data
#b6 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b6_d25_table.csv')
b6 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b6_d25_table.csv')
b6.describe()
Out[30]:
In [42]:
# plot door choice proportions by reward probability
pDoor = b6[['chose80','chose60','chose40','chose20']]
with sns.plotting_context('talk', font_scale=1.4):
    sns.set_style("darkgrid")
    ax = sns.pointplot(data=pDoor, palette=['#3DDA60', '#fde73f', '#62afea', '#EF5050'])
    ax.figure.get_axes()[0].set_xticklabels(['0.80', '0.60', '0.40', '0.20'])
    ax.figure.savefig('b6_pointplot')
In [36]:
# regression of behavioral 'preference for burn' on intergroup bias
data = b6[['pBurn','igbias']]
with sns.plotting_context('talk', font_scale=1.2):
    ax = (sns.jointplot(x='igbias', y='pBurn', data=data, kind='reg', annot_kws=dict(stat='r'))
          .set_axis_labels('intergroup bias', 'preference for burn'))
In [ ]:
# regression of behavioral 'preference for earn' on intergroup bias
data = b6[['pEarn','igbias']]
with sns.plotting_context('talk', font_scale=1.2):
    ax = (sns.jointplot(x='igbias', y='pEarn', data=data, kind='reg', annot_kws=dict(stat='r'))
          .set_axis_labels('intergroup bias', 'preference for earn'))
In [37]:
# regression of 'preference for earn' parameter on intergroup bias
data = b6[['wEarn','igbias']]
with sns.plotting_context('talk', font_scale=1.2):
    ax = (sns.jointplot(x='igbias', y='wEarn', data=data, kind='reg', annot_kws=dict(stat='r'))
          .set_axis_labels('intergroup bias', 'preference for earn parameter'))
- participants receive either a 'bill' or a 'burning bill' type reward on each trial
- reward type for each door is determined by a fixed probability [0.8, 0.6, 0.4, 0.2]
- intergroup bias is computed by subtracting outgroup ID from ingroup ID
- probability of reward is the same as in Experiment 5 (mean = 0.5)
In [38]:
# Bayesian Model Selection (bor = 1.410e-7)
# Model 1: inverse temperature, stickiness, learning rate
# Model 2: inverse temperature, stickiness, gems learning rate, bomb learning rate
# Model 3: inverse temperature, stickiness, positive learning rate, negative learning rate
# Model 4: inverse temperature, stickiness, learning rate, gems preference
models = ('Model 1', 'Model 2', 'Model 3', 'Model 4')
y_pos = np.arange(len(models))
pxp = [0.000001, 0.000001, 0.99999, 0.000001]
plt.bar(y_pos, pxp, align='center', alpha=0.5)
plt.xticks(y_pos, models)
plt.ylabel('protected exceedance probability')
plt.title('Bayesian model selection')
plt.show()
In [39]:
# import post-mfit b7 (bandit_either) summary data
#b7 = pd.read_csv('/Volumes/crisp/hinl/bandit/gems_vs_bomb/rez/b7_d25_table.csv')
b7 = pd.read_csv('~/Desktop/bandit/gems_vs_bomb/rez/b7_d25_table.csv')
b7.describe()
Out[39]:
In [41]:
# plot door choice proportions by reward probability
pDoor = b7[['chose80','chose60','chose40','chose20']]
with sns.plotting_context('talk', font_scale=1.4):
    sns.set_style("darkgrid")
    ax = sns.pointplot(data=pDoor, palette=['#3DDA60', '#fde73f', '#62afea', '#EF5050'])
    ax.figure.get_axes()[0].set_xticklabels(['0.80', '0.60', '0.40', '0.20'])
    ax.figure.savefig('b7_pointplot')
In [ ]:
# regression of behavioral 'preference for earn' on intergroup bias
data = b7[['pEarn','igbias']]
with sns.plotting_context('talk', font_scale=1.2):
    ax = (sns.jointplot(x='igbias', y='pEarn', data=data, kind='reg', annot_kws=dict(stat='r'))
          .set_axis_labels('intergroup bias', 'preference for earn'))
In [ ]:
# regression of 'preference for earn' parameter on intergroup bias
data = b7[['wEarn','igbias']]
with sns.plotting_context('talk', font_scale=1.2):
    ax = (sns.jointplot(x='igbias', y='wEarn', data=data, kind='reg', annot_kws=dict(stat='r'))
          .set_axis_labels('intergroup bias', 'preference for earn parameter'))
In [27]:
# print the working directory (quick sanity check for the CSV paths)
!pwd