This code needs cleanup, because part of it was copied from another script.

```
In [4]:
```#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, with_statement
import csv, os
import numpy as np
import statsmodels.api as sm
# Root of the RuleSwitch analysis tree; behavioural logs live under Raw_Behave/.
proj_dir = "/mnt/net/LaCie/Analysis/RuleSwitch/"

# Zero-padded subject IDs ('007' and '009' are absent from the list).
subjects = ['001', '002', '003', '004', '005', '006', '008', '010',
            '011', '012', '013', '014', '015', '016', '017', '018',
            '019', '020', '021', '022', '023', '024', '025', '026', '027']

# Pooled accumulators, filled once per subject by the loop below.
mat_rules_vec, cat_rules_vec = [], []  # rules completed per run
mat_tc_vec1, cat_tc_vec1 = [], []      # trials-to-criterion values
mat_tc_vec2, cat_tc_vec2 = [], []      # condition codes (1 = matching, 0 = classification)
mat_tc_vec3, cat_tc_vec3 = [], []      # subject IDs as ints
def rule_select(trial, label):
    """Return the list of rules consistent with classifying stimulus
    `trial` (a 4-character string of '1'/'2' feature values) as `label`.

    Each stimulus position j maps to rule rset1[j] or rset2[j] depending on
    the feature value at j and the category label.  The label convention is
    correct for the RuleSwitch project but may be wrong for other data sets
    (note preserved from the original).
    """
    rset1 = [4, 3, 2, 1]
    rset2 = [8, 7, 6, 5]
    keep_rules = []
    for j in range(len(trial)):
        if label == 'A':
            keep_rules.append(rset1[j] if trial[j] == '1' else rset2[j])
        else:
            keep_rules.append(rset1[j] if trial[j] == '2' else rset2[j])
    return keep_rules


# Exact feedback strings emitted in the behavioural logs (column 16).
_CORRECT = ("Correct",
            "Correct. The correct category is 'A'.",
            "Correct. The correct category is 'B'.")
_NO_RESPONSE = ("Failed to respond",
                "Failed to respond. The correct category is 'A'.",
                "Failed to respond. The correct category is 'B'.")

for subj in subjects:
    ss_path = os.path.join(proj_dir, 'Raw_Behave', subj + 'data.txt')
    # Read the tab-separated behavioural log into a 2-D string array.
    with open(ss_path) as fh:
        ss_data = np.array(list(csv.reader(fh, delimiter="\t")))

    # Correctness vectors: 1 = correct, 0 = incorrect, 9 = no response (NA).
    cor_mat = []      # matching trials
    cor_cat = []      # classification trials
    mat_trials = []   # trial counter (column 6) for each matching trial
    for row in range(len(ss_data)):
        # These four subjects have an unusable first run (32 trials) — skip it.
        if subj in ('003', '011', '016', '020') and row < 32:
            continue
        answer = str(ss_data[row][16])   # feedback text
        phase = int(ss_data[row][2])     # 1 = matching, 2 = classification
        if answer in _CORRECT:
            score = 1
        elif answer in _NO_RESPONSE:
            score = 9                    # NA
        else:
            score = 0
        if phase == 1:
            cor_mat.append(score)
            mat_trials.append(int(ss_data[row][6]))
        else:
            cor_cat.append(score)

    # att_cor_cat[row] holds two flags per classification trial, set by the
    # streak-selection loop below.
    att_cor_cat = np.zeros((len(cor_cat), 2))
    # NOTE(review): cor_mat_np is never read after being built — candidate
    # for removal once confirmed nothing downstream (analysis.r) needs it.
    cor_mat_np = np.zeros((len(cor_mat), 2))
    cor_mat_np[:, 0] = cor_mat
    cor_mat_np[:, 1] = cor_mat

    # Stimulus strings (col 7) and feedback (col 16) for classification
    # trials only; the category letter is the third-from-last character of
    # the feedback text.
    cls_rows = np.where(ss_data[:, 2] == "2")
    trial_stim = ss_data[cls_rows, 7][0]
    labels = [lab[-3] for lab in ss_data[cls_rows, 16][0]]

    # Keep only classification trials belonging to a run of >= 4 correct
    # answers where a single rule could explain consecutive choices.
    for row in range(3, len(cor_cat)):
        trial, prev_trial = trial_stim[row], trial_stim[row - 1]
        label, prev_label = labels[row], labels[row - 1]
        if (cor_cat[row] == 1 and cor_cat[row - 1] == 1 and
                cor_cat[row - 2] == 1 and cor_cat[row - 3] == 1):
            att_cor_cat[row] = [1, 1]
            att_cor_cat[row - 1] = [1, 1]
            att_cor_cat[row - 2] = [1, 1]
            att_cor_cat[row - 3] = [1, 1]
            rules_keep = set(rule_select(prev_trial, prev_label)) & \
                set(rule_select(trial, label))
            # No rule shared with the previous trial -> not a real streak.
            if not rules_keep:
                att_cor_cat[row] = [0, 0]
        elif cor_cat[row] == 0 and (att_cor_cat[row - 1] == np.array([1., 1.])).all():
            # An error right after a streak still counts as "attended" when
            # a shared rule could explain both trials.
            rules_keep = set(rule_select(prev_trial, prev_label)) & \
                set(rule_select(trial, label))
            if rules_keep:
                att_cor_cat[row] = [1, 0]
        elif cor_cat[row] == 9:
            att_cor_cat[row] = [9, 9]
    # The loop above starts at row 3; propagate the NA marker for rows 0-2.
    for row in range(3):
        if cor_cat[row] == 9:
            att_cor_cat[row] = [9, 9]

    # Running count of consecutive counted-correct answers, reset on a miss.
    mod = 0
    att_cor_cat_mod = []
    for row in range(len(att_cor_cat)):
        mod = mod + 1 if att_cor_cat[row, 1] == 1 else 0
        att_cor_cat_mod.append(mod)

    # Rules completed per 32-trial run (true division — __future__ import).
    mat_rules = (mat_trials.count(0) - 1) / (len(mat_trials) / 32)
    cat_rules = att_cor_cat_mod.count(1) / (len(att_cor_cat_mod) / 32)
    mat_rules_vec.append(mat_rules)
    cat_rules_vec.append(cat_rules)

    # Trials to criterion, matching: the counter value reached just before
    # each reset to 0 (a reset marks a new rule).
    mat_tc_list = [mat_trials[i - 1]
                   for i in range(1, len(mat_trials))
                   if mat_trials[i] == 0]
    # Trials to criterion, classification: length of each streak that had
    # reached >= 4 consecutive correct before the counter reset.
    cat_tc_list = []
    cat_tc = 1
    for i in range(1, len(att_cor_cat_mod)):
        # NOTE(review): cat_tc can never be 0 here (it starts at 1 and is
        # reset to 1); the `cat_tc != 0` guard is kept from the original.
        if att_cor_cat_mod[i] == 0 and att_cor_cat_mod[i - 1] >= 4 and cat_tc != 0:
            cat_tc_list.append(cat_tc)   # new rule
            cat_tc = 1
        else:
            cat_tc += 1

    # Pool per-subject results into the common vectors.
    mat_tc_vec1.extend(mat_tc_list)
    cat_tc_vec1.extend(cat_tc_list)
    mat_tc_vec2.extend([1] * len(mat_tc_list))            # 1 = matching
    cat_tc_vec2.extend([0] * len(cat_tc_list))            # 0 = classification
    mat_tc_vec3.extend([int(subj)] * len(mat_tc_list))    # subject ID
    cat_tc_vec3.extend([int(subj)] * len(cat_tc_list))

# Dump the pooled vectors; the statistics continue in analysis.r.
print(mat_tc_vec1)
print(mat_tc_vec2)
print(mat_tc_vec3)
print()
print(cat_tc_vec1)
print(cat_tc_vec2)
print(cat_tc_vec3)
#### CONTINUE IN analysis.r FILE ####
# print "===Analysis of rules per run per subject==="
# # mean and standard deviation
# print "Matching"
# print "Rules per run per subject: ", mat_rules_vec
# print "Mean: ", np.mean(mat_rules_vec)
# print "SD: ", np.std(mat_rules_vec)
# print ""
# print "Classification"
# print "Rules per run per subject: ", cat_rules_vec
# print "Mean: ", np.mean(cat_rules_vec)
# print "SD: ", np.std(cat_rules_vec)
# print ""
# # t-test
# t, p, df = sm.stats.ttest_ind(mat_rules_vec, cat_rules_vec)
# print "t-test:"
# print "t=",t
# print "p=",p
# print "df=",df

```
```

```
In [ ]:
```# print "===Analysis of trials to criterion per subject==="
# print "Matching"
# print "Rules per run per subject: ", mat_tc_vec
# print "Mean: ", np.mean(mat_tc_vec)
# print "SD: ", np.std(mat_tc_vec)
# print ""
# print "Classification"
# print "Rules per run per subject: ", cat_tc_vec
# print "Mean: ", np.mean(cat_tc_vec)
# print "SD: ", np.std(cat_tc_vec)
# print ""
# # t-test
# t, p, df = sm.stats.ttest_ind(mat_tc_vec, cat_tc_vec)
# print "t-test:"
# print "t=",t
# print "p=",p
# print "df=",df

```
In [ ]:
```