In [ ]:
# multivariate-univariate dependence analysis for toy data; feel free to load your own data into a variable called patterns
# patterns should be numVoxels x numPatterns
# see Aly M & Turk-Browne NB. (2016). Attention stabilizes representations in the human hippocampus. Cerebral Cortex, 26, 783-796.
#
# Mariam Aly
#
In [ ]:
# toy-data dimensions: numPatt patterns, each measured over numVox voxels
# (ignored if you load your own data into `patterns`)
numPatt, numVox = 4, 50
In [ ]:
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
%matplotlib inline
In [ ]:
# simulate toy patterns: one row per voxel, one column per pattern,
# drawn from a standard normal distribution
patterns = np.random.standard_normal((numVox, numPatt))
In [ ]:
# univariate activity = each voxel's mean across patterns,
# kept as a numVox x 1 column vector via keepdims
uniAct = patterns.mean(axis=1, keepdims=True)
In [ ]:
# center each pattern (column): broadcasting removes the per-pattern
# mean across voxels from every voxel in that pattern
patt_mean = np.mean(patterns, axis=0)
patt_mean_sub = patterns - patt_mean
In [ ]:
# normalize each centered pattern to unit length by dividing by its
# root sum of squares; after this, the sum of elementwise products of
# any two columns equals the Pearson r between those patterns
root_ss = np.sqrt((patt_mean_sub ** 2).sum(axis=0))
zpatt = patt_mean_sub / root_ss
In [ ]:
# form the normalized elementwise product of every unique pair of patterns:
# pair (i, j) is taken once with j > i, so there is no redundancy
pattProd = []
numCols = 0  # ultimate number of columns in the product array (one per pair)
for i in range(numPatt - 1):
    thisPatt = zpatt[:, i].reshape(numVox, 1)
    for j in range(i + 1, numPatt):
        otherPatt = zpatt[:, j].reshape(numVox, 1)
        tmpPattProd = thisPatt * otherPatt
        pattProd.append(tmpPattProd)
        numCols += 1
        # sanity check: the sum of normalized products equals the Pearson r
        # between the two patterns (compared to 8 digits because of rounding
        # error); ravel to 1-D because scipy.stats.pearsonr requires 1-D input
        r = stats.pearsonr(thisPatt.ravel(), otherPatt.ravel())[0]
        if round(float(tmpPattProd.sum()), 8) == round(r, 8):
            print("Yes")
        else:
            print("No")
In [ ]:
# get mean contribution to pattern stability for each voxel
# pattProd is a list of numCols arrays, each numVox x 1, so asarray gives
# (numCols, numVox, 1); flatten the trailing axis and transpose so that
# pattProdArray is numVox x numCols (one column per pattern pair)
pattProdArray = np.asarray(pattProd).reshape(numCols, numVox).T
# mean contribution of each voxel across all pattern pairs, as a
# numVox x 1 column vector
meanContribToPattSim = pattProdArray.mean(axis=1).reshape(numVox, 1)
In [ ]:
# correlate univariate activity with mean normalized product at each voxel;
# ravel both column vectors to 1-D because scipy.stats.pearsonr requires
# 1-D inputs
stats.pearsonr(uniAct.ravel(), meanContribToPattSim.ravel())  # returns r and p-value
In [ ]:
# scatter plot of univariate activity against each voxel's contribution
# to pattern similarity
myPlot = plt.scatter(
    uniAct,
    meanContribToPattSim,
    color='r',
    marker='o',
    alpha=.4,
    s=120,
    edgecolors='k',
    linewidths=3,
)
plt.xlabel("univariate activity")
plt.ylabel("contribution to pattern similarity")
plt.show()
In [ ]: