This example is different from, but based on: http://mumfordbrainstats.tumblr.com/post/131150941751/efficiency-day3-matlab-example
In [1]:
from __future__ import division  # only needed under Python 2; harmless under Python 3
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.stats
import pprint
%matplotlib inline
We first define the haemodynamic response function (HRF) as it is defined in SPM and FSL: the difference of two gamma densities.
In [2]:
def spm_hrf(TR,p=[6,16,1,1,6,0,32]):
    """Canonical double-gamma HRF sampled at the given TR, as implemented in SPM."""
    p = [float(x) for x in p]
    fMRI_T = 16.0                              # microtime resolution: bins per TR
    TR = float(TR)
    dt = TR/fMRI_T
    u = np.arange(p[6]/dt + 1) - p[5]/dt
    # positive gamma (response) minus a scaled second gamma (undershoot)
    hrf = scipy.stats.gamma.pdf(u,p[0]/p[2],scale=1.0/(dt/p[2])) - scipy.stats.gamma.pdf(u,p[1]/p[3],scale=1.0/(dt/p[3]))/p[4]
    # keep one sample per TR (int() instead of np.int, which has been removed from NumPy)
    good_pts = np.array(range(int(p[6]/TR)))*fMRI_T
    good_pts_ls = [int(x) for x in good_pts]
    hrf = hrf[good_pts_ls]
    hrf = hrf/np.sum(hrf)                      # normalize to unit sum
    return hrf
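For reference (this note is ours, not part of the original post): with the default parameters p = [6, 16, 1, 1, 6, 0, 32], the function above evaluates, up to the final normalization to unit sum, the canonical double-gamma HRF
$$h(t) \;\propto\; \frac{t^{5}e^{-t}}{\Gamma(6)} \;-\; \frac{1}{6}\,\frac{t^{15}e^{-t}}{\Gamma(16)}, \qquad 0 \le t \le 32 \text{ s},$$
i.e. a gamma density peaking early (the response) minus a scaled gamma density peaking later (the undershoot), evaluated on a fine grid of dt = TR/16 seconds and then subsampled to one value per TR.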
In [3]:
resolution = 0.25
hrf25 = spm_hrf(resolution)
xaxis = np.arange(len(hrf25))*resolution #x-axis in seconds (the HRF is sampled every 0.25 s)
plt.plot(xaxis,hrf25)
Out[3]: [figure: the canonical HRF sampled every 0.25 s, x-axis in seconds]
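As a quick sanity check (our addition, not in the original notebook), we can verify that the returned HRF sums to 1, because of the normalization inside spm_hrf, and that it peaks roughly 5 seconds after onset:
print("sum of HRF weights:", np.sum(hrf25))           # ~1.0 by construction
print("time to peak (s):", xaxis[np.argmax(hrf25)])   # about 5 s for the canonical HRF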
We will now generate a design matrix for an experiment with 2 different stimulus types (e.g. faces and houses) and 100 trials. Each stimulus is presented for 2 seconds and the intertrial interval (ITI) is 4 seconds:
| stim | ITI | stim | ITI | stim | ITI | stim | ITI | stim | ITI |
|--2s--|-----4s-----|--2s--|-----4s-----|--2s--|-----4s-----|--2s--|-----4s-----|--2s--|-----4s-----|
We will first specify all design parameters and the onsets in seconds.
In [4]:
ntrials = 100
TR_s = 2
stimDur_s = 2
ITI_s = 4
totalDur_s = int((stimDur_s+ITI_s) * ntrials)
onsets = np.arange(0,totalDur_s,(ITI_s+stimDur_s))
print("onsets:\n"+str(onsets))
order = np.array([0,1]*int(ntrials/2))  # alternating stimulus order: 0,1,0,1,...
print("order:\n"+str(order))
X0_onsets_s = onsets[order==0]
X1_onsets_s = onsets[order==1]
print("- stimulus 0 onsets:\n"+ str(X0_onsets_s))
print("- stimulus 1 onsets:\n"+ str(X1_onsets_s))
We now convert everything from seconds to resolution units, so that all variables can be read as indices into the timepoints of our high-resolution design matrix. For example, with resolution = 0.25 s, an onset at 6 s becomes timepoint 6/0.25 = 24.
In [5]:
TR = int(TR_s/resolution)
totalDur = int(totalDur_s/resolution)
stimDur = int(stimDur_s/resolution)
X0_onsets = [int(x/resolution) for x in X0_onsets_s]
X1_onsets = [int(x/resolution) for x in X1_onsets_s]
print("- stimulus onsets:\n"+ str(X0_onsets))
print("- response onsets:\n"+ str(X1_onsets))
We now build the regressor for the first stimulus type in five steps (numbered in the code below):
In [6]:
# 1: empty regressor at the high (0.25 s) resolution
X0 = np.zeros(int(totalDur))
# 2: put a 1 at every onset of stimulus 0
X0[X0_onsets] = 1
# 3: extend each event to its full duration (2 s = 8 timepoints)
for shift in np.arange(1,int(stimDur)):
    ind = [x+shift for x in X0_onsets]
    X0[ind] = 1
# 4: convolve with the HRF and trim back to the original length
X0Z_highres = np.convolve(X0,hrf25)[:len(X0)]
# 5: downsample to TR resolution (keep one timepoint every TR)
TRid = np.arange(0,totalDur,TR)
X0Z = X0Z_highres[TRid]
#plots
plt.figure(figsize=(18,2))
plt.plot(X0)
plt.title("design matrix X_0")
plt.ylim([-0.1,1.1])
plt.figure(figsize=(18,2))
plt.plot(X0Z_highres)
plt.title("convolved design matrix X_0")
plt.ylim([-0.1,0.5])
plt.figure(figsize=(18,2))
plt.title("downsampled convolved design matrix X_0")
plt.plot(X0Z.T,color="grey",lw=1)
plt.plot(X0Z.T,marker='o',lw=0)
plt.ylim([-0.1,0.5])
Out[6]: [figures: the boxcar design matrix X_0, the HRF-convolved X_0 at high resolution, and the downsampled convolved X_0]
The regressor has now been constructed at high resolution, convolved and downsampled: what we actually measure is only one timepoint per TR, i.e. a subset of the high-resolution design. We repeat the same steps for the second stimulus type.
In [7]:
# same five steps for the second stimulus type
X1 = np.zeros(int(totalDur))
X1[X1_onsets] = 1
for shift in np.arange(1,int(stimDur)):
    ind = [x+shift for x in X1_onsets]
    X1[ind] = 1
X1Z_highres = np.convolve(X1,hrf25)[:len(X1)]
TRid = np.arange(0,totalDur,TR)
X1Z = X1Z_highres[TRid]
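The boxcar → HRF-convolution → downsampling recipe above is repeated several more times in this notebook (and again inside the simulation loop at the end). Purely for reference, the steps can be bundled into a small helper; the name make_regressor is ours and the remaining cells keep the original, spelled-out code:
def make_regressor(onsets, stim_dur, total_dur, tr, hrf):
    # hypothetical helper equivalent to steps 1-5 above (all arguments in resolution units)
    x = np.zeros(int(total_dur))
    for shift in range(int(stim_dur)):                     # boxcar: onset plus stimulus duration
        x[[int(o)+shift for o in onsets]] = 1
    x_conv = np.convolve(x, hrf)[:len(x)]                  # convolve with the HRF and trim
    return x_conv[np.arange(0, int(total_dur), int(tr))]   # keep one timepoint per TR
For example, X0Z above is the same as make_regressor(X0_onsets, stimDur, totalDur, TR, hrf25).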
In [8]:
XZ = np.array([X0Z,X1Z])
plt.figure(figsize=(18,3))
plt.plot(XZ.T)
plt.ylim([-0.1,0.5])
Out[8]: [figure: the downsampled, convolved regressors for both stimulus types]
In [9]:
# mean centering
means = XZ.dot(np.ones(XZ.shape[1]).T)/XZ.shape[1]
means_expand = np.outer(means,np.ones(XZ.shape[1]))
XZ_demeaned = XZ-means_expand
In [10]:
XtX = XZ_demeaned.dot(XZ_demeaned.T)
XtXi = np.linalg.inv(XtX)
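The quantity computed below is the estimation efficiency of a contrast C for the demeaned design X: eff(C) = 1 / trace(C (X'X)⁻¹ C'); for a contrast matrix with several rows, the notebook divides by the number of contrasts to obtain an average efficiency. Since this computation recurs several times, here is an equivalent helper sketch; the name contrast_efficiency is ours, and the cells below keep the original inline code:
def contrast_efficiency(XtXi, C):
    # hypothetical helper: (average) efficiency over the rows of C, equivalent to the inline code below
    C = np.atleast_2d(C)
    return C.shape[0]/np.trace(C.dot(XtXi).dot(C.T))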
Contrast 1: stimulus 0 (X0) vs. baseline
In [11]:
C = np.matrix([1,0])
eff1 = float(1/(C*np.matrix(XtXi)*C.T))
print(eff1)
Contrast 2: stimulus 0 (X0) vs. stimulus 1 (X1)
In [12]:
C = np.matrix([1,-1])
eff2 = float(1/(C*np.matrix(XtXi)*C.T))
print(eff2)
Contrast 1: stimulus 1 vs. baseline & stimulus 2 vs. baseline
In [13]:
C = np.matrix([[0,1],[1,0]])
eff3 = C.shape[1]/np.trace(C*np.matrix(XtXi)*C.T)
print(eff3)
Save results
In [14]:
Results = {}
Results['design_1']={
'contrastX0':eff1,
'contrastX0vsX1':eff2,
'contrastX0X1':eff3
}
Matrices = {}
Matrices['design_1'] = XZ_demeaned
pprint.pprint(Results)
We will now generate a design matrix for the same experiment with 100 trials, but this time the ITI is sampled from a uniform distribution between 3 and 5 seconds:
| stim | ITI | stim | ITI | stim | ITI | stim | ITI | stim | ITI |
|--2s--|----3-5s----|--2s--|----3-5s----|--2s--|----3-5s----|--2s--|----3-5s----|--2s--|----3-5s----|
We sample the ITIs continuously and round the onsets to resolution units rather than to whole seconds (otherwise there would be only 3 possible ITI values: 3, 4 and 5 s).
In [15]:
success = False
while success == False:
    # sample the intervals from a uniform distribution (ITI of 3-5 s plus the 2 s stimulus duration)
    ITI = np.random.uniform(3,5,ntrials-1)+stimDur_s
    onsets = np.append([0],np.cumsum(ITI))
    # round the onsets down to the resolution grid (0.25 s), not to whole seconds
    onsets_s = np.floor(onsets/resolution)*resolution
    # from onsets in seconds to timepoints
    onsets = onsets_s/resolution
    # due to the random process, the design could turn out longer than intended;
    # if that happens, draw a new sample of ITIs
    if np.max(onsets_s)<totalDur_s:
        success = True
print("onsets: "+str(onsets_s))
In [16]:
X = np.zeros([2,int(totalDur)])
XZ_highres = np.zeros([2,int(totalDur)])
for stim in range(2):
    ind = [int(x) for x in onsets[order==stim]]
    X[stim,ind] = 1
    for shift in np.arange(1,int(stimDur)):
        # onsets now holds floats, so cast to int before using them as indices
        ind = [int(x)+shift for x in onsets[order==stim]]
        X[stim,ind] = 1
    XZ_highres[stim,:] = np.convolve(X[stim,:],hrf25)[:X.shape[1]]
#downsample
TRid = np.arange(0,totalDur,TR)
XZ = XZ_highres[:,TRid]
#demean
means = XZ.dot(np.ones(XZ.shape[1]).T)/XZ.shape[1]
means_expand = np.outer(means,np.ones(XZ.shape[1]))
XZ_demeaned = XZ-means_expand
plt.figure(figsize=(18,3))
plt.plot(XZ_demeaned.T)
Out[16]: [figure: the demeaned design matrix for the jittered-ITI design]
In [17]:
XtX = XZ_demeaned.dot(XZ_demeaned.T)
XtXi = np.linalg.inv(XtX)
C = np.matrix([1,0])
eff1 = float(1/(C*np.matrix(XtXi)*C.T))
C = np.matrix([1,-1])
eff2 = float(1/(C*np.matrix(XtXi)*C.T))
C = np.matrix([[0,1],[1,0]])
eff3 = C.shape[1]/np.trace(C*np.matrix(XtXi)*C.T)
Results['design_2']={
'contrastX0':eff1,
'contrastX0vsX1':eff2,
'contrastX0X1':eff3
}
Matrices['design_2'] = XZ_demeaned
pprint.pprint(Results)
We will now generate a design matrix for the same experiment with 100 trials. We keep the jittered onsets from before, but randomize the order in which the two stimulus types are presented.
| stim | ITI | stim | ITI | stim | ITI | stim | ITI | stim | ITI |
|--2s--|----3-5s----|--2s--|----3-5s----|--2s--|----3-5s----|--2s--|----3-5s----|--2s--|----3-5s----|
In [18]:
order = np.random.binomial(1,0.5,100)
print("random order of stimuli: \n"+str(order))
In [19]:
X = np.zeros([2,int(totalDur)])
XZ_highres = np.zeros([2,int(totalDur)])
for stim in range(2):
    ind = [int(x) for x in onsets[order==stim]]
    X[stim,ind] = 1
    for shift in np.arange(1,int(stimDur)):
        # cast the float onsets to int before using them as indices
        ind = [int(x)+shift for x in onsets[order==stim]]
        X[stim,ind] = 1
    XZ_highres[stim,:] = np.convolve(X[stim,:],hrf25)[:X.shape[1]]
#downsample
TRid = np.arange(0,totalDur,TR)
XZ = XZ_highres[:,TRid]
#demean
means = XZ.dot(np.ones(XZ.shape[1]).T)/XZ.shape[1]
means_expand = np.outer(means,np.ones(XZ.shape[1]))
XZ_demeaned = XZ-means_expand
plt.figure(figsize=(18,3))
plt.plot(XZ_demeaned.T)
Out[19]: [figure: the demeaned design matrix for the jittered-ITI design with randomized stimulus order]
In [20]:
XtX = XZ_demeaned.dot(XZ_demeaned.T)
XtXi = np.linalg.inv(XtX)
C = np.matrix([1,0])
eff1 = float(1/(C*np.matrix(XtXi)*C.T))
C = np.matrix([1,-1])
eff2 = float(1/(C*np.matrix(XtXi)*C.T))
C = np.matrix([[0,1],[1,0]])
eff3 = C.shape[1]/np.trace(C*np.matrix(XtXi)*C.T)
Results['design_3']={
'contrastX0':eff1,
'contrastX0vsX1':eff2,
'contrastX0X1':eff3
}
Matrices['design_3'] = XZ_demeaned
pprint.pprint(Results)
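As a small convenience (our addition; it assumes pandas is installed, which the original notebook does not use), the efficiencies collected in Results can also be displayed as a table:
import pandas as pd
pd.DataFrame(Results).round(2)   # rows: contrasts, columns: designs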
We now plot the three demeaned design matrices next to each other for comparison.
In [21]:
for des in ["design_1","design_2","design_3"]:
    plt.figure(figsize=(18,3))
    plt.plot(Matrices[des].T)
    plt.ylim([-0.35,0.35])
We now run a simulation: keeping the jittered onsets fixed, we draw 50,000 random stimulus orders, compute the efficiency of the X0 vs. X1 contrast for each, and compare them to a fully blocked order (all stimulus 0 trials first, then all stimulus 1 trials).
In [22]:
eff = []
best = 0
worst = 100
for sim in range(50000):
    # draw a random stimulus order; the first simulation uses a fully blocked order instead
    order = np.random.binomial(1,0.5,100)
    if sim == 0:
        order = np.array([0]*int(ntrials/2)+[1]*int(ntrials/2))
    # build the design matrix (same steps as above)
    X = np.zeros([2,int(totalDur)])
    XZ_highres = np.zeros([2,int(totalDur)])
    for stim in range(2):
        ind = [int(x) for x in onsets[order==stim]]
        X[stim,ind] = 1
        for shift in np.arange(1,int(stimDur)):
            ind = [int(x)+shift for x in onsets[order==stim]]
            X[stim,ind] = 1
        XZ_highres[stim,:] = np.convolve(X[stim,:],hrf25)[:X.shape[1]]
    #downsample
    TRid = np.arange(0,totalDur,TR)
    XZ = XZ_highres[:,TRid]
    #demean
    means = XZ.dot(np.ones(XZ.shape[1]).T)/XZ.shape[1]
    means_expand = np.outer(means,np.ones(XZ.shape[1]))
    XZ_demeaned = XZ-means_expand
    #compute efficiency of the X0 vs. X1 contrast
    XtX = XZ_demeaned.dot(XZ_demeaned.T)
    XtXi = np.linalg.inv(XtX)
    C = np.matrix([[1,-1]])
    efficiency = 1/np.trace(C*np.matrix(XtXi)*C.T)
    if sim == 0:
        blocked = XZ_demeaned
        blocked_eff = efficiency
    eff.append(efficiency)
    # keep track of the best and worst random designs
    if efficiency > best:
        bestdes = XZ_demeaned
        best = efficiency
    if efficiency < worst:
        worstdes = XZ_demeaned
        worst = efficiency
In [23]:
sns.histplot(eff, kde=True)  # sns.distplot is deprecated; histplot with a KDE overlay is its modern replacement
print(blocked_eff)
print(np.max(eff))
In [24]:
plt.figure(figsize=(18,3))
plt.plot(blocked.T)
plt.title("blocked design, efficiency = "+str(blocked_eff))
plt.ylim([-0.35,0.35])
plt.figure(figsize=(18,3))
plt.plot(bestdes.T)
plt.title("best design, efficiency = "+str(best))
plt.ylim([-0.35,0.35])
plt.figure(figsize=(18,3))
plt.plot(worstdes.T)
plt.title("worst design, efficiency = "+str(worst))
plt.ylim([-0.35,0.35])
Out[24]: [figures: the blocked, most efficient, and least efficient designs, with their efficiencies in the titles]