where $x_i$ is the activity of region $i$, $\tau_{i}$ is the time constant of region $i$, $s$ is the recurrent (local) coupling strength, $g$ is the global coupling parameter, $\phi$ is the bounded transfer function (here, the hyperbolic tangent), $W_{xy}$ is the synaptic connectivity matrix, and $I$ is the task stimulation (if any).
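For intuition, dynamics of this form can be integrated with a simple forward Euler scheme. The sketch below is an illustrative assumption, not the CompModel_v7 implementation used in the cells that follow; the function name euler_simulate, the step size dt, and the argument defaults are all hypothetical.

import numpy as np

def euler_simulate(W, tau=1.0, s=1.0, g=1.0, I=None, nsteps=1000, dt=0.01):
    # Forward Euler integration of
    #   tau * dx_i/dt = -x_i + s*phi(x_i) + g*sum_j W_ij*phi(x_j) + I_i,
    # with phi = tanh. W is assumed to hold synaptic weights with W[i,j] the
    # connection from region j to region i (sketch only).
    nnodes = W.shape[0]
    if I is None:
        I = np.zeros((nnodes, nsteps))   # no task stimulation
    x = np.zeros((nnodes, nsteps))       # regional activity over time
    for t in range(1, nsteps):
        phi = np.tanh(x[:, t-1])
        dxdt = (-x[:, t-1] + s*phi + g*np.dot(W, phi) + I[:, t-1]) / tau
        x[:, t] = x[:, t-1] + dt*dxdt
    return x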
In [23]:
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
%matplotlib inline
import scipy.stats as stats
import statsmodels.api as sm
import CompModel_v7 as cm
cm = reload(cm)
import multiprocessing as mp
import sklearn.preprocessing as preprocessing
import sklearn.svm as svm
import statsmodels.sandbox.stats.multicomp as mc
import multregressionconnectivity as mreg
import sys
sys.path.append('utils/')
import permutationTesting as pt
import os
os.environ['OMP_NUM_THREADS'] = str(1)
from matplotlib.colors import Normalize
class MidpointNormalize(Normalize):
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # I'm ignoring masked values and all kinds of edge cases to make a
        # simple example...
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
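As a quick, illustrative check of the mapping implemented above (vmin maps to 0.0, the midpoint to 0.5, and vmax to 1.0 via piecewise-linear interpolation); the values here are arbitrary:

# Hypothetical values, for illustration only
norm_demo = MidpointNormalize(vmin=-1.0, vmax=3.0, midpoint=0.0)
print norm_demo(np.array([-1.0, 0.0, 3.0]))  # expected: [0.0, 0.5, 1.0]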
In [24]:
# Basic simulation parameters
# Simulation parameters sampled at 10ms
Tmax = 10000 # 100 seconds per block (10 trials per block, each trial lasts 4 seconds)
Tmaxrest = 60000 # 600 seconds of rest
nblocks = 20
# Network parameters
g = 1.0
s = 1.0
nsubjs = 30
ncommunities = 5
nodespernetwork = 50
totalnodes = ncommunities*nodespernetwork
localtasks = range(1,5)
flexhubtasks = range(5,9)
flexandlocalnets = range(9,13)
flexandlocalnet2 = range(13,17)
ntasks = len(localtasks) + len(flexhubtasks) + len(flexandlocalnets) + len(flexandlocalnet2)
In [25]:
nblocks = 20
## First four tasks are local tasks
localtasks = range(1,5)
localcommunity = 4 # local community stimulated during the local tasks
## Second four tasks are 'flexhub' tasks
flexhubtasks = range(5,9)
ntasks = len(flexhubtasks) + len(localtasks)
Tmax = 10000
Tmaxrest = 60000
# g = 1.0
# s = 1.0
autocorrfactor = 0
samplingrate = 1.0
TRLength=100
#### Set up subject networks ####
# Parameters for subject's networks
ncommunities = 5
innetwork_dsity = .35
outnetwork_dsity = .05
hubnetwork_dsity = .20
nodespernetwork = 50
totalnodes = nodespernetwork*ncommunities
##########
# Construct structural matrix
W = cm.generateStructuralNetwork(ncommunities=ncommunities, innetwork_dsity=innetwork_dsity,
                                 outnetwork_dsity=outnetwork_dsity, hubnetwork_dsity=hubnetwork_dsity,
                                 nodespernetwork=nodespernetwork, showplot=False)
# Construct synaptic matrix
G = cm.generateSynapticNetwork(W, showplot=False)
plt.figure()
# norm = MidpointNormalize(midpoint=0)
plt.imshow(G,origin='lower',interpolation='none')
plt.xlabel('Regions')
plt.ylabel('Regions')
plt.title('Synaptic Weight Matrix', y=1.04, fontsize=18)
plt.colorbar()
# plt.savefig('SingleSubj_SynapticWeightMatrix.pdf')
Out[25]:
In [26]:
fcmat_pearson = np.zeros((totalnodes,totalnodes,nsubjs))
fcmat_multreg = np.zeros((totalnodes,totalnodes,nsubjs))
for subj in range(nsubjs):
    indir = '/projects2/ModalityControl2/data/resultsMaster/Manuscript4_CompModelv7_resubmission/restfc/'
    # Load in pearson FC matrix
    filename1 = 'subj' + str(subj) + '_restfc_pearson.txt'
    fcmat_pearson[:,:,subj] = np.loadtxt(indir + filename1, delimiter=',')
    # Load in multreg FC matrix
    filename2 = 'subj' + str(subj) + '_restfc_multreg.txt'
    fcmat_multreg[:,:,subj] = np.loadtxt(indir + filename2, delimiter=',')
plt.figure()
avg = np.mean(fcmat_pearson,axis=2)
np.fill_diagonal(avg,0)
plt.imshow(avg ,origin='lower',interpolation='none')#,vmin=0)
plt.xlabel('Regions')
plt.ylabel('Regions')
plt.title('Group Rest FC Matrix\nPearson FC', y=1.04, fontsize=18)
plt.colorbar()
#plt.savefig('Fig1a_CompModel5_GroupRestFC_Pearson.pdf')
plt.figure()
avg = np.mean(fcmat_multreg,axis=2)
np.fill_diagonal(avg,0)
# norm = MidpointNormalize(midpoint=0)
plt.imshow(avg ,origin='lower',interpolation='none')#,vmin=-.08,vmax=.08)
plt.xlabel('Regions')
plt.ylabel('Regions')
plt.title('Group Rest FC Matrix\nMultiple Regression FC', y=1.04, fontsize=18)
plt.colorbar()
# plt.savefig('Fig1b_CompModel5_GroupRestFC_MultReg.pdf')
Out[26]:
In [27]:
plt.figure()
avg = np.mean(fcmat_multreg,axis=2)
np.fill_diagonal(avg,0)
norm = MidpointNormalize(midpoint=0)
plt.imshow(avg, origin='lower',interpolation='none', cmap='OrRd',vmin=0)
plt.xlabel('Regions')
plt.ylabel('Regions')
plt.title('Group Rest FC Matrix\nMultiple Regression FC', y=1.04, fontsize=18)
plt.colorbar()
Out[27]:
In [28]:
def setUpActFlowRSAMat(subj,net,fromnet,tasks,nblocks=20,fc='multreg'):
    """
    Retrieves ActFlow-predicted data for a subject and puts it in an SVM-ready format.
    tasks input -- an array or list of task numbers corresponding to which set of tasks to analyze
    (e.g., only local tasks or only flexhub tasks)
    """
    nsamples = len(tasks)*nblocks
    nfeatures = nodespernetwork # regions per network
    svm_mat = np.zeros((nsamples,nfeatures))
    labels = np.zeros((nsamples,))
    indir = '/projects2/ModalityControl2/data/resultsMaster/Manuscript4_CompModelv7_resubmission/actflow_predictions/'
    indcount = 0
    for task in tasks:
        if fc=='multreg':
            filename = 'subj'+str(subj)+'_task'+str(task)+'_net'+str(fromnet)+'tonet'+str(net)+'_multregFC.txt'
        elif fc=='pearson':
            filename = 'subj'+str(subj)+'_task'+str(task)+'_net'+str(fromnet)+'tonet'+str(net)+'_pearsonFC.txt'
        actflowdat = np.loadtxt(indir+filename,delimiter=',')
        svm_mat[indcount:(indcount+nblocks),:] = actflowdat.T
        labels[indcount:(indcount+nblocks)] = task
        indcount += nblocks
    return svm_mat, labels

def setUpBetasRSAMat(subj,net,tasks,nblocks=20):
    """
    Retrieves task betas for a subject and puts them in an SVM-ready format.
    tasks input -- an array or list of task numbers corresponding to which set of tasks to analyze
    (e.g., only local tasks or only flexhub tasks)
    """
    nfeatures = nodespernetwork # Number of regions for each network
    nsamples = len(tasks)*nblocks
    svm_mat = np.zeros((nsamples,nfeatures))
    labels = np.zeros((nsamples,))
    net_ind = np.arange(net*nodespernetwork,net*nodespernetwork+nodespernetwork)
    indir = '/projects2/ModalityControl2/data/resultsMaster/Manuscript4_CompModelv7_resubmission/task_betas/'
    indcount = 0
    for task in tasks:
        filename = 'subj'+str(subj)+'_task'+str(task)+'_allblocks.txt'
        betas = np.loadtxt(indir + filename, delimiter=',')
        # Get relevant network data
        svm_mat[indcount:(indcount+nblocks),:] = betas[net_ind,:].T # get all trials
        labels[indcount:(indcount+nblocks)] = task
        indcount += nblocks
    return svm_mat, labels
In [29]:
def runActFlowRSA((subj,net,fromnet,tasks,nblocks,fc)):
    """
    Runs a leave-block-out CV-style representational similarity analysis
    (one block per task held out per fold).
    Prototypes are constructed from the real data (betas);
    held-out ActFlow-predicted data are compared against them.
    Tuple parameter (Python 2) so the function can be passed directly to pool.map_async.
    """
    actflow_mat, labels = setUpActFlowRSAMat(subj,net,fromnet,tasks,nblocks=nblocks,fc=fc)
    real_mat, labels = setUpBetasRSAMat(subj,net,tasks,nblocks=nblocks)
    # actflow_mat = preprocessing.scale(actflow_mat,axis=0)
    # real_mat = preprocessing.scale(real_mat,axis=0)
    ncvs = nblocks
    indices = np.arange(actflow_mat.shape[0])
    matched_rhos = []
    mismatch_rhos = []
    for cv in range(ncvs):
        task_ind = {}
        prototype = {}
        # Construct prototypes of each task
        for task in tasks:
            # Get indices for this particular task
            task_ind[task] = np.where(labels==task)[0]
            # Decide which block will be the held-out 'comparison test trial'
            test_ind = task_ind[task][cv]
            # Find the indices for the prototypes
            train_ind = np.setxor1d(test_ind,task_ind[task])
            prototype[task] = np.mean(real_mat[train_ind,:],axis=0)
        # Now compare each pair of tasks with the prototype
        for task_a in tasks:
            for task_b in tasks:
                test_ind = task_ind[task_a][cv] # Compare task a
                rho_tmp = stats.spearmanr(prototype[task_b].T,actflow_mat[test_ind,:].T)[0] # With task b
                rho_tmp = np.arctanh(rho_tmp)
                if task_a==task_b:
                    # Match!
                    matched_rhos.append(rho_tmp)
                else:
                    mismatch_rhos.append(rho_tmp)
    # Get averages
    matched_rhos_avg = np.mean(matched_rhos)
    mismatch_rhos_avg = np.mean(mismatch_rhos)
    return matched_rhos_avg, mismatch_rhos_avg
In [30]:
# Empty variables for FlexHub task analysis
rho_mat_match_flexhub = np.zeros((ncommunities,ncommunities,nsubjs))
rho_mat_mismatch_flexhub = np.zeros((ncommunities,ncommunities,nsubjs))
for i in range(ncommunities):
    for j in range(ncommunities):
        if i==j: continue
        fromnet = i
        net = j
        fc = 'multreg'
        ## First run on flexhub tasks
        inputs = []
        for subj in range(nsubjs): inputs.append((subj,net,fromnet,flexhubtasks,nblocks,fc))
        # Run multiprocessing
        pool = mp.Pool(processes=15)
        results_flexhub = pool.map_async(runActFlowRSA, inputs).get()
        pool.close()
        pool.join()
        ## Get results
        for subj in range(nsubjs):
            match, mismatch = results_flexhub[subj]
            rho_mat_match_flexhub[i,j,subj],rho_mat_mismatch_flexhub[i,j,subj] = match, mismatch
In [31]:
# Instantiate empty result matrices
tmat_flexhub = np.zeros((ncommunities,ncommunities))
pmat_flexhub = np.ones((ncommunities,ncommunities))
for i in range(ncommunities):
    for j in range(ncommunities):
        if i==j: continue
        t, p = stats.ttest_rel(rho_mat_match_flexhub[i,j,:],rho_mat_mismatch_flexhub[i,j,:])
        tmat_flexhub[i,j] = t
        # Convert to a one-sided p-value
        if t > 0:
            p = p/2.0
        elif t < 0:
            p = 1.0 - p/2.0
        pmat_flexhub[i,j] = p
## FlexHub Tasks
# Run FDR correction on p-values (Don't get diagonal values)
qmat_flexhub = np.ones((ncommunities,ncommunities))
triu_ind = np.triu_indices(ncommunities,k=1)
tril_ind = np.tril_indices(ncommunities,k=-1)
all_ps = np.hstack((pmat_flexhub[triu_ind],pmat_flexhub[tril_ind]))
h, all_qs = mc.fdrcorrection0(all_ps)
# the first half of all qs belong to triu, second half belongs to tril
qmat_flexhub[triu_ind] = all_qs[:len(triu_ind[0])]
qmat_flexhub[tril_ind] = all_qs[len(tril_ind[0]):]
binary_mat_flexhub = qmat_flexhub < .05
rho_diff_mat_flexhub = np.mean(rho_mat_match_flexhub,axis=2) - np.mean(rho_mat_mismatch_flexhub,axis=2)
plt.figure()
threshold_acc = np.multiply(binary_mat_flexhub,tmat_flexhub)
norm = MidpointNormalize(midpoint=0)
plt.imshow(threshold_acc,norm=norm,origin='lower',interpolation='None',cmap='bwr')
plt.title('Network-to-Network Information Transfer\n(FDR-corrected)\nFlexHub Tasks -- MultReg FC',fontsize=16, y=1.02)
plt.colorbar()
plt.yticks(range(ncommunities), ['FlexHub', 'Net1', 'Net2', 'Net3', 'Net4'])
plt.xticks(range(ncommunities), ['FlexHub', 'Net1', 'Net2', 'Net3', 'Net4'])
plt.ylabel('Network ActFlow FROM',fontsize=15)
plt.xlabel('Network ActFlow TO',fontsize=15)
plt.tight_layout()
# plt.savefig('SFig_CompModel_Network2Network_RSA_MultRegFC_HubNetStim_.pdf')
In [32]:
# Instantiate empty result matrices
tfwe_flexhub = np.zeros((ncommunities,ncommunities))
pfwe_flexhub = np.ones((ncommunities,ncommunities))
ite_flexhub = rho_mat_match_flexhub - rho_mat_mismatch_flexhub
indices = np.ones((ncommunities,ncommunities))
np.fill_diagonal(indices,0)
flatten_ind = np.where(indices==1)
## FlexHub Tasks
t, p = pt.permutationFWE(ite_flexhub[flatten_ind[0],flatten_ind[1],:], permutations=1000, nproc=15)
p = 1.0 - p
np.fill_diagonal(pfwe_flexhub,1.0)
tfwe_flexhub[flatten_ind[0],flatten_ind[1]] = t
pfwe_flexhub[flatten_ind[0],flatten_ind[1]] = p
binary_mat_flexhub = pfwe_flexhub < 0.05
# Print statistics to place in text of paper
# Compute average t-value of hub-network transfers
sig_ind = pfwe_flexhub<0.05
nonsig_ind = pfwe_flexhub>0.05
print 'Average significant T-value:', np.mean(tfwe_flexhub[sig_ind])
print 'Maximum significant P-value:', np.max(pfwe_flexhub[sig_ind])
print 'Average significant ITE:', np.mean(ite_flexhub[sig_ind])
print '\n'
print 'Average non-significant T-value:', np.mean(tfwe_flexhub[nonsig_ind])
print 'Average non-significant P-value:', np.mean(pfwe_flexhub[nonsig_ind])
print 'Average non-significant ITE:', np.mean(ite_flexhub[nonsig_ind])
ite_mat_flexhub = np.mean(rho_mat_match_flexhub,axis=2) - np.mean(rho_mat_mismatch_flexhub,axis=2)
plt.figure()
threshold_acc = np.multiply(binary_mat_flexhub,tmat_flexhub)
norm = MidpointNormalize(midpoint=0)
plt.imshow(threshold_acc,norm=norm,origin='lower',interpolation='None',cmap='bwr')
plt.title('Network-to-Network Information Transfer\n(FWE-corrected)\nFlexHub Tasks -- MultReg FC',fontsize=16, y=1.02)
plt.colorbar()
plt.yticks(range(ncommunities), ['FlexHub', 'Net1', 'Net2', 'Net3', 'Net4'])
plt.xticks(range(ncommunities), ['FlexHub', 'Net1', 'Net2', 'Net3', 'Net4'])
plt.ylabel('Network ActFlow FROM',fontsize=15)
plt.xlabel('Network ActFlow TO',fontsize=15)
plt.tight_layout()
# plt.savefig('SFig_CompModel_Network2Network_RSA_MultRegFC_HubNetStim_.pdf')
In [33]:
outofnet_intrinsicFC = np.zeros((ncommunities,nsubjs))
indices = np.arange(nodespernetwork*ncommunities)
for subj in range(nsubjs):
    for net in range(0,ncommunities):
        # if net == hubnet: continue
        net_ind = np.arange(nodespernetwork*net,nodespernetwork*net + nodespernetwork)
        net_ind.shape = (len(net_ind),1)
        outofnet_ind = np.setxor1d(net_ind,indices)
        outofnet_ind.shape = (len(outofnet_ind),1)
        outofnet_intrinsicFC[net,subj] = np.mean(fcmat_multreg[net_ind, outofnet_ind.T, subj])
fcmean = np.mean(outofnet_intrinsicFC,axis=1)
fcerr = np.std(outofnet_intrinsicFC,axis=1)/np.sqrt(nsubjs)
fig = plt.bar(range(len(fcmean)), fcmean, yerr=fcerr)
# fig = plt.ylim([.09,0.10])
fig = plt.xticks(np.arange(.4,5.4,1.0),['FlexHub', 'Net1', 'Net2', 'Net3', 'Net4'],fontsize=14)
fig = plt.ylabel('Multiple Regression FC', fontsize=16)
fig = plt.xlabel('Networks', fontsize=16)
fig = plt.title("Out-of-Network (BGC) Intrinsic FC", fontsize=18, y=1.02)
fig = plt.tight_layout()
# pp2 = PdfPages('Fig1_CompModel_OutNetIntrinsicFC.pdf')
# pp2.savefig(fig)
# pp2.close()
In [34]:
pvals = []
tvals = []
hubnet = 0
for net in range(ncommunities):
    if hubnet == net: continue
    t, p = stats.ttest_rel(outofnet_intrinsicFC[hubnet,:],outofnet_intrinsicFC[net,:])
    tvals.append(t)
    pvals.append(p)
qvals = mc.fdrcorrection0(pvals)[1]
for net in range(ncommunities):
    if net == hubnet:
        print 'Average out-of-network GBC of network', net, ':', round(np.mean(outofnet_intrinsicFC[net,:]),5)
    else:
        print 'Average out-of-network GBC of network', net, ':', round(np.mean(outofnet_intrinsicFC[net,:]),5), '\t t =', round(tvals[net-1],3), '\t p =', round(pvals[net-1],3), '\t q =', round(qvals[net-1],3)
print 'Average t-value for hub network greater than local networks:', np.mean(tvals)
In [35]:
contrast = np.zeros((ncommunities-1,outofnet_intrinsicFC.shape[1]))
hubnet = 0
i = 0
for net in range(ncommunities):
    if hubnet == net: continue
    t, p = stats.ttest_rel(outofnet_intrinsicFC[hubnet,:],outofnet_intrinsicFC[net,:])
    contrast[i,:] = outofnet_intrinsicFC[hubnet,:] - outofnet_intrinsicFC[net,:]
    i += 1
t, p_fwe = pt.permutationFWE(contrast, permutations=1000, nproc=15)
p_fwe = 1.0 - p_fwe
qvals = mc.fdrcorrection0(pvals)[1]
for net in range(ncommunities):
    if net == hubnet:
        print 'Average out-of-network GBC of network', net, ':', round(np.mean(outofnet_intrinsicFC[net,:]),5)
    else:
        print 'Average out-of-network GBC of network', net, ':', round(np.mean(outofnet_intrinsicFC[net,:]),5), '\t t =', round(t[net-1],3), '\t p =', round(p_fwe[net-1],3)