ManuscriptS1b - Network-to-network information transfer mapping

Analysis for Supplementary Figures 1C-E.

Master code for Ito et al., 2017

Takuya Ito (takuya.ito@rutgers.edu)


In [52]:
import sys
sys.path.append('utils/')
import numpy as np
import loadGlasser as lg
import scipy.stats as stats
import matplotlib.pyplot as plt
import statsmodels.sandbox.stats.multicomp as mc
import multiprocessing as mp
import multregressionconnectivity as mreg
import scripts3_functions as func
%matplotlib inline
import os
os.environ['OMP_NUM_THREADS'] = str(1)
import warnings
warnings.filterwarnings('ignore')
import permutationTesting as pt


from matplotlib.colors import Normalize


class MidpointNormalize(Normalize):
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # Simplified normalizer: ignores masked values and other edge cases
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
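
A quick toy check of the normalizer (arbitrary values, just to show the piecewise-linear mapping through the midpoint):

In [ ]:
# With vmin=-2, midpoint=0, vmax=4, values map linearly onto [0, 0.5] and [0.5, 1]
norm = MidpointNormalize(vmin=-2, vmax=4, midpoint=0)
print(norm(np.array([-2.0, 0.0, 4.0])))  # -> approximately [0.0, 0.5, 1.0]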

0.0 Basic parameters


In [53]:
# Set basic parameters
basedir = '/projects2/ModalityControl2/'
datadir = basedir + 'data/'
resultsdir = datadir + 'resultsMaster/'
runLength = 4648

subjNums = ['032', '033', '037', '038', '039', '045', 
            '013', '014', '016', '017', '018', '021', 
            '023', '024', '025', '026', '027', '031', 
            '035', '046', '042', '028', '048', '053', 
            '040', '049', '057', '062', '050', '030', '047', '034']

glasserparcels = lg.loadGlasserParcels()
networkdef = lg.loadGlasserNetworks()


networkmappings = {'fpn':7, 'vis':1, 'smn':2, 'con':3, 'dmn':6, 'aud1':8, 'aud2':9, 'dan':11}
# Relabel aud2 parcels as aud1 (merging the two auditory networks)
aud2_ind = np.where(networkdef==networkmappings['aud2'])[0]
networkdef[aud2_ind] = networkmappings['aud1']
# Define new network mappings with no aud1/aud2 distinction
networkmappings = {'fpn':7, 'vis':1, 'smn':2, 'con':3, 'dmn':6, 'aud':8, 'dan':11}
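
As a quick sanity check (illustrative only), no parcels should retain the old aud2 label after the merge:

In [ ]:
# Sanity check: the old aud2 label (9) should no longer appear in networkdef
print(np.sum(networkdef == 9))  # expect 0
for net in networkmappings:
    print(net + ': ' + str(np.sum(networkdef == networkmappings[net])) + ' parcels')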

1.0 Compute network-to-network activity flow mapping

1.1 Set up basic ActFlow functions


In [54]:
def computeRestFC(subj):
    """
    Computes the Glasser parcellated restFC of a given subject
    """
    filename = resultsdir + 'glmRest_GlasserParcels/' + subj + '_rest_nuisanceResids_Glasser.csv'
    timeseries = np.loadtxt(filename, delimiter=',')
    fcmat = mreg.multregressionconnectivity(timeseries)
    return fcmat

def loadBetas(subj, net='all'):
    """Loads in task betas; returns a miniblocks x regions matrix"""
    datafile = resultsdir + 'glmMiniblockBetaSeries/' + subj + '_miniblock_taskBetas_Glasser.csv'
    betas = np.loadtxt(datafile, delimiter=',')
    betas = betas[:,17:] # Drop the first 17 regressor columns (presumably nuisance regressors), keeping miniblock betas
    if net == 'all':
        return betas.T
    else:
        net_ind = np.where(networkdef==net)[0]
        return betas[net_ind,:].T
    
def computeNetworkToNetworkActFlow(subj,fcmat):
    """
    Given a subject number and a resting-state FC matrix, compute all possible network-to-network ActFlow predictions.
    Note: the double loop visits each unordered network pair twice, so each directed prediction is recomputed and overwritten once; redundant but harmless.
    """
    outdir = resultsdir + 'ManuscriptS1_NetworkIEandITE_Analysis/ActFlowNetworkToNetwork/'
    betas  = loadBetas(subj, net='all')
    
    for net1 in networkmappings:
        for net2 in networkmappings:
            if net1==net2: continue
#             print 'ActFlow:', net1, 'to', net2
            # Find parcels for net1 and reshape
            net1_ind = np.where(networkdef==networkmappings[net1])[0]
            net1_ind.shape = (len(net1_ind),1)
            # Find parcels for net2 and reshape
            net2_ind = np.where(networkdef==networkmappings[net2])[0]
            net2_ind.shape = (len(net2_ind),1)
            
            # Compute betas for net1 and net2
            net1beta = np.squeeze(betas[:,net1_ind.T]) # Reformat to miniblocks x parcels
            net2beta = np.squeeze(betas[:,net2_ind.T]) # Reformat to miniblocks x parcels
            
            # Compute FC for net1 to net2
            w12 = fcmat[net1_ind,net2_ind.T].copy()
            w21 = fcmat[net2_ind,net1_ind.T].copy()
            
            # Randomize topology
#             np.random.shuffle(w12)
#             np.random.shuffle(w21)
            
            # Network-to-network actflow: predicted target activity =
            # source betas (miniblocks x source regions) dot FC weights (source x target)
            net1tonet2flow = np.dot(net1beta,w12)
            net2tonet1flow = np.dot(net2beta,w21)
            
            # Save results to disk
            filename12 = subj + '_' + net1 + 'TO' + net2 + '_actflow_GlasserParcels.csv'
            np.savetxt(outdir + filename12,net1tonet2flow,delimiter=',')
            filename21 = subj + '_' + net2 + 'TO' + net1 + '_actflow_GlasserParcels.csv'
            np.savetxt(outdir + filename21,net2tonet1flow,delimiter=',')
            
def ActFlowSubj(subj):
    """
    Parcel-level ActFlow Pipeline
    """
    outdir = resultsdir + 'MultRegConnRestFC_GlasserParcels/'
    outfile = subj + '_multregconn_restfc.csv'
    fcmat = np.loadtxt(outdir + outfile, delimiter=',')
#     fcmat = np.arctanh(fcmat)
    computeNetworkToNetworkActFlow(subj,fcmat)
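
multregressionconnectivity is a local utility (in utils/) and is not shown here. Below is a minimal sketch of the standard multiple-regression FC estimator it presumably implements: regress each region's time series on all other regions and take the betas as connectivity weights. This is an illustration under that assumption, not the actual module; the source x target orientation is chosen to match how fcmat is indexed in computeNetworkToNetworkActFlow above.

In [ ]:
def multreg_fc_sketch(timeseries):
    """Sketch of multiple-regression FC. timeseries: regions x timepoints.
    Column j of the output holds the betas of all other regions predicting
    region j (diagonal left at zero)."""
    nregions = timeseries.shape[0]
    fcmat = np.zeros((nregions, nregions))
    for j in range(nregions):
        sources = np.delete(np.arange(nregions), j)
        X = timeseries[sources, :].T                     # timepoints x (nregions-1)
        X = np.hstack((np.ones((X.shape[0], 1)), X))     # add an intercept column
        betas = np.linalg.lstsq(X, timeseries[j, :])[0]  # ordinary least squares
        fcmat[sources, j] = betas[1:]                    # drop the intercept
    return fcmat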

1.2 Estimate FC using multiple linear regression

1.2.1 Run MultRegConnFC (rest) and save to disk since it takes a while


In [56]:
for subj in subjNums:
    print 'Computing MultRegConn FC for subject', subj
    fcmat = computeRestFC(subj)
    outdir = resultsdir + 'MultRegConnRestFC_GlasserParcels/'
    outfile = subj + '_multregconn_restfc.csv'
    np.savetxt(outdir + outfile, fcmat, delimiter=',')

1.3 Run network-to-network activity flow mapping


In [57]:
for subj in subjNums:
#     print 'Running ActFlow on subject', subj
    ActFlowSubj(subj)

2.0 Predicted-to-actual similarity analysis on activity flow predictions

2.0.1 Set up basic functions for ActFlowStatistics


In [58]:
def getActFlowActivity(subj, fromnet, tonet):
    datadir = resultsdir + 'ManuscriptS1_NetworkIEandITE_Analysis/ActFlowNetworkToNetwork/'
    datafile = datadir+subj+'_'+fromnet+'TO'+tonet+'_actflow_GlasserParcels.csv'
    activity = np.loadtxt(datafile, delimiter=',')
    return activity
                       
def setUpActFlowMatrix(subj,ruledim,fromnet,tonet):
    """
    Sets up basic SVM matrix for a classification of a particular rule dimension and network for ActFlow
    """
    activity = getActFlowActivity(subj,fromnet,tonet)
    rules, rulesmb = func.importRuleTimingsV3(subj,ruledim)
    
    svm_mat = np.zeros((activity.shape))
    samplecount = 0
    labels = []
    for rule in rulesmb:
        rule_ind = rulesmb[rule].keys()
        sampleend = samplecount + len(rule_ind)
        svm_mat[samplecount:sampleend,:] = activity[rule_ind,:]
        labels.extend(np.ones(len(rule_ind),)*rule)
        samplecount += len(rule_ind)
        
    labels = np.asarray(labels)
    
    return svm_mat, labels
    
def setUpRSAMatrix(subj,ruledim,net):
    """
    Sets up basic SVM Matrix for a classification of a particular rule dimension and network
    """
    
    betas = loadBetas(subj,net=net)
    rules, rulesmb = func.importRuleTimingsV3(subj,ruledim)
    
    svm_mat = np.zeros((betas.shape))
    samplecount = 0
    labels = []
    for rule in rulesmb:
        rule_ind = rulesmb[rule].keys()
        sampleend = samplecount + len(rule_ind)
        svm_mat[samplecount:sampleend,:] = betas[rule_ind,:]
        labels.extend(np.ones(len(rule_ind),)*rule)
        samplecount += len(rule_ind)
        
    labels = np.asarray(labels)
    
    return svm_mat, labels
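
The rulesmb structure returned by func.importRuleTimingsV3 evidently maps each rule to a dict whose keys are miniblock indices. A toy illustration of the stacking logic above, with hypothetical data (three rules, two miniblocks each):

In [ ]:
fake_activity = np.arange(24.0).reshape(6, 4)  # 6 miniblocks x 4 regions (hypothetical)
fake_rulesmb = {1: {0: [], 3: []}, 2: {1: [], 4: []}, 3: {2: [], 5: []}}
svm_mat = np.zeros(fake_activity.shape)
labels = []
count = 0
for rule in fake_rulesmb:
    idx = list(fake_rulesmb[rule].keys())
    svm_mat[count:count+len(idx), :] = fake_activity[idx, :]
    labels.extend(np.ones(len(idx)) * rule)
    count += len(idx)
print(np.asarray(labels))  # -> [1. 1. 2. 2. 3. 3.]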

2.1 Define basic functions for ActFlow RSAs


In [59]:
def actFlowRSA_CV(rsa_actflow, rsa_real, actflow_labels, real_labels):
    """
    For each unique label (rule), compute the spatial Spearman correlation of each trial against the rule prototypes.
    CV approach: when computing a rule's prototype, leave out the trial currently being compared.
    """
#     # Optionally normalize the data first (disabled)
#     rsa_actflow = preprocessing.scale(rsa_actflow,axis=0)
#     rsa_real = preprocessing.scale(rsa_real,axis=0)
    
    nrules = len(np.unique(actflow_labels))
    ncvs = len(real_labels)/nrules # Trials per rule (integer division; assumes a balanced design)

    # Set up an array index of every rule
    rule_ind = {}
    for rule in np.unique(real_labels): 
        rule_ind[rule] = np.where(actflow_labels==rule)[0]
        np.random.shuffle(rule_ind[rule])

    # Compare each actflow trial to all prototypes
    ntrials = rsa_actflow.shape[0]
    rsa_mat = np.zeros((ntrials,nrules))

    for cv in range(ncvs):
        
        # Obtain the *real* prototypes for each of the rules, but leave out the current trial (cv value)
        ind_tmp = {}
        rules_realprototype = {}
        for rule in np.unique(actflow_labels):
            ind_tmp[rule] = rule_ind[rule]
            ind_tmp[rule] = np.delete(ind_tmp[rule],cv) # Delete the current trial from prototype calculation
            
            # Average across trials to obtain prototype
            rules_realprototype[rule] = np.mean(rsa_real[ind_tmp[rule],:],axis=0) 
        
        # Compare the current CV trial against each prototype (which excludes that trial)
        for rule1 in rule_ind:
            for rule2 in rule_ind:
                rsa_mat[rule_ind[rule1][cv],rule2-1] = np.arctanh(stats.spearmanr(rsa_actflow[rule_ind[rule1][cv],:],rules_realprototype[rule2])[0])


    # Now obtain average RSAs for each pairwise comparison (rsa summary matrix)
    rsa_mat_avg = np.zeros((nrules,nrules))
    for rule1 in rule_ind:
        for rule2 in rule_ind:
            rsa_mat_avg[rule1-1,rule2-1] = np.mean(rsa_mat[rule_ind[rule1],rule2-1],axis=0)
    
    return rsa_mat_avg

def subjRSACV((subj,ruledim,fromnet,tonet)):
    """Predicted-to-actual RSA for one subject and one directed network pair.
    (Arguments are tuple-unpacked, Python 2 style, for use with multiprocessing map.)"""
    realmat, reallabels = setUpRSAMatrix(subj,ruledim,networkmappings[tonet])
    actflowmat, actflowlabels = setUpActFlowMatrix(subj,ruledim,fromnet,tonet)
    
    # Demean each sample real data
    realmean = np.mean(realmat,axis=1)
    realmean.shape = (len(realmean),1)
    realmat = realmat - realmean
    # Demean each sample actflow data
    actflowmean = np.mean(actflowmat,axis=1)
    actflowmean.shape = (len(actflowmean),1)
    actflowmat = actflowmat - actflowmean
    
    rsamat = actFlowRSA_CV(actflowmat, realmat, actflowlabels, reallabels)
    # Create a mask to extract the diagonal (correct RSA)
    diag_mask = np.zeros((rsamat.shape),dtype=bool)
    np.fill_diagonal(diag_mask,True)
    # Create a mask to extract the off-diagonal (incorrect RSA)
    offdiag_mask = np.ones((rsamat.shape),dtype=bool)
    np.fill_diagonal(offdiag_mask,False)
    corr_rho = np.mean(rsamat[diag_mask])
    err_rho = np.mean(rsamat[offdiag_mask])
    diff_rho = corr_rho - err_rho
    return corr_rho, err_rho, diff_rho
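
A toy demonstration of the leave-one-trial-out prototype step used above (random numbers, illustration only):

In [ ]:
trials = np.random.rand(4, 5)  # 4 hypothetical trials of one rule, 5 regions
cv = 0                         # hold out trial 0
prototype = np.mean(np.delete(trials, cv, axis=0), axis=0)
rho = stats.spearmanr(trials[cv, :], prototype)[0]
print(np.arctanh(rho))         # Fisher z-transformed rho, as in actFlowRSA_CV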

2.2 Run predicted-to-actual similarity (RSA) using multiprocessing

Keep track of the mapping from networks to matrix indices

In [60]:
# Reorder networks to match the Glasser community ordering (region-to-region matrix ordering)
netkeys = {0:'vis',1:'smn',2:'con',3:'dmn',4:'fpn', 5:'aud', 6:'dan'}
print netkeys
num_networks=len(netkeys)


{0: 'vis', 1: 'smn', 2: 'con', 3: 'dmn', 4: 'fpn', 5: 'aud', 6: 'dan'}

In [61]:
baseline = 0.0
ruledims = ['logic','sensory','motor']
numnets = len(networkmappings)
output = {}
avg_rho = {}
tstats = {}
pvals = {}
corr_rho = {}
err_rho = {}
diff_rho ={}
for ruledim in ruledims:
    output[ruledim] = np.zeros((num_networks,num_networks,len(subjNums)))
    avg_rho[ruledim] = np.zeros((num_networks,num_networks))
    tstats[ruledim] = np.zeros((num_networks,num_networks))
    pvals[ruledim] = np.zeros((num_networks,num_networks))
    corr_rho[ruledim] = np.zeros((num_networks,num_networks,len(subjNums)))
    err_rho[ruledim] = np.zeros((num_networks,num_networks,len(subjNums)))
    diff_rho[ruledim] = np.zeros((num_networks,num_networks,len(subjNums)))
    
    print 'Running', ruledim
    for net1 in range(num_networks):
        
        for net2 in range(num_networks):
            # Skip if net1 and net2 are the same network (no within-network transfer)
            if net1==net2: 
                avg_rho[ruledim][net1,net2] = np.nan
                tstats[ruledim][net1,net2] = np.nan
                pvals[ruledim][net1,net2] = np.nan
                corr_rho[ruledim][net1,net2] = np.nan
                err_rho[ruledim][net1,net2] = np.nan
                diff_rho[ruledim][net1,net2] = np.nan
                continue
            
            inputs = []
            for subj in subjNums: inputs.append((subj,ruledim,netkeys[net1],netkeys[net2]))
            # Run in parallel
            pool = mp.Pool(processes=16)
            results = pool.map_async(subjRSACV,inputs).get()
            pool.close()
            pool.join()    
            
            
            i = 0
            for result in results:

                tmp_corr, tmp_err, tmp_diff = result
                corr_rho[ruledim][net1,net2,i] = tmp_corr
                err_rho[ruledim][net1,net2,i] = tmp_err
                diff_rho[ruledim][net1,net2,i] = tmp_diff
                i += 1
                
            # Store results
            avg_rho[ruledim][net1,net2] = np.mean(diff_rho[ruledim][net1,net2,:])
            output[ruledim][net1,net2,:] = diff_rho[ruledim][net1,net2,:]
            t, p = stats.ttest_rel(corr_rho[ruledim][net1,net2,:],
                                   err_rho[ruledim][net1,net2,:])
            tstats[ruledim][net1,net2] = t
            # Convert the two-sided p-value to a one-sided test (correct > error)
            if t > 0:
                p = p/2.0
            else:
                p = 1.0 - p/2.0
            pvals[ruledim][net1,net2] = p


Running logic
Running sensory
Running motor
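
A quick numeric check of the two-sided-to-one-sided conversion used above (toy numbers):

In [ ]:
t, p = stats.ttest_rel([2.1, 1.8, 2.5, 2.0], [1.0, 1.2, 0.9, 1.1])
p_onesided = p/2.0 if t > 0 else 1.0 - p/2.0
print((t, p_onesided))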

2.3 Compute group statistics


In [62]:
# Compute group stats
baseline = 0.0
triu_indices = np.triu_indices(len(networkmappings),k=1)
tril_indices = np.tril_indices(len(networkmappings),k=-1)

qmat = {}
sigflow = {}
sig_rho = {}
tstat_sig = {}
for ruledim in ruledims:
    qmat[ruledim] = np.zeros((num_networks,num_networks))
    sigflow[ruledim] = np.zeros((num_networks,num_networks))
    sig_rho[ruledim] = np.zeros((num_networks,num_networks))
    tstat_sig[ruledim] = np.zeros((num_networks,num_networks))
    
    tmpq = []
    tmpq.extend(pvals[ruledim][triu_indices])
    tmpq.extend(pvals[ruledim][tril_indices])
    tmpq = mc.fdrcorrection0(tmpq)[1]
    qmat[ruledim][triu_indices] = tmpq[0:len(triu_indices[0])]
    qmat[ruledim][tril_indices] = tmpq[len(triu_indices[0]):]
    # Find significant values only
    sigflow[ruledim] = qmat[ruledim] < 0.05
    sig_rho[ruledim] = np.multiply(avg_rho[ruledim], sigflow[ruledim])
    tstat_sig[ruledim] = np.multiply(tstats[ruledim], sigflow[ruledim])
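
For reference, fdrcorrection0 returns a boolean rejection array and the FDR-adjusted p-values; a toy call on arbitrary p-values:

In [ ]:
rejected, qvals = mc.fdrcorrection0(np.array([0.001, 0.02, 0.04, 0.30]), alpha=0.05)
print(rejected)
print(qvals)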

2.4 Plot results


In [63]:
for ruledim in ruledims:
    fig = plt.figure()
    plt.title('NetworkToNetwork InformationTransfer\n' + ruledim + ' (FDR)', fontsize=18, y=1.04)
    mat = tstat_sig[ruledim]
    np.fill_diagonal(mat,0)
    norm = MidpointNormalize(midpoint=0)
    plt.imshow(mat, origin='lower',norm=norm, vmin=0, cmap='seismic', interpolation='none')
    plt.xticks(netkeys.keys(),netkeys.values())
    plt.yticks(netkeys.keys(),netkeys.values())
    plt.ylabel('Source Network',fontsize=16)
    plt.xlabel('Target Network',fontsize=16)
    plt.tight_layout()
    plt.colorbar()
#     plt.savefig('SFig1_NetworkToNetworkInformationTransfer_' + ruledim + '.pdf')


3.0 Compute significance using FWE correction

3.1 Compute group statistics


In [69]:
fwe_Ts = np.zeros((len(networkmappings),len(networkmappings),len(ruledims)))
fwe_Ps = np.ones((len(networkmappings),len(networkmappings),len(ruledims)))

    
# Consider only off-diagonal (between-network) entries
indices = np.ones((len(networkmappings),len(networkmappings)))
np.fill_diagonal(indices,0)
flatten_ind = np.where(indices==1)
rulecount = 0
for ruledim in ruledims:
    t, p = pt.permutationFWE(diff_rho[ruledim][flatten_ind[0],flatten_ind[1],:], permutations=1000, nproc=15)
    fwe_Ts[flatten_ind[0],flatten_ind[1],rulecount] = t
    fwe_Ps[flatten_ind[0],flatten_ind[1],rulecount] = 1.0 - p
    
    rulecount += 1
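
permutationTesting is a local module (in utils/) and is not shown here. Below is a minimal sketch of the max-statistic, sign-flipping permutation scheme such FWE-corrected tests typically implement: each observed t-value is compared against the null distribution of the maximum t across all tests. This illustrates the general technique, not necessarily the exact implementation; note that the cell above converts the module's output to p-values via 1.0 - p.

In [ ]:
def permutation_fwe_sketch(data, permutations=1000):
    """data: tests x subjects. Returns observed t-values and FWE-corrected
    p-values from the null distribution of the maximum t across tests."""
    t_obs = stats.ttest_1samp(data, 0.0, axis=1)[0]
    max_t_null = np.zeros(permutations)
    for i in range(permutations):
        flips = np.random.choice([-1, 1], size=data.shape[1])     # random sign flips
        t_perm = stats.ttest_1samp(data * flips, 0.0, axis=1)[0]  # permuted t-values
        max_t_null[i] = np.max(t_perm)
    # FWE-corrected p: proportion of null maxima that exceed each observed t
    p_fwe = np.array([np.mean(max_t_null >= t) for t in t_obs])
    return t_obs, p_fwe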

In [70]:
pthresh = .05

# Visualize FWER-corrected T-statistic map
rulecount = 0
for ruledim in ruledims:
    
    # Threshold the T-stat map by FWE-corrected significance
    plt.figure()
    mat = fwe_Ts[:,:,rulecount]
    thresh = fwe_Ps[:,:,rulecount] < pthresh
    mat = np.multiply(mat,thresh)
    # Zero out NaNs and keep only positive effects for visualization
    ind = np.isnan(mat)
    mat[ind] = 0
    pos = mat > 0
    mat = np.multiply(pos,mat)
    norm = MidpointNormalize(midpoint=0)
    plt.imshow(mat,origin='lower',norm=norm,vmin = 0,interpolation='none',cmap='seismic')
    plt.colorbar(fraction=0.046)
    plt.title('NetworkToNetwork InformationTransfer\n' + ruledim + ' (FWE)', fontsize=18, y=1.04)
    plt.xticks(netkeys.keys(),netkeys.values())
    plt.yticks(netkeys.keys(),netkeys.values())
    plt.ylabel('Source Network',fontsize=16)
    plt.xlabel('Target Network',fontsize=16)
    plt.tight_layout()
#     plt.savefig('SFig1_NetworkToNetworkInformationTransfer_' + ruledim + '_FWER.pdf')

    
    # Print out statistics for the manuscript
    sig_ind = thresh
    nonsig_ind = np.logical_not(thresh)
    np.fill_diagonal(nonsig_ind,False) # Exclude the diagonal from the non-significant set
    
    print ruledim, 'results'
    print 'Average significant ITE effect =', np.mean(avg_rho[ruledim][sig_ind])
    print 'Average significant T-stat =', np.mean(fwe_Ts[:,:,rulecount][sig_ind])
    print 'Maximum significant Pfwe =', np.max(fwe_Ps[:,:,rulecount][sig_ind])
    print '\n'
    print 'Average non-significant ITE effect =', np.mean(avg_rho[ruledim][nonsig_ind])
    print 'Average non-significant T-stat =', np.mean(fwe_Ts[:,:,rulecount][nonsig_ind])
    print 'Minimum non-significant Pfwe =', np.min(fwe_Ps[:,:,rulecount][nonsig_ind])
    print '\n\n'
    
    rulecount += 1


logic results
Average significant ITE effect = 0.00863840691587
Average significant T-stat = 4.72892244337
Maximum significant Pfwe = 0.015


Average non-significant ITE effect = 0.00139893089882
Average non-significant T-stat = 0.997202974496
Minimum non-significant Pfwe = 0.109



sensory results
Average significant ITE effect = 0.00613530901641
Average significant T-stat = 4.00949241597
Maximum significant Pfwe = 0.046


Average non-significant ITE effect = 0.000773893243119
Average non-significant T-stat = 0.683210812105
Minimum non-significant Pfwe = 0.107



motor results
Average significant ITE effect = 0.0106989478558
Average significant T-stat = 5.36757882387
Maximum significant Pfwe = 0.001


Average non-significant ITE effect = 0.000891599875751
Average non-significant T-stat = 0.774377879108
Minimum non-significant Pfwe = 0.355



