In [302]:
# AUTHOR Christian Dansereau 2016

%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

import pandas as pd
import scipy.io
import os
import nibabel as nib
from nibabel.affines import apply_affine
from nilearn import plotting
import numpy.linalg as npl

Load data


In [303]:
#seed_data = pd.read_csv('20160128_AD_Decrease_Meta_Christian.csv')

template_036= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale036.nii.gz')
template_020= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale020.nii.gz')
template_012= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale012.nii.gz')
template_007= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale007.nii.gz')

scale = '36'
flag_dmn = False

# only the 7- and 36-network scales are wired up in this analysis
if scale == '7':
    template = template_007
else:
    template = template_036

#seed_data = pd.read_csv('20160404_AD_Decrease_Meta_DMN_nonDMN_Final.csv')
#seed_data = pd.read_csv('20160404_AD_Increase_Meta_DMN_nonDMN_Final.csv')

#seed_data = pd.read_csv('20160205_MCI_Decrease_Meta_DMN_nonDMN_Final.csv')
#seed_data = pd.read_csv('20160204_MCI_Increase_Meta_DMN_nonDMN_Final.csv')

#seed_data = pd.read_csv('20160404_ADMCI_Decrease_Meta_DMN_nonDMN_Final.csv')
seed_data = pd.read_csv('20160404_ADMCI_Increase_Meta_DMN_nonDMN_Final.csv')

if flag_dmn:
    #output_stats = 'AD_decrease_scale'+scale+'_stats_seedDMN.mat'
    #output_vol   = 'AD_decrease_ratio_scale'+scale+'_vol_seedDMN.nii.gz'
    #output_stats = 'AD_increase_scale'+scale+'_stats_seedDMN.mat'
    #output_vol   = 'AD_increase_ratio_scale'+scale+'_vol_seedDMN.nii.gz'

    #output_stats = 'MCI_decrease_scale'+scale+'_stats_seedDMN.mat'
    #output_vol   = 'MCI_decrease_ratio_scale'+scale+'_vol_seedDMN.nii.gz'
    #output_stats = 'MCI_increase_scale'+scale+'_stats_seedDMN.mat'
    #output_vol   = 'MCI_increase_ratio_scale'+scale+'_vol_seedDMN.nii.gz'
    
    #output_stats = 'ADMCI_decrease_scale'+scale+'_stats_seedDMN.mat'
    #output_vol   = 'ADMCI_decrease_ratio_scale'+scale+'_vol_seedDMN.nii.gz'
    output_stats = 'ADMCI_increase_scale'+scale+'_stats_seedDMN.mat'
    output_vol   = 'ADMCI_increase_ratio_scale'+scale+'_vol_seedDMN.nii.gz'
    
else:
    #output_stats = 'AD_decrease_scale'+scale+'_stats_nonDMN.mat'
    #output_vol   = 'AD_decrease_ratio_scale'+scale+'_vol_seednonDMN.nii.gz'
    #output_stats = 'AD_increase_scale'+scale+'_stats_seednonDMN.mat'
    #output_vol   = 'AD_increase_ratio_scale'+scale+'_vol_seednonDMN.nii.gz'

    #output_stats = 'MCI_decrease_scale'+scale+'_stats_seednonDMN.mat'
    #output_vol   = 'MCI_decrease_ratio_scale'+scale+'_vol_seednonDMN.nii.gz'
    #output_stats = 'MCI_increase_scale'+scale+'_stats_seednonDMN.mat'
    #output_vol   = 'MCI_increase_ratio_scale'+scale+'_vol_seednonDMN.nii.gz'
    
    #output_stats = 'ADMCI_decrease_scale'+scale+'_stats_seednonDMN.mat'
    #output_vol   = 'ADMCI_decrease_ratio_scale'+scale+'_vol_seednonDMN.nii.gz'
    output_stats = 'ADMCI_increase_scale'+scale+'_stats_seednonDMN.mat'
    output_vol   = 'ADMCI_increase_ratio_scale'+scale+'_vol_seednonDMN.nii.gz'

In [304]:
seed_data


Out[304]:
PMID Seed_cambridge Author Year subjects x y z Contrast Direction
0 18786570 5 Zhang 2009 32 -13.00 54.00 23.00 ADMCI Increase
1 18786570 5 Zhang 2009 32 -45.00 33.00 48.00 ADMCI Increase
2 18786570 5 Zhang 2009 32 -45.00 24.00 18.00 ADMCI Increase
3 18786570 5 Zhang 2009 32 -51.00 3.00 15.00 ADMCI Increase
4 18786570 5 Zhang 2009 32 57.00 42.00 24.00 ADMCI Increase
5 18786570 5 Zhang 2009 32 12.00 57.00 -15.00 ADMCI Increase
6 18786570 5 Zhang 2009 32 -33.00 -9.00 -36.00 ADMCI Increase
7 20656843 5 Zhang 2010 27 -33.00 -13.00 43.00 ADMCI Increase
8 20656843 5 Zhang 2010 27 -39.00 -15.00 42.00 ADMCI Increase
9 20656843 5 Zhang 2010 27 51.00 24.00 9.00 ADMCI Increase
10 20656843 5 Zhang 2010 27 15.00 9.00 51.00 ADMCI Increase
11 20656843 5 Zhang 2010 27 -12.00 39.00 54.00 ADMCI Increase
12 20656843 5 Zhang 2010 27 37.00 -85.00 30.00 ADMCI Increase
13 20656843 5 Zhang 2010 27 -52.00 -61.00 45.00 ADMCI Increase
14 20656843 5 Zhang 2010 27 -30.00 -12.00 -21.00 ADMCI Increase
15 20656843 5 Zhang 2010 28 -15.00 0.00 51.00 ADMCI Increase
16 20656843 5 Zhang 2010 28 -30.00 33.00 34.00 ADMCI Increase
17 20656843 5 Zhang 2010 28 15.00 6.00 51.00 ADMCI Increase
18 20656843 5 Zhang 2010 28 18.00 -13.00 58.00 ADMCI Increase
19 20656843 5 Zhang 2010 28 -42.00 48.00 24.00 ADMCI Increase
20 20656843 5 Zhang 2010 28 51.00 28.00 12.00 ADMCI Increase
21 20656843 5 Zhang 2010 28 52.00 20.00 28.00 ADMCI Increase
22 20656843 5 Zhang 2010 28 -45.00 -35.00 49.00 ADMCI Increase
23 20656843 5 Zhang 2010 28 57.00 -39.00 42.00 ADMCI Increase
24 19833321 5 Sheline 2010 83 7.91 -102.08 8.86 ADMCI Increase
25 20410145 6 Zhou 2010 24 12.00 32.00 30.00 ADMCI Increase
26 20410145 6 Zhou 2010 24 -12.00 34.00 16.00 ADMCI Increase
27 20410145 6 Zhou 2010 24 10.00 34.00 -6.00 ADMCI Increase
28 20410145 6 Zhou 2010 24 -26.00 -12.00 10.00 ADMCI Increase
29 20410145 6 Zhou 2010 24 -18.00 -2.00 10.00 ADMCI Increase
... ... ... ... ... ... ... ... ... ... ...
128 22451310 5 Liang 2012 32 9.00 -12.00 0.00 ADMCI Increase
129 22451310 6 Liang 2012 32 -15.00 -6.00 21.00 ADMCI Increase
130 22451310 6 Liang 2012 32 -6.00 12.00 3.00 ADMCI Increase
131 22451310 6 Liang 2012 32 -15.00 12.00 12.00 ADMCI Increase
132 22451310 6 Liang 2012 32 -6.00 -12.00 3.00 ADMCI Increase
133 22451310 6 Liang 2012 32 -9.00 -12.00 12.00 ADMCI Increase
134 22451310 6 Liang 2012 32 -45.00 -27.00 15.00 ADMCI Increase
135 22451310 6 Liang 2012 32 -39.00 -33.00 18.00 ADMCI Increase
136 22451310 6 Liang 2012 32 6.00 -12.00 15.00 ADMCI Increase
137 22451310 6 Liang 2012 32 15.00 -9.00 12.00 ADMCI Increase
138 22451310 6 Liang 2012 32 9.00 -12.00 0.00 ADMCI Increase
139 22451310 7 Liang 2012 32 -15.00 -6.00 21.00 ADMCI Increase
140 22451310 7 Liang 2012 32 -6.00 12.00 3.00 ADMCI Increase
141 22451310 7 Liang 2012 32 -15.00 12.00 12.00 ADMCI Increase
142 22451310 7 Liang 2012 32 -6.00 -12.00 3.00 ADMCI Increase
143 22451310 7 Liang 2012 32 -9.00 -12.00 12.00 ADMCI Increase
144 22451310 7 Liang 2012 32 -45.00 -27.00 15.00 ADMCI Increase
145 22451310 7 Liang 2012 32 -39.00 -33.00 18.00 ADMCI Increase
146 22451310 7 Liang 2012 32 6.00 -12.00 15.00 ADMCI Increase
147 22451310 7 Liang 2012 32 15.00 -9.00 12.00 ADMCI Increase
148 22451310 7 Liang 2012 32 9.00 -12.00 0.00 ADMCI Increase
149 25043909 2 Pasquini 2015 44 -30.00 -14.00 -26.00 ADMCI Increase
150 25547636 5 Gardini 2015 42 -1.88 -39.72 5.85 ADMCI Increase
151 25547636 5 Gardini 2015 42 4.47 -42.82 6.03 ADMCI Increase
152 25547636 5 Gardini 2015 42 42.33 -19.32 -29.10 ADMCI Increase
153 25745400 5 Yi 2015 32 -20.00 -70.00 16.00 ADMCI Increase
154 25745400 5 Yi 2015 32 22.00 -68.00 58.00 ADMCI Increase
155 25745400 5 Yi 2015 32 56.00 -50.00 14.00 ADMCI Increase
156 25745400 5 Yi 2015 32 -48.00 -72.00 18.00 ADMCI Increase
157 25745400 5 Yi 2015 32 0.00 -50.00 -2.00 ADMCI Increase

158 rows × 10 columns


In [305]:
seed_data[seed_data['Seed_cambridge']==5][['x','y','z']].values.shape


Out[305]:
(65, 3)

Get the number of coordinates reported for each network
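
For reference, apply_affine handles the world/voxel conversion used in the next cell. A minimal sketch with a made-up 3 mm affine (not the actual Cambridge template affine):

import numpy as np
import numpy.linalg as npl
from nibabel.affines import apply_affine

# hypothetical 3 mm isotropic affine roughly centred on MNI space
affine = np.array([[3., 0., 0.,  -78.],
                   [0., 3., 0., -111.],
                   [0., 0., 3.,  -75.],
                   [0., 0., 0.,    1.]])

mni = [-45., 33., 48.]                     # world (MNI) coordinate in mm
vox = apply_affine(npl.inv(affine), mni)   # -> voxel indices: [11, 48, 41]
back = apply_affine(affine, vox)           # round-trips to the MNI point
print(vox, back)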


In [306]:
from numpy.linalg import norm

# Find the network closest to a world (MNI) coordinate: compute the
# Euclidean distance to every in-brain voxel and return the label of
# the nearest one.
def get_nearest_net(template, world_coor):
    list_coord = np.array(np.where(template.get_data() > 0))
    mni_coord = apply_affine(template.get_affine(), list_coord.T)
    distances = norm(mni_coord - np.array(world_coor), axis=1)
    idx_nearest_net = np.where(distances == np.min(distances))[0][0]
    return int(template.get_data()[list_coord[:, idx_nearest_net][0],
                                   list_coord[:, idx_nearest_net][1],
                                   list_coord[:, idx_nearest_net][2]])

#get_nearest_net(template,[-15,-10,-10])

# Convert world (MNI) coordinates to voxel indices in the template space
def get_world2vox(template, mni_coord):
    return np.round(apply_affine(npl.inv(template.get_affine()), mni_coord) + [1])
    
# one accumulator per network in the parcellation
network_votes = np.zeros(int(np.max(template.get_data())))

# keep only the DMN seeds (Cambridge network 5) or only the non-DMN seeds
if flag_dmn:
    seed_data = seed_data[seed_data['Seed_cambridge'] == 5]
else:
    seed_data = seed_data[seed_data['Seed_cambridge'] != 5]

# get the voxel coordinates of the MNI seeds
mni_space_targets = seed_data[['x', 'y', 'z']].values
vox_coord = get_world2vox(template, mni_space_targets).astype(int)
votes = []
n_outofbrain = 0
for i in range(vox_coord.shape[0]):
    net_class = template.get_data()[vox_coord[i, 0], vox_coord[i, 1], vox_coord[i, 2]]
    if net_class == 0:
        # the seed falls outside the parcellation: assign the nearest network
        n_outofbrain += 1
        votes.append(get_nearest_net(template, [mni_space_targets[i, 0],
                                                mni_space_targets[i, 1],
                                                mni_space_targets[i, 2]]))
    else:
        votes.append(net_class)

print('Out of brain coordinates: ' + str(n_outofbrain))
votes = np.array(votes)

# take one vote per study: within each study (PMID), a network's vote is
# the fraction of that study's seeds falling in it
uni_pmid = np.unique(seed_data['PMID'])
frequency_votes = np.zeros((len(uni_pmid), len(network_votes)))
for i in range(len(uni_pmid)):
    study_votes = votes[(seed_data['PMID'] == uni_pmid[i]).values]
    for j in study_votes:
        frequency_votes[i, j - 1] = (study_votes == j).sum() / float(len(study_votes))
print(frequency_votes)

# compile the stats for each network: average the per-study vote ratios
network_votes = np.mean(frequency_votes, axis=0)
print(network_votes)


Out of brain coordinates: 11
[[ 0.          0.15384615  0.          0.          0.          0.          0.
   0.30769231  0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.23076923  0.          0.          0.
   0.          0.          0.          0.          0.          0.
   0.15384615  0.          0.          0.          0.          0.          0.
   0.          0.15384615]
 [ 0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          1.          0.          0.
   0.        ]
 [ 0.          0.          0.          0.          0.          0.          0.
   0.72727273  0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.18181818  0.          0.          0.
   0.04545455  0.          0.          0.          0.          0.          0.
   0.04545455]
 [ 0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.5         0.          0.          0.          0.          0.
   0.5         0.          0.          0.          0.          0.          0.
   0.        ]
 [ 0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.08695652  0.          0.04347826  0.          0.          0.
   0.          0.          0.17391304  0.          0.04347826  0.47826087
   0.          0.          0.          0.          0.08695652  0.04347826
   0.04347826  0.          0.        ]
 [ 0.          0.11538462  0.03846154  0.          0.          0.          0.
   0.07692308  0.          0.03846154  0.          0.          0.          0.
   0.          0.          0.          0.03846154  0.07692308  0.
   0.07692308  0.          0.          0.          0.          0.          0.
   0.15384615  0.03846154  0.          0.          0.          0.
   0.15384615  0.15384615  0.03846154]
 [ 0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.5         0.          0.5
   0.        ]
 [ 0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          0.          0.          0.          0.          0.          0.
   0.          1.          0.          0.          0.          0.          0.
   0.        ]]
[ 0.          0.03365385  0.00480769  0.          0.          0.          0.
  0.13898601  0.          0.00480769  0.          0.          0.          0.
  0.          0.01086957  0.          0.03908863  0.00961538  0.
  0.00961538  0.          0.0625      0.02173913  0.02272727  0.00543478
  0.05978261  0.03846154  0.07298951  0.125       0.          0.01086957
  0.19293478  0.02466555  0.08173077  0.02972028]

In [307]:
get_nearest_net(template,[-24,-10, 22])

get_nearest_net(template,[17, -14, -22])


Out[307]:
30

In [308]:
def gen1perm(n_seeds, proba):
    # draw n_seeds random network labels with probability proportional to
    # network volume, then return the resulting per-network vote ratios
    ratio_votes_1study = np.zeros_like(proba)
    perm_votes = np.random.choice(range(0, len(proba)), size=(n_seeds, 1), p=proba)
    for j in perm_votes:
        ratio_votes_1study[j] = (perm_votes == j).sum() / float(len(perm_votes))
    return ratio_votes_1study

# sanity check that the probabilities are respected:
#print(proba_networks)
#gen1perm(10000,proba_networks)

Generate random coordinates

Each random coordinate is assigned to a network with a probability proportional to that network's volume relative to the total volume of the brain.
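
For intuition, here is one simulated "study" under this null, with three toy networks occupying 20%, 30% and 50% of the brain (the same np.random.choice draw that gen1perm uses):

import numpy as np

proba = np.array([0.2, 0.3, 0.5])   # toy volume fractions of 3 networks
draws = np.random.choice(len(proba), size=10, p=proba)   # 10 random seeds
ratios = np.bincount(draws, minlength=len(proba)) / float(len(draws))
print(ratios)   # e.g. [ 0.2  0.4  0.4] -- one study's null vote ratios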


In [309]:
'''
from numpy.random import permutation
def permute_table(frequency_votes,n_iter):
    h0_results = []
    for n in range(n_iter):
        perm_freq = frequency_votes.copy()
        #print perm_freq
        for i in range(perm_freq.shape[0]):
            perm_freq[i,:] = permutation(perm_freq[i,:])
        #print perm_freq
        h0_results.append(np.mean(perm_freq,axis=0))
    return np.array(h0_results).T
'''
def compute_freq(votes, data_ratio_votes, seed_data, proba, n_perm=10000):
    # build the null distribution: for each permutation, redraw every
    # study's seeds at random and recompute the per-network vote ratios
    uni_pmid = np.unique(seed_data['PMID'])
    ratio_votes = np.zeros((data_ratio_votes.shape[0], data_ratio_votes.shape[1], n_perm))
    for idx_perm in range(ratio_votes.shape[-1]):
        for i in range(len(uni_pmid)):
            study_votes = votes[(seed_data['PMID'] == uni_pmid[i]).values]
            n_seeds = len(study_votes)
            ratio_votes[i, :, idx_perm] = gen1perm(n_seeds, proba)

    # average across studies, then sort each network's null values
    freq_data = np.mean(ratio_votes, axis=0)
    for i in range(freq_data.shape[0]):
        freq_data[i, :] = np.sort(freq_data[i, :])[::-1]

    return freq_data

# total number of in-brain voxels
total_volume = np.sum(template.get_data() > 0)

# probability of each network = its volume as a fraction of the brain
proba_networks = []
for i in range(1, len(network_votes) + 1):
    proba_networks.append(np.sum(template.get_data() == i) / (total_volume * 1.))
proba_networks = np.array(proba_networks)
print(np.sum(proba_networks))
print(proba_networks)

# generate random values 
'''
def gen_rnd_hits(proba,n_seeds):
    results_h0 =  np.random.choice(range(0,len(proba)),size=(n_seeds,1000),p=proba)
    #results_h0 = permute_table(frequency_votes,1000)
    print results_h0.shape
    ditributions = []
    for i in range(frequency_votes.shape[1]):
        results_h0[i,:] = np.sort(results_h0[i,:])[::-1]
        #ditributions.append(one_way_pdf)   
    #return ditributions
    return results_h0
'''
#dist_data = gen_rnd_hits(proba_networks,np.sum(network_votes))
dist_data = compute_freq(votes,frequency_votes,seed_data,proba_networks)


1.0
[ 0.01292611  0.01787506  0.01579858  0.01342793  0.01671569  0.01412009
  0.01960547  0.02496972  0.01775394  0.02441599  0.01770202  0.02941685
  0.02853435  0.01915556  0.02929573  0.03292957  0.03163177  0.02310088
  0.02479668  0.03317183  0.04075099  0.02912269  0.02611178  0.02978024
  0.04455788  0.02891504  0.01429313  0.02278941  0.02957259  0.03215089
  0.0433293   0.03299879  0.03421007  0.03739401  0.04514622  0.06153314]

In [310]:
plt.figure()
plt.hist(dist_data[0],bins=np.arange(0,1,.01))
plt.figure()
plt.plot(dist_data[0].T)


Out[310]:
[<matplotlib.lines.Line2D at 0x7fc20b822c50>]

Generate the p-values for each network
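
The empirical p-value below follows the standard permutation formula p = (b + 1) / (N + 1), where b is the number of null samples exceeding the observed vote frequency and N is the number of permutations; the +1 terms keep p from ever being exactly zero.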


In [311]:
def getpval_old(nhit, dist_data):
    distribution_val = np.histogram(dist_data, bins=np.arange(0, 1, 0.01))
    idx_bin = np.where((distribution_val[1] >= round(nhit, 2)) & (distribution_val[1] <= round(nhit, 2)))[0][0]
    return (np.sum(distribution_val[0][idx_bin:-1]) + 1) / (dist_data.shape[0] + 1.)

def getpval(target, dist_data):
    # empirical p-value: fraction of null samples above the observed value,
    # with a +1 correction so p is never exactly zero
    dist_sorted = np.sort(np.copy(dist_data))
    b = np.sum(dist_sorted > target)
    return (b + 1.) / (dist_data.shape[0] + 1.)

print(network_votes)

pval_results = []
for i in range(0, len(dist_data)):
    pval_results.append(getpval(network_votes[i], dist_data[i, :]))

print(pval_results)
plt.figure()
plt.bar(np.arange(1, len(pval_results) + 1), pval_results, width=0.5, align='center')
plt.xlabel('Networks')
plt.ylabel('p-value')


[ 0.          0.03365385  0.00480769  0.          0.          0.          0.
  0.13898601  0.          0.00480769  0.          0.          0.          0.
  0.          0.01086957  0.          0.03908863  0.00961538  0.
  0.00961538  0.          0.0625      0.02173913  0.02272727  0.00543478
  0.05978261  0.03846154  0.07298951  0.125       0.          0.01086957
  0.19293478  0.02466555  0.08173077  0.02972028]
[0.70642935706429355, 0.15408459154084592, 0.68043195680431956, 0.71362863713628633, 0.79532046795320466, 0.72792720727927207, 0.84511548845115492, 0.0053994600539946005, 0.80821917808219179, 0.84091590840915909, 0.80321967803219674, 0.94290570942905705, 0.93360663933606636, 0.83551644835516448, 0.93660633936606341, 0.73662633736626337, 0.94660533946605341, 0.18858114188581143, 0.67793220677932209, 0.95880411958804124, 0.8951104889511049, 0.93460653934606541, 0.14478552144785523, 0.38886111388861117, 0.62303769623037697, 0.83961603839616039, 0.087791220877912204, 0.19548045195480451, 0.11848815118488151, 0.0184981501849815, 0.98490150984901514, 0.74242575742425754, 0.00059994000599940007, 0.49305069493050696, 0.17698230176982302, 0.73082691730826921]
Out[311]:
<matplotlib.text.Text at 0x7fc20a734310>

Map the p-values to the template
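
proteus is an in-house package, so as a rough sketch of what vec2map presumably does (paint one value per network label onto the parcellation volume; the name vec2map_sketch and its behaviour are assumptions, not the proteus API):

import numpy as np
import nibabel as nib

def vec2map_sketch(values, template):
    # assumed behaviour: voxels with parcel label k get values[k-1],
    # background voxels (label 0) stay at 0
    labels = template.get_data().astype(int)
    out = np.zeros(labels.shape)
    for k in range(1, len(values) + 1):
        out[labels == k] = values[k - 1]
    return nib.Nifti1Image(out, template.get_affine())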


In [312]:
from proteus.matrix import tseries as ts
hitfreq_vol = ts.vec2map(network_votes,template)
pval_vol = ts.vec2map(1-np.array(pval_results),template)
plt.figure()
plotting.plot_stat_map(hitfreq_vol,cut_coords=(0,0,0),draw_cross=False)
plt.figure()
plotting.plot_stat_map(pval_vol,cut_coords=(0,0,0),draw_cross=False)


Out[312]:
<nilearn.plotting.displays.OrthoSlicer at 0x7fc20d4dbed0>
<matplotlib.figure.Figure at 0x7fc20a378990>
<matplotlib.figure.Figure at 0x7fc20a4697d0>

FDR correction of the p-values
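
fdrcorrection0 applies the Benjamini-Hochberg step-up procedure (its default, assuming independent tests). For reference, a minimal sketch of the same adjustment:

import numpy as np

def bh_fdr(pvals, alpha=0.05):
    # Benjamini-Hochberg: sort p-values, scale p_(i) by m/i, then
    # enforce monotonicity from the largest p-value down
    p = np.asarray(pvals, dtype=float)
    m = len(p)
    order = np.argsort(p)
    adj = p[order] * m / np.arange(1, m + 1)
    adj = np.minimum.accumulate(adj[::-1])[::-1]
    adj_full = np.empty(m)
    adj_full[order] = np.minimum(adj, 1.0)
    return adj_full <= alpha, adj_full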


In [313]:
# FDR correction (Benjamini-Hochberg) of the network p-values
from statsmodels.sandbox.stats.multicomp import fdrcorrection0

fdr_test, fdr_pval = fdrcorrection0(pval_results, alpha=0.05)
print(network_votes)
print(fdr_test)
print(fdr_pval)


[ 0.          0.03365385  0.00480769  0.          0.          0.          0.
  0.13898601  0.          0.00480769  0.          0.          0.          0.
  0.          0.01086957  0.          0.03908863  0.00961538  0.
  0.00961538  0.          0.0625      0.02173913  0.02272727  0.00543478
  0.05978261  0.03846154  0.07298951  0.125       0.          0.01086957
  0.19293478  0.02466555  0.08173077  0.02972028]
[False False False False False False False False False False False False
 False False False False False False False False False False False False
 False False False False False False False False  True False False False]
[ 0.98490151  0.70372963  0.98490151  0.98490151  0.98490151  0.98490151
  0.98490151  0.09719028  0.98490151  0.98490151  0.98490151  0.98490151
  0.98490151  0.98490151  0.98490151  0.98490151  0.98490151  0.70372963
  0.98490151  0.98490151  0.98490151  0.98490151  0.70372963  0.98490151
  0.98490151  0.98490151  0.70372963  0.70372963  0.70372963  0.2219778
  0.98490151  0.98490151  0.02159784  0.98490151  0.70372963  0.98490151]

In [314]:
# save the results

path_output = '/home/cdansereau/git/Projects/metaad/maps_results/'
stats_results = {'Hits':network_votes ,'pvalues':pval_results,'fdr_test':fdr_test,'fdr_pval':fdr_pval,'n_outofbrain':n_outofbrain}
scipy.io.savemat(path_output + output_stats, stats_results)
hitfreq_vol.to_filename(os.path.join(path_output,output_vol))
#hitfreq_vol.to_filename(os.path.join('/home/cdansereau/git/Projects/metaad/maps_results/','AD_pval_vol.nii.gz'))