In [1]:
# AUTHOR Christian Dansereau 2016

%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

import pandas as pd
import scipy.io
import os
import nibabel as nib
from nibabel.affines import apply_affine
from nilearn import plotting
import numpy.linalg as npl

Load data


In [2]:
#seed_data = pd.read_csv('20160128_AD_Decrease_Meta_Christian.csv')

template_036= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale036.nii.gz')
template_020= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale020.nii.gz')
template_012= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale012.nii.gz')
template_007= nib.load('/home/cdansereau/data/template_cambridge_basc_multiscale_nii_sym/template_cambridge_basc_multiscale_sym_scale007.nii.gz')

scale = '7'  # parcellation scale: '7' selects template_007; anything else falls back to template_036

if scale == '7':
    template = template_007
else:
    template = template_036

#seed_data = pd.read_csv('20160205_AD_Decrease_Meta_Final.csv')
#seed_data = pd.read_csv('20160129_AD_Increase_Meta_Final.csv')

#seed_data = pd.read_csv('20160205_MCI_Decrease_Meta_Final.csv')
#seed_data = pd.read_csv('20160204_MCI_Increase_Meta_Final.csv')

seed_data = pd.read_csv('20160205_ADMCI_Decrease_Meta_Final.csv')
#seed_data = pd.read_csv('20160129_ADMCI_Increase_Meta_Final.csv')

# ******************* #

#output_stats = 'AD_decrease_scale'+scale+'_stats.mat'
#output_vol   = 'AD_decrease_ratio'+scale+'_vol.nii.gz'
#output_stats = 'AD_increase_scale'+scale+'_stats.mat'
#output_vol   = 'AD_increase_ratio_scale'+scale+'_vol.nii.gz'

#output_stats = 'MCI_decrease_scale'+scale+'_stats.mat'
#output_vol   = 'MCI_decrease_ratio_scale'+scale+'_vol.nii.gz'
#output_stats = 'MCI_increase_scale'+scale+'_stats.mat'
#output_vol   = 'MCI_increase_ratio_scale'+scale+'_vol.nii.gz'

output_stats = 'ADMCI_decrease_scale'+scale+'_stats.mat'
output_vol   = 'ADMCI_decrease_ratio_scale'+scale+'_vol.nii.gz'
#output_stats = 'ADMCI_increase_scale'+scale+'_stats.mat'
#output_vol   = 'ADMCI_increase_ratio_scale'+scale+'_vol.nii.gz'

In [3]:
seed_data


Out[3]:
PMID Author Year subjects x y z Contrast Direction
0 18786570 Zhang 2009 32 -13.00 54.00 23.00 ADMCI Increase
1 18786570 Zhang 2009 32 -45.00 33.00 48.00 ADMCI Increase
2 18786570 Zhang 2009 32 -45.00 24.00 18.00 ADMCI Increase
3 18786570 Zhang 2009 32 -51.00 3.00 15.00 ADMCI Increase
4 18786570 Zhang 2009 32 57.00 42.00 24.00 ADMCI Increase
5 18786570 Zhang 2009 32 12.00 57.00 -15.00 ADMCI Increase
6 18786570 Zhang 2009 32 -33.00 -9.00 -36.00 ADMCI Increase
7 20656843 Zhang 2010 27 -33.00 -13.00 43.00 ADMCI Increase
8 20656843 Zhang 2010 27 -39.00 -15.00 42.00 ADMCI Increase
9 20656843 Zhang 2010 27 51.00 24.00 9.00 ADMCI Increase
10 20656843 Zhang 2010 27 15.00 9.00 51.00 ADMCI Increase
11 20656843 Zhang 2010 27 -12.00 39.00 54.00 ADMCI Increase
12 20656843 Zhang 2010 27 37.00 -85.00 30.00 ADMCI Increase
13 20656843 Zhang 2010 27 -52.00 -61.00 45.00 ADMCI Increase
14 20656843 Zhang 2010 27 -30.00 -12.00 -21.00 ADMCI Increase
15 20656843 Zhang 2010 28 -15.00 0.00 51.00 ADMCI Increase
16 20656843 Zhang 2010 28 -30.00 33.00 34.00 ADMCI Increase
17 20656843 Zhang 2010 28 15.00 6.00 51.00 ADMCI Increase
18 20656843 Zhang 2010 28 18.00 -13.00 58.00 ADMCI Increase
19 20656843 Zhang 2010 28 -42.00 48.00 24.00 ADMCI Increase
20 20656843 Zhang 2010 28 51.00 28.00 12.00 ADMCI Increase
21 20656843 Zhang 2010 28 52.00 20.00 28.00 ADMCI Increase
22 20656843 Zhang 2010 28 -45.00 -35.00 49.00 ADMCI Increase
23 20656843 Zhang 2010 28 57.00 -39.00 42.00 ADMCI Increase
24 19833321 Sheline 2010 83 7.91 -102.08 8.86 ADMCI Increase
25 20410145 Zhou 2010 24 12.00 32.00 30.00 ADMCI Increase
26 20410145 Zhou 2010 24 -12.00 34.00 16.00 ADMCI Increase
27 20410145 Zhou 2010 24 10.00 34.00 -6.00 ADMCI Increase
28 20410145 Zhou 2010 24 -26.00 -12.00 10.00 ADMCI Increase
29 20410145 Zhou 2010 24 -18.00 -2.00 10.00 ADMCI Increase
... ... ... ... ... ... ... ... ... ...
108 20930266 Bai 2011 44 -3.00 -63.00 9.00 ADMCI Increase
109 21982164 Jin 2012 16 8.00 46.00 36.00 ADMCI Increase
110 21982164 Jin 2012 16 -46.00 -42.00 54.00 ADMCI Increase
111 21982164 Jin 2012 16 -2.00 -30.00 36.00 ADMCI Increase
112 21982164 Jin 2012 16 4.00 -28.00 36.00 ADMCI Increase
113 22005016 Han 2012 80 -45.00 -27.00 -27.00 ADMCI Increase
114 22005016 Han 2012 80 -6.00 30.00 -27.00 ADMCI Increase
115 22005016 Han 2012 80 60.00 12.00 24.00 ADMCI Increase
116 22005016 Han 2012 80 -57.00 -9.00 36.00 ADMCI Increase
117 22451310 Liang 2012 32 -36.00 57.00 21.00 ADMCI Increase
118 22451310 Liang 2012 32 -45.00 21.00 39.00 ADMCI Increase
119 22451310 Liang 2012 32 -15.00 -6.00 21.00 ADMCI Increase
120 22451310 Liang 2012 32 -6.00 12.00 3.00 ADMCI Increase
121 22451310 Liang 2012 32 -15.00 12.00 12.00 ADMCI Increase
122 22451310 Liang 2012 32 -6.00 -12.00 3.00 ADMCI Increase
123 22451310 Liang 2012 32 -9.00 -12.00 12.00 ADMCI Increase
124 22451310 Liang 2012 32 -45.00 -27.00 15.00 ADMCI Increase
125 22451310 Liang 2012 32 -39.00 -33.00 18.00 ADMCI Increase
126 22451310 Liang 2012 32 6.00 -12.00 15.00 ADMCI Increase
127 22451310 Liang 2012 32 15.00 -9.00 12.00 ADMCI Increase
128 22451310 Liang 2012 32 9.00 -12.00 0.00 ADMCI Increase
129 25043909 Pasquini 2015 44 -30.00 -14.00 -26.00 ADMCI Increase
130 25547636 Gardini 2015 42 -1.88 -39.72 5.85 ADMCI Increase
131 25547636 Gardini 2015 42 4.47 -42.82 6.03 ADMCI Increase
132 25547636 Gardini 2015 42 42.33 -19.32 -29.10 ADMCI Increase
133 25745400 Yi 2015 32 -20.00 -70.00 16.00 ADMCI Increase
134 25745400 Yi 2015 32 22.00 -68.00 58.00 ADMCI Increase
135 25745400 Yi 2015 32 56.00 -50.00 14.00 ADMCI Increase
136 25745400 Yi 2015 32 -48.00 -72.00 18.00 ADMCI Increase
137 25745400 Yi 2015 32 0.00 -50.00 -2.00 ADMCI Increase

138 rows × 9 columns

Get the number of coordinates reported for each network


In [4]:
from numpy.linalg import norm

# Find the network whose nearest in-brain voxel is closest to the given MNI coordinate
def get_nearest_net(template, world_coor):
    list_coord = np.array(np.where(template.get_data() > 0))
    mni_coord = apply_affine(template.get_affine(), list_coord.T)
    distances = norm(mni_coord - np.array(world_coor), axis=1)
    idx_nearest_net = np.where(distances == np.min(distances))[0][0]
    return int(template.get_data()[list_coord[:,idx_nearest_net][0], list_coord[:,idx_nearest_net][1], list_coord[:,idx_nearest_net][2]])

# Find the MNI coordinate of the nearest in-brain voxel
def get_nearest_voxel(template, world_coor):
    list_coord = np.array(np.where(template.get_data() > 0))
    mni_coord = apply_affine(template.get_affine(), list_coord.T)
    distances = norm(mni_coord - np.array(world_coor), axis=1)
    idx_nearest_net = np.where(distances == np.min(distances))[0][0]
    return mni_coord[idx_nearest_net, :]


# Convert from world MNI space to voxel space; cast to int so the result can be
# used directly for indexing (the +[1] offset shifts indices by one voxel and is
# kept as in the original analysis)
def get_world2vox(template, mni_coord):
    return np.round(apply_affine(npl.inv(template.get_affine()), mni_coord) + [1]).astype(int)
    
# one accumulator slot per network in the template
network_votes = np.zeros(int(template.get_data().max()))

# get the voxel coordinates of the MNI seeds
mni_space_targets = seed_data[['x','y','z']].values
vox_corrd = get_world2vox(template,mni_space_targets)
votes = []
n_outofbrain=0
for i in range(vox_corrd.shape[0]):
    net_class = template.get_data()[vox_corrd[i,0],vox_corrd[i,1],vox_corrd[i,2]]
    if net_class==0:
        n_outofbrain+=1
        votes.append(get_nearest_net(template,[mni_space_targets[i,0],mni_space_targets[i,1],mni_space_targets[i,2]]))
    else:
        votes.append(net_class)

print('Out of brain coordinates: '+ str(n_outofbrain))
votes = np.array(votes)    

# take one vote for each study only
uni_pmid = np.unique(seed_data['PMID'])
votes.shape
frequency_votes=np.zeros((len(uni_pmid),len(network_votes)))
for i in range(len(uni_pmid)):
    aa = votes[(seed_data['PMID']==uni_pmid[i]).values]
    for j in aa:
        frequency_votes[i,j-1] = (aa == j).sum()/float(len(aa))
print frequency_votes


# compile the stats for each network: average the per-study ratios
network_votes = np.mean(frequency_votes,axis=0)
print network_votes


Out of brain coordinates: 15
[[ 0.          0.14285714  0.          0.          0.28571429  0.57142857
   0.        ]
 [ 0.          0.2         0.          0.          0.6         0.2         0.        ]
 [ 0.          0.          0.          1.          0.          0.          0.        ]
 [ 0.          0.          0.          0.          0.23076923  0.15384615
   0.61538462]
 [ 0.          0.05882353  0.11764706  0.          0.29411765  0.17647059
   0.35294118]
 [ 0.          0.          0.          0.          1.          0.          0.        ]
 [ 0.          0.          0.          0.          0.6         0.4         0.        ]
 [ 0.          0.          0.          0.          0.25        0.          0.75      ]
 [ 0.          0.5         0.5         0.          0.          0.          0.        ]
 [ 0.          0.          0.          0.          0.          0.          1.        ]
 [ 0.          0.          0.          0.          0.          0.16666667
   0.83333333]
 [ 0.          0.          0.          0.          0.5         0.5         0.        ]
 [ 0.          0.          0.34782609  0.2173913   0.13043478  0.13043478
   0.17391304]
 [ 0.          0.11538462  0.03846154  0.          0.15384615  0.23076923
   0.46153846]
 [ 0.          0.          0.          0.          0.          0.5         0.5       ]
 [ 0.          0.          0.          0.          0.5         0.5         0.        ]
 [ 0.          1.          0.          0.          0.          0.          0.        ]
 [ 0.          0.33333333  0.          0.          0.66666667  0.          0.        ]
 [ 0.          0.2         0.          0.2         0.4         0.2         0.        ]]
[ 0.          0.13423151  0.05283867  0.07459954  0.29534467  0.19629558
  0.24669003]
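
The world-to-voxel conversion in the cell above is just an application of the image affine and its inverse. A minimal self-contained sketch of the round trip, using a toy 3 mm affine rather than the actual BASC template header:

# toy 3 mm isotropic affine (illustrative values, not the template's)
toy_affine = np.array([[3., 0., 0.,  -96.],
                       [0., 3., 0., -132.],
                       [0., 0., 3.,  -78.],
                       [0., 0., 0.,    1.]])
mni = np.array([[-24., -10., 22.]])                # world (MNI) coordinates in mm
vox = apply_affine(npl.inv(toy_affine), mni)       # world -> fractional voxel indices
snapped = apply_affine(toy_affine, np.round(vox))  # voxel -> world, snapped to the grid
print(vox)      # ~[[24., 40.67, 33.33]]
print(snapped)  # [[-24., -9., 21.]]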

In [5]:
get_nearest_net(template,[-24,-10, 22])

get_nearest_voxel(template,[-24,-10, 22])


Out[5]:
array([-21., -10.,  22.])

In [6]:
print '#AD<HC'
print '#15:    48.0 -12.0 66.0'
print get_nearest_voxel(template,[48,-12, 66])
print '#34:    50.0 0 61.0'
print get_nearest_voxel(template,[50,0, 61])
print '#35:    48.0 -12.0 66.0'
print get_nearest_voxel(template,[48,-12, 66])
print '#46:    0 -96.0 28.0'
print get_nearest_voxel(template,[0,-96, 28])
print '#52:    -4.510000228881836 13.970000267028809 -30.84000015258789'
print get_nearest_voxel(template,[-4.510000228881836,13.970000267028809, -30.84000015258789])

print 'AD>HC'
print '#5:    57.0 42.0 24.0'
print get_nearest_voxel(template,[57,42, 24])
print '#105:    45.0 55.0 31.0'
print get_nearest_voxel(template,[45,55, 31])


#AD<HC
#15:    48.0 -12.0 66.0
[ 45. -13.  64.]
#34:    50.0 0 61.0
[ 48.  -4.  58.]
#35:    48.0 -12.0 66.0
[ 45. -13.  64.]
#46:    0 -96.0 28.0
[  3. -94.  28.]
#52:    -4.510000228881836 13.970000267028809 -30.84000015258789
[  0.  17. -23.]
AD>HC
#5:    57.0 42.0 24.0
[ 51.  38.  22.]
#105:    45.0 55.0 31.0
[ 39.  50.  28.]

In [7]:
print 'AD decrease'
print '#10:    6.0 55.0 47.0'
print 'MNI coord',get_nearest_voxel(template,[6.0, 55.0, 47.0])
print '#20:    -22.0 -70.0 60.0'
print 'MNI coord',get_nearest_voxel(template,[-22.0, -70.0, 60.0])
print '#108:    30.0 -94.0 20.0'
print 'MNI coord',get_nearest_voxel(template,[30.0, -94.0, 20.0])
print 'AD increase'
print '#2:    -45.0 33.0 48.0'
print 'MNI coord',get_nearest_voxel(template,[-45.0, 33.0, 48.0])
print '#19:    37.0 -85.0 30.0'
print 'MNI coord',get_nearest_voxel(template,[37.0, -85.0, 30.0])
print '#20:    -52.0 -61.0 45.0'
print 'MNI coord',get_nearest_voxel(template,[-52.0, -61.0, 45.0])
print '#31:    7.909999847412109 -102.08000183105469 8.859999656677246'
print 'MNI coord',get_nearest_voxel(template,[7.909999847412109, -102.08000183105469, 8.859999656677246])
print 'MCI decrease'
print '#6:    45.0 3.0 57.0'
print 'MNI coord',get_nearest_voxel(template,[45.0, 3.0, 57.0])
print '#7:    54.0 9.0 45.0'
print 'MNI coord',get_nearest_voxel(template,[54.0, 9.0, 45.0])
print '#14:    57.0 -21.0 51.0'
print 'MNI coord',get_nearest_voxel(template,[57.0, -21.0, 51.0])
print '#32:    5.769999980926514 -97.69000244140625 10.75'
print 'MNI coord',get_nearest_voxel(template,[5.769999980926514, -97.69000244140625, 10.75])
print '#80:    42.0 -78.0 36.0'
print 'MNI coord',get_nearest_voxel(template,[42.0, -78.0, 36.0])
print '#82:    -54.0 -72.0 18.0'
print 'MNI coord',get_nearest_voxel(template,[-54.0, -72.0, 18.0])
print '#105:    50.0 -58.0 50.0'
print 'MNI coord',get_nearest_voxel(template,[50.0, -58.0, 50.0])
print '#107:    50.0 -58.0 50.0'
print 'MNI coord',get_nearest_voxel(template,[50.0, -58.0, 50.0])
print 'MCI increase'
print '#18:    -36.0 57.0 21.0'
print 'MNI coord',get_nearest_voxel(template,[-36.0, 57.0, 21.0])


AD decrease
#10:    6.0 55.0 47.0
MNI coord [  6.  53.  46.]
#20:    -22.0 -70.0 60.0
MNI coord [-21. -70.  61.]
#108:    30.0 -94.0 20.0
MNI coord [ 30. -94.  19.]
AD increase
#2:    -45.0 33.0 48.0
MNI coord [-42.  32.  46.]
#19:    37.0 -85.0 30.0
MNI coord [ 36. -85.  31.]
#20:    -52.0 -61.0 45.0
MNI coord [-51. -61.  46.]
#31:    7.909999847412109 -102.08000183105469 8.859999656677246
MNI coord [   9. -103.   10.]
MCI decrease
#6:    45.0 3.0 57.0
MNI coord [ 45.   2.  58.]
#7:    54.0 9.0 45.0
MNI coord [ 54.   8.  43.]
#14:    57.0 -21.0 51.0
MNI coord [ 57. -22.  52.]
#32:    5.769999980926514 -97.69000244140625 10.75
MNI coord [  6. -97.  10.]
#80:    42.0 -78.0 36.0
MNI coord [ 42. -79.  37.]
#82:    -54.0 -72.0 18.0
MNI coord [-54. -73.  19.]
#105:    50.0 -58.0 50.0
MNI coord [ 51. -58.  49.]
#107:    50.0 -58.0 50.0
MNI coord [ 51. -58.  49.]
MCI increase
#18:    -36.0 57.0 21.0
MNI coord [-36.  56.  22.]

In [8]:
def gen1perm(n_seeds,proba):
    # Draw n_seeds random network labels according to proba and return
    # the fraction of draws that fell in each network
    ratio_votes_1study = np.zeros_like(proba)
    perm_votes = np.random.choice(range(0,len(proba)),size=(n_seeds,1),p=proba)
    for j in perm_votes:
        ratio_votes_1study[j] = (perm_votes == j).sum()/float(len(perm_votes))
    return ratio_votes_1study

# check that the probabilities are respected (see the sketch after this cell)
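
A quick way to run that check: with enough draws, the per-network ratios returned by gen1perm should approach the requested probabilities. A minimal sketch with a toy probability vector (not the real network probabilities):

toy_proba = np.array([0.1, 0.2, 0.3, 0.4])
ratios = gen1perm(2000, toy_proba)         # 2000 draws keeps the O(n^2) loop fast enough
print(ratios)                              # should be close to toy_proba
print(np.abs(ratios - toy_proba).max())    # deviation shrinks as the number of draws grows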

Generate random coordinates

Under the null hypothesis, random coordinates are assigned to each network with a probability proportional to the network's volume relative to the total brain volume.


In [9]:
'''
from numpy.random import permutation
def permute_table(frequency_votes,n_iter):
    h0_results = []
    for n in range(n_iter):
        perm_freq = frequency_votes.copy()
        #print perm_freq
        for i in range(perm_freq.shape[0]):
            perm_freq[i,:] = permutation(perm_freq[i,:])
        #print perm_freq
        h0_results.append(np.mean(perm_freq,axis=0))
    return np.array(h0_results).T
'''
def compute_freq(votes,data_ratio_votes,seed_data,proba):
    # take one vote for each study only
    uni_pmid = np.unique(seed_data['PMID'])
    ratio_votes=np.zeros((data_ratio_votes.shape[0],data_ratio_votes.shape[1],10000))  # 10000 surrogate datasets
    for idx_perm in range(ratio_votes.shape[-1]):
        # one surrogate dataset: redraw each study's seeds under the null
        for i in range(len(uni_pmid)):
            aa = votes[(seed_data['PMID']==uni_pmid[i]).values]
            n_seeds = len(aa)
            ratio_votes[i,:,idx_perm] = gen1perm(n_seeds,proba)
    # compute the frequency
    freq_data = np.mean(ratio_votes,axis=0)
        
    for i in range(freq_data.shape[0]):
        freq_data[i,:] = np.sort(freq_data[i,:])[::-1]
        
    return freq_data

# Total volume of the brain
total_volume = np.sum(template.get_data()>0)

# compute the proba of each network
proba_networks=[]
for i in range(1,len(network_votes)+1):
    proba_networks.append(np.sum(template.get_data()==i)/(total_volume*1.))
proba_networks = np.array(proba_networks)
print np.sum(proba_networks)
print proba_networks

# generate random values 
'''
def gen_rnd_hits(proba,n_seeds):
    results_h0 =  np.random.choice(range(0,len(proba)),size=(n_seeds,1000),p=proba)
    #results_h0 = permute_table(frequency_votes,1000)
    print results_h0.shape
    ditributions = []
    for i in range(frequency_votes.shape[1]):
        results_h0[i,:] = np.sort(results_h0[i,:])[::-1]
        #ditributions.append(one_way_pdf)   
    #return ditributions
    return results_h0
'''
#dist_data = gen_rnd_hits(proba_networks,np.sum(network_votes))
dist_data = compute_freq(votes,frequency_votes,seed_data,proba_networks)


1.0
[ 0.12507354  0.0961412   0.08271327  0.11642153  0.15549403  0.21121301
  0.21294342]

In [10]:
plt.figure()
plt.hist(dist_data[0],bins=np.arange(0,1,.01))
plt.figure()
plt.plot(dist_data[0].T)


Out[10]:
[<matplotlib.lines.Line2D at 0x7f1f8e29bb10>]

Generate the p-values for each network


In [11]:
def getpval_old(nhit,dist_data):
    # superseded by getpval below (the histogram-bin lookup is fragile)
    distribution_val =  np.histogram(dist_data,bins=np.arange(0,1,0.01))
    idx_bin = np.where((distribution_val[1]>=round(nhit,2)) & (distribution_val[1]<=round(nhit,2)))[0][0]
    return (np.sum(distribution_val[0][idx_bin:-1])+1)/(dist_data.shape[0]+1.)

def getpval(target,dist_data):
    # permutation p-value: (b+1)/(N+1), where b is the number of null
    # samples strictly greater than the observed value
    dist_sorted = np.sort(np.copy(dist_data))
    b = np.sum(dist_sorted > target)
    return ((b+1.)/(dist_data.shape[0]+1.))

print network_votes

pval_results=[]
for i in range(0,len(dist_data)):
    pval_results.append(getpval(network_votes[i],dist_data[i,:]))
    
print pval_results
plt.figure()
plt.bar(np.arange(1,len(pval_results)+1),pval_results,width=0.5,align='center')
plt.xlabel('Networks')
plt.ylabel('p-value')


[ 0.          0.13423151  0.05283867  0.07459954  0.29534467  0.19629558
  0.24669003]
[1.0, 0.15578442155784422, 0.80521947805219474, 0.855014498550145, 0.0039996000399960003, 0.59174082591740829, 0.24157584241575841]
Out[11]:
<matplotlib.text.Text at 0x7f1f8e183e50>
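
getpval implements the standard permutation p-value (b+1)/(N+1), where b is the number of null samples exceeding the observed value; the +1 terms keep the estimate away from zero. A toy check against a known null (synthetic data, not the notebook's distributions):

null_samples = np.random.randn(9999)   # standard-normal null
print(getpval(2.0, null_samples))      # ~0.023, the true upper tail beyond 2.0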

Map the p-values to the template


In [12]:
from proteus.matrix import tseries as ts
hitfreq_vol = ts.vec2map(network_votes,template)
pval_vol = ts.vec2map(1-np.array(pval_results),template)
plt.figure()
plotting.plot_stat_map(hitfreq_vol,cut_coords=(0,0,0),draw_cross=False)
plt.figure()
plotting.plot_stat_map(pval_vol,cut_coords=(0,0,0),draw_cross=False)


Out[12]:
<nilearn.plotting.displays.OrthoSlicer at 0x7f1f7d818810>
<matplotlib.figure.Figure at 0x7f1f7dc57190>
<matplotlib.figure.Figure at 0x7f1f7dcb3dd0>
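
proteus is an in-house package; for readers without it, vec2map presumably paints the i-th value of the vector over every voxel of network i+1 in the template. A minimal nibabel-based sketch under that assumption (vec2map_sketch is hypothetical, not the proteus API):

def vec2map_sketch(values, template):
    # assumed behaviour: write values[i] into every voxel labelled i+1
    labels = template.get_data()
    out = np.zeros(labels.shape)
    for i, v in enumerate(values):
        out[labels == (i + 1)] = v
    return nib.Nifti1Image(out, template.get_affine())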

FDR correction of the p-values


In [13]:
# correct for FDR
from statsmodels.sandbox.stats.multicomp import fdrcorrection0

fdr_test,fdr_pval=fdrcorrection0(pval_results,alpha=0.05)
print network_votes
print fdr_test
print fdr_pval


[ 0.          0.13423151  0.05283867  0.07459954  0.29534467  0.19629558
  0.24669003]
[False False False False  True False False]
[ 1.          0.54524548  0.99751691  0.99751691  0.0279972   0.99751691
  0.56367697]
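
fdrcorrection0 applies the Benjamini-Hochberg step-up procedure: sort the p-values, compare the k-th smallest to alpha*k/m, and reject every hypothesis up to the largest k that passes. A minimal sketch of the same idea (for illustration only, not a statsmodels replacement):

def bh_reject_sketch(pvals, alpha=0.05):
    # Benjamini-Hochberg step-up: reject up to the largest k with p_(k) <= alpha*k/m
    p = np.asarray(pvals, dtype=float)
    m = len(p)
    order = np.argsort(p)
    passed = p[order] <= alpha * np.arange(1, m + 1) / m
    reject = np.zeros(m, dtype=bool)
    if passed.any():
        k = np.where(passed)[0].max()
        reject[order[:k + 1]] = True
    return reject

print(bh_reject_sketch(pval_results))  # should match fdr_test above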

In [14]:
# save the results

path_output = '/home/cdansereau/git/Projects/metaad/maps_results/'
stats_results = {'Hits':network_votes ,'pvalues':pval_results,'fdr_test':fdr_test,'fdr_pval':fdr_pval,'n_outofbrain':n_outofbrain}
scipy.io.savemat(path_output + output_stats, stats_results)
hitfreq_vol.to_filename(os.path.join(path_output,output_vol))
#hitfreq_vol.to_filename(os.path.join('/home/cdansereau/git/Projects/metaad/maps_results/','AD_pval_vol.nii.gz'))