This notebook provides examples of resting-state fMRI analysis.
To grab the data, you should first do the following:
bash ~/fmri-analysis-vm/get_resting.sh
This will grab four sessions of resting-state data from ses-105, along with the field maps for this session. These data have been motion-corrected to a common target so that they are aligned to one another. The motion parameter files were estimated separately for each session, rather than relative to the common target.
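The motion correction is not rerun in this notebook, but for reference, this is roughly how a single scan could be aligned to the shared target with FSL's MCFLIRT through nipype (a sketch only; the filenames are placeholders, and the second call mirrors how the within-scan .par files used below would be produced):
In [ ]:
# hedged sketch of the upstream motion correction; not executed in this notebook
from nipype.interfaces import fsl

# align one resting scan to the shared registration target (placeholder filenames)
align = fsl.MCFLIRT(in_file='sub-01_ses-105_task-rest_run-001_bold.nii.gz',
                    ref_file='mcflirt_target.nii.gz')
aligned = align.run()

# estimate motion parameters within the scan itself (MCFLIRT's default reference
# is the middle volume); save_plots writes the .par file of motion parameters
motest = fsl.MCFLIRT(in_file='sub-01_ses-105_task-rest_run-001_bold.nii.gz',
                     save_plots=True)
params = motest.run()
print(params.outputs.par_file)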
Credit:
In [2]:
import os,glob
import nibabel
import numpy
import sklearn
import nilearn.input_data
from nilearn.plotting import plot_roi
from nilearn.image.image import mean_img
from nilearn.plotting import plot_stat_map, show
from nilearn.image import index_img,clean_img
from nilearn.decomposition import CanICA
from sklearn.decomposition import FastICA,PCA
import matplotlib.pyplot as plt
from nipype.interfaces import fsl, nipy
from nipype.caching import Memory
mem = Memory(base_dir='.')
import sys
sys.path.append('/home/vagrant/fmri-analysis-vm/analysis/utils')
from compute_fd_dvars import compute_fd,compute_dvars
%matplotlib inline
Compute a brain mask from the first scan with nilearn's NiftiMasker (EPI mask strategy), then load, clean, and smooth each scan and concatenate the masked data and motion parameters.
In [13]:
rsfmri_basedir='/home/vagrant/data/ds031/sub-01/ses-105/mcflirt'
rsfmri_files=glob.glob(os.path.join(rsfmri_basedir,'sub*.nii.gz'))
rsfmri_files.sort()
# load the first image and create the masker object
rsfmri_img=nibabel.load(rsfmri_files[0])
masker= nilearn.input_data.NiftiMasker(mask_strategy='epi')
masker.fit(rsfmri_img)
mask_img = masker.mask_img_
rsfmri={} # nifti handle to cleaned image
fmri_masked=None
# load and clean each image
for f in rsfmri_files:
    rsfmri_img=nibabel.load(f)
    # parse the run number from the BIDS-style filename
    runnum=int(f.split('_')[3].split('-')[1])
    # clean (detrend/standardize) and lightly smooth each run
    rsfmri[runnum]=nilearn.image.smooth_img(nilearn.image.clean_img(rsfmri_img),'fast')
    print('loaded run',runnum)
    # load the MCFLIRT motion parameters for this run
    motparfile=f.replace('nii.gz','par')
    mp=numpy.loadtxt(motparfile)
    # concatenate the masked data and motion parameters across runs
    if fmri_masked is None:
        fmri_masked=masker.transform(rsfmri[runnum])
        motpars=mp
    else:
        fmri_masked=numpy.vstack((fmri_masked,masker.transform(rsfmri[runnum])))
        motpars=numpy.vstack((motpars,mp))
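Note that clean_img is used here with its defaults (detrending and standardization). If one also wanted to regress out the motion parameters and band-pass filter during cleaning, the call could look roughly like this (a sketch; the cutoff frequencies and repetition time are assumptions, not values taken from this dataset):
In [ ]:
# hypothetical variant of the cleaning step, with confound regression and filtering
cleaned = nilearn.image.clean_img(rsfmri_img,
                                  confounds=mp,     # motion parameters for this scan
                                  detrend=True,
                                  standardize=True,
                                  low_pass=0.1,     # assumed band-pass edges in Hz
                                  high_pass=0.01,
                                  t_r=1.16)         # assumed repetition time in seconds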
Visualize the mask
In [8]:
# calculate mean image for the background
mean_func_img = '/home/vagrant/data/ds031/sub-01/ses-105/mcflirt/mcflirt_target.nii.gz'
plot_roi(mask_img, mean_func_img, display_mode='y',
         cut_coords=4, title="Mask")
Compute framewise displacement and plot it.
In [9]:
fd=compute_fd(motpars)
# report timepoints where framewise displacement exceeds 1 mm
print(numpy.where(fd>1))
plt.figure(figsize=(12,4))
# zero out FD at the first timepoint of each scan, since the jump across
# a scan boundary does not reflect real within-scan motion
fd[240]=0
fd[480]=0
fd[720]=0
plt.plot(fd)
# mark the scan boundaries (240 volumes per scan) with dashed lines
for c in [240,480,720]:
    plt.plot([c,c],
             [0,numpy.max(fd)*1.1],'k--')
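compute_fd comes from the course utilities. For reference, framewise displacement in the Power et al. (2012) sense is the sum of the absolute frame-to-frame differences of the six realignment parameters, with rotations converted to millimeters on a 50 mm sphere; a rough equivalent, assuming MCFLIRT's column order of three rotations in radians followed by three translations in mm, is:
In [ ]:
def fd_power(motion, radius=50.0):
    # framewise displacement per volume; the first volume is assigned 0
    motion = numpy.asarray(motion).copy()
    motion[:, :3] *= radius                       # rotations (radians) to mm of arc
    diffs = numpy.abs(numpy.diff(motion, axis=0))
    return numpy.concatenate([[0.0], diffs.sum(axis=1)])

fd_alt = fd_power(motpars)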
Run FastICA and PCA on the masked data and compare the components.
In [14]:
ica=FastICA(n_components=5,max_iter=1000)
pca=PCA(n_components=5)
ica.fit(fmri_masked)
pca.fit(fmri_masked)
ica_components_img=masker.inverse_transform(ica.components_)
pca_components_img=masker.inverse_transform(pca.components_)
In [15]:
for i in range(5):
    img=index_img(ica_components_img, i)
    plot_stat_map(img, mean_func_img,
                  display_mode='z', cut_coords=4,
                  title="ICA Component %d"%i)
for i in range(5):
    img=index_img(pca_components_img, i)
    plot_stat_map(img, mean_func_img,
                  display_mode='z', cut_coords=4,
                  title="PCA Component %d"%i)
Run ICA across the resting scans using the CanICA tool from nilearn, which performs its own masking and aggregates across the input images. The number of components (10) is set arbitrarily for now.
In [10]:
n_components=10
canica = CanICA(n_components=n_components, smoothing_fwhm=6.,
                memory="nilearn_cache", memory_level=2,
                threshold=3., verbose=10, random_state=0)
canica.fit(rsfmri_files)
# Retrieve the independent components in brain space
components_img = canica.masker_.inverse_transform(canica.components_)
In [11]:
# Visualize results
for i in range(n_components):
    img=index_img(components_img, i)
    # project the concatenated data onto this component map to get its time series
    img_masked=masker.transform(img)
    ts=fmri_masked.dot(img_masked.T)
    plot_stat_map(img, mean_func_img,
                  display_mode='z', cut_coords=4, threshold=0.01,
                  title="Component %d"%i)
    plt.figure(figsize=(8,3))
    plt.plot(ts)
    # mark the scan boundaries with dashed lines
    for c in [240,480,720]:
        plt.plot([c,c],
                 [numpy.min(ts)*1.1,
                  numpy.max(ts)*1.1],
                 'k--')
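The dot product above is a quick way to get one time series per component. nilearn's NiftiMapsMasker offers a related, more explicit route (it regresses the component maps against the data rather than taking a plain projection); a sketch using the cleaned images loaded earlier:
In [ ]:
# extract one time series per CanICA component from each cleaned scan
maps_masker = nilearn.input_data.NiftiMapsMasker(maps_img=components_img,
                                                 mask_img=mask_img)
maps_masker.fit()
component_ts = numpy.vstack([maps_masker.transform(rsfmri[r]) for r in sorted(rsfmri)])
print(component_ts.shape)   # (total timepoints, n_components)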