This code will load the model information, generate the model definition, and run the model estimation using FSL
In [5]:
import nipype.algorithms.modelgen as model # model generation
from nipype.interfaces import fsl, ants
from nipype.interfaces.base import Bunch
import os,json,glob,sys
import numpy
import nibabel
import nilearn.plotting
sys.path.append('../utils/')
from compute_fd_dvars import compute_fd,compute_dvars
from make_event_files_from_json import MakeEventFilesFromJSON
%matplotlib inline
import matplotlib.pyplot as plt
try:
datadir=os.environ['FMRIDATADIR']
assert not datadir==''
except:
datadir='/Users/poldrack/data_unsynced/myconnectome/sub00001'
results_dir = os.path.abspath("../../results")
if not os.path.exists(results_dir):
os.mkdir(results_dir)
from nipype.caching import Memory
mem = Memory(base_dir='.')
print('Using data from',datadir)
Load the scan and model info, and generate the event files for FSL from the information in model.json
In [14]:
# Subject/session selection.  NOTE: the anatomy used later for
# normalization comes from a different session than the functional data.
subject = 'sub00001'
session = 'ses014'
subdir = os.path.join(datadir, 'ds031', subject, session)
tasknum = 2  # n-back

bolddir = os.path.join(datadir, 'ds031/sub00001', session,
                       'functional')
boldfile = os.path.join(bolddir,
                        'sub00001_ses014_task002_run001_bold.nii.gz')
preprocessed_epi = os.path.join(results_dir,
                                "sub00001_ses014_task002_run001_bold_mcf_brain.nii.gz")

# Scan metadata — we need the repetition time (TR) for model specification.
# Use context managers so the JSON file handles are closed promptly
# (the originals leaked open file objects via json.load(open(...))).
with open(os.path.join(subdir,
                       'functional/sub00001_ses014_task002_run001_bold.json')) as fp:
    scaninfo = json.load(fp)
tr = scaninfo['RepetitionTime']

# Model description: per-task variables (onsets) and contrasts.
modelfile = os.path.join(subdir, 'model.json')
with open(modelfile) as fp:
    modelinfo = json.load(fp)
taskinfo = modelinfo['task%03d' % tasknum]['model001']
evs = taskinfo['Variables']
contrasts = taskinfo['Contrasts']

# Collect the onset times of all target (button-press) events; these are
# modeled together as a single 'responses' condition below.
response_onsets = []
for v in evs.keys():
    if '_target_ons' in evs[v]['VariableName']:
        for ons in evs[v]['onsets']:
            response_onsets.append(ons[0])
Load the motion parameters that we created during preprocessing, so we can use them as regressors. Also generate the framewise displacement.
In [15]:
# Load the 6-column motion parameter file written during preprocessing
# and convert it to framewise displacement (FD), which serves as a
# nuisance regressor in the GLM below.
motion_params = numpy.loadtxt(os.path.join(results_dir, "motion.par"))
fd = compute_fd(motion_params)
plt.plot(fd, color='g')
Out[15]:
If the necessary files don't exist, then rerun preprocessing on BOLD file
In [19]:
# Rerun minimal preprocessing (motion correction + brain extraction)
# only when the preprocessed EPI is not already present on disk.
if not os.path.exists(preprocessed_epi):
    run_mcflirt = mem.cache(fsl.MCFLIRT)
    mcflirt_results = run_mcflirt(in_file=boldfile, mean_vol=True)

    run_bet = mem.cache(fsl.BET)
    bet_results = run_bet(in_file=mcflirt_results.outputs.out_file,
                          out_file=preprocessed_epi,
                          functional=True,
                          mask=True)
Specify the model. For the sake of speed we will use a simplified model that treats the study as a blocked design rather than modeling each item separately, but we also model instructions and motor responses; thus, it is a hybrid block/event-related design.
In [20]:
# Block onsets (seconds) for the six stimulus conditions of the hybrid
# block/event-related n-back design.
stim_conditions = ['faces-1back',
                   'faces-2back',
                   'scenes-1back',
                   'scenes-2back',
                   'chars-1back',
                   'chars-2back']
stim_onsets = [[68, 176, 372],
               [2, 154, 416],
               [24, 220, 350],
               [112, 198, 328],
               [46, 264, 394],
               [90, 242, 306]]
# Every 20 s task block is preceded by a 2 s instruction cue, so the
# instruction onsets are simply the block onsets shifted back by 2 s.
# (Previously this was a separate hand-typed literal duplicating the
# same 18 numbers — derived here to remove the duplication.)
instruction_onsets = list(numpy.array(sum(stim_onsets, [])) - 2.0)

# Single-run subject_info Bunch: 20 s stimulus blocks, 2 s instruction
# events, 1 s response events, plus FD as a nuisance regressor.
info = [Bunch(conditions=stim_conditions + ['instructions', 'responses'],
              onsets=stim_onsets + [instruction_onsets, response_onsets],
              durations=[[20] for _ in stim_conditions] + [[2], [1]],
              regressors=[fd],
              regressor_names=['FD'])]

s = model.SpecifyModel()
s.inputs.input_units = 'secs'
s.inputs.functional_runs = preprocessed_epi
s.inputs.time_repetition = tr
s.inputs.high_pass_filter_cutoff = 128.
s.inputs.subject_info = info
specify_model_results = s.run()
s.inputs
Out[20]:
Generate the fsf and ev files using Level1Design.
Exercise: add an additional T contrast for the FD variable.
In [21]:
# Contrasts over the condition regressors.  Each entry is
# [name, 'T', [condition names], [weights]].  NOTE: this rebinds the
# `contrasts` name that earlier held the contrasts loaded from model.json.
contrasts = [
    ['faces>Baseline', 'T',
     ['faces-1back', 'faces-2back'], [0.5, 0.5]],
    ['scenes>Baseline', 'T',
     ['scenes-1back', 'scenes-2back'], [0.5, 0.5]],
    ['chars>Baseline', 'T',
     ['chars-1back', 'chars-2back'], [0.5, 0.5]],
    # BUG FIX: the original weight list had 8 entries for 6 conditions
    # ([-1,1,-1,1,-1,1,-1,1]); trimmed to one weight per condition
    # (-1 for each 1-back, +1 for each 2-back).
    ['2back>1back', 'T',
     ['faces-1back', 'faces-2back', 'scenes-1back', 'scenes-2back',
      'chars-1back', 'chars-2back'],
     [-1, 1, -1, 1, -1, 1]],
    ['response>Baseline', 'T',
     ['responses'], [1]],
    ['instructions>Baseline', 'T',
     ['instructions'], [1]],
]

# Generate the FEAT fsf/ev files: double-gamma HRF with temporal
# derivative, prewhitening enabled via model_serial_correlations.
level1design = mem.cache(fsl.model.Level1Design)
level1design_results = level1design(
    interscan_interval=tr,
    bases={'dgamma': {'derivs': True}},
    session_info=specify_model_results.outputs.session_info,
    model_serial_correlations=True,
    contrasts=contrasts)
level1design_results.outputs
Out[21]:
Generate the full set of model files using FEATModel
In [22]:
# Run FEATModel to convert the fsf/ev specification generated above into
# the full design matrix and contrast files consumed by FILMGLS below.
modelgen = mem.cache(fsl.model.FEATModel)
modelgen_results = modelgen(fsf_file=level1design_results.outputs.fsf_files,
ev_files=level1design_results.outputs.ev_files)
modelgen_results.outputs
Out[22]:
Visualize the design matrix
In [23]:
# Load the FEAT design matrix (plain-text file; the first 5 lines are a
# header) and display it: rows = time points (TRs), columns = regressors.
desmtx=numpy.loadtxt(modelgen_results.outputs.design_file,skiprows=5)
plt.imshow(desmtx,aspect='auto',interpolation='nearest',cmap='gray')
Out[23]:
Show the correlation matrix for design
In [24]:
# Show pairwise correlations between design-matrix columns; strong
# off-diagonal values would indicate collinear regressors.
design_corr = numpy.corrcoef(desmtx.T)
plt.imshow(design_corr, aspect='auto', interpolation='nearest',
           cmap=plt.cm.cubehelix_r)
plt.colorbar()
Out[24]:
Estimate the model using FILMGLS - this will take a few minutes.
In [25]:
# Apply the brain mask produced during preprocessing to the preprocessed
# EPI so the GLM is only fit within brain voxels.
mask = mem.cache(fsl.maths.ApplyMask)
mask_results = mask(in_file=preprocessed_epi,
mask_file=os.path.join(results_dir, "mask.nii.gz"))
mask_results.outputs
Out[25]:
In [26]:
# Estimate the GLM with FSL FILMGLS on the masked EPI data.
# autocorr_noestimate=True skips autocorrelation estimation for speed;
# note this disables FILM's prewhitening correction.
filmgls= mem.cache(fsl.FILMGLS)
filmgls_results = filmgls(in_file=mask_results.outputs.out_file,
design_file = modelgen_results.outputs.design_file,
tcon_file = modelgen_results.outputs.con_file,
autocorr_noestimate = True)
filmgls_results.outputs
Out[26]:
Exercise: Visualize the statistical map for the contrast that tests for effects of the motion variable (FD).
For the group level analysis we need to move results from all subjects into one common MNI space. Let's start with the EPI derived mask (we will use it later for group level mask)
In [27]:
# Warp the EPI-space brain mask to MNI space in two steps:
#   1) FSL ApplyWarp: EPI -> subject T1 (precomputed warp field)
#   2) ANTs ApplyTransforms: T1 -> MNI (precomputed .h5 composite transform)
# Nearest-neighbour interpolation keeps the mask binary at each step.
mask_file = os.path.join(results_dir, "mask.nii.gz")
epi_to_t1_warp = os.path.join(results_dir, "epi_to_t1_warp.nii.gz")
t1_to_mni_warp = os.path.join(results_dir, "t1_to_mni_warp.h5")
in_file = mask_file
# The anatomy comes from a different session (ses018) than the
# functional data (ses014).
anat_subject='ses018'
anatomydir=os.path.join(datadir,'ds031/sub00001',anat_subject,
'anatomy')
t1_file = os.path.join(anatomydir,'sub00001_ses018_T1w_001.nii.gz')
# Step 1: EPI -> T1 with FSL, nearest-neighbour ("nn") interpolation.
epi_to_t1 = mem.cache(fsl.ApplyWarp)
epi_to_t1_mask_results = epi_to_t1(in_file=in_file,
ref_file=t1_file,
field_file=epi_to_t1_warp,
interp="nn")
nilearn.plotting.plot_roi(epi_to_t1_mask_results.outputs.out_file, title="EPI mask in subject T1 space")
# Step 2: T1 -> MNI with ANTs, referenced to the FSL MNI152 2mm template.
t1_to_mni = mem.cache(ants.ApplyTransforms)
t1_to_mni_mask_results = t1_to_mni(input_image=epi_to_t1_mask_results.outputs.out_file,
reference_image=os.path.join(os.getenv('FSLDIR'),'data/standard/MNI152_T1_2mm_brain.nii.gz'),
transforms=t1_to_mni_warp,
interpolation="NearestNeighbor")
t1_to_mni_mask_results.outputs
nilearn.plotting.plot_roi(t1_to_mni_mask_results.outputs.output_image, title="EPI mask in MNI")
Out[27]:
Now we can use the same procedure for the contrast and variance images (illustrated below for the first contrast image; loop over the remaining copes and varcopes to transform them all).
In [28]:
# Warp a statistical image to MNI space using the same two-step
# EPI -> T1 -> MNI procedure as the mask, but with spline/BSpline
# interpolation since these are continuous-valued images.
# NOTE(review): only the first contrast image (copes[0]) is warped here;
# to transform all contrast and variance images, loop over
# filmgls_results.outputs.copes and .varcopes with the same calls.
image=filmgls_results.outputs.copes[0]
_, fname = os.path.split(image)
epi_to_t1_results = epi_to_t1(in_file=filmgls_results.outputs.copes[0],
ref_file=t1_file,
field_file=epi_to_t1_warp,
interp="spline")
t1_to_mni_results = t1_to_mni(input_image=epi_to_t1_results.outputs.out_file,
reference_image=os.path.join(os.getenv('FSLDIR'),'data/standard/MNI152_T1_2mm_brain.nii.gz'),
transforms=t1_to_mni_warp,
interpolation="BSpline")
nilearn.plotting.plot_stat_map(t1_to_mni_results.outputs.output_image,
title="%s in MNI"%fname, threshold='auto')
Out[28]:
In [ ]: