In [1]:
import nibabel as nib
In [2]:
from dipy.data import read_stanford_labels
# Load the Stanford HARDI dataset: diffusion volume, gradient table, and tissue labels.
hardi_img, gtab, labels_img = read_stanford_labels()
data = hardi_img.get_data()
labels = labels_img.get_data()
affine = hardi_img.get_affine()
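A quick sanity check (a minimal sketch, not part of the recorded session): the diffusion volume is 4D and the label volume shares its spatial grid.
print(data.shape)    # (x, y, z, number of diffusion-weighted volumes)
print(labels.shape)  # 3D label volume on the same grid as data.shape[:3]
print(affine)        # voxel-to-world (RAS mm) transform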
In [3]:
# In the Stanford label volume, voxels labeled 1 or 2 are white matter.
white_matter = (labels == 1) | (labels == 2)
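To verify the mask before fitting (a sketch that was not run in this session), count the selected voxels:
import numpy as np
print(white_matter.shape)                    # same grid as the label volume
print(int(np.count_nonzero(white_matter)))   # number of white-matter voxels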
In [4]:
from dipy.reconst.shm import CsaOdfModel
from dipy.data import default_sphere
from dipy.direction import peaks_from_model
# Fit a constant solid angle (CSA) ODF model and extract peak directions,
# restricted to the white-matter mask.
csa_model = CsaOdfModel(gtab, sh_order=6)
csa_peaks = peaks_from_model(csa_model, data, default_sphere,
                             relative_peak_threshold=.8,
                             min_separation_angle=45,
                             mask=white_matter)
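peaks_from_model returns a PeaksAndMetrics object; its gfa attribute drives the stopping criterion in the next step. A quick look (a sketch, not from the original run):
print(csa_peaks.gfa.shape)   # one GFA value per voxel
print(csa_peaks.gfa.max())   # GFA lies in [0, 1]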
In [5]:
from dipy.tracking.local import ThresholdTissueClassifier
# Stop tracking wherever GFA falls below 0.1, i.e. where anisotropy is too low.
classifier = ThresholdTissueClassifier(csa_peaks.gfa, .1)
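To see how restrictive the 0.1 threshold is within the mask (a hedged sketch, not part of the original session):
import numpy as np
above = csa_peaks.gfa > 0.1
frac = np.count_nonzero(above & white_matter) / float(np.count_nonzero(white_matter))
print(frac)   # fraction of white-matter voxels where tracking is allowed to continue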
In [6]:
from dipy.tracking import utils
#seed_mask = nib.load('./warped_midsag.nii.gz').get_data()
# Place 2 x 2 x 2 seeds per voxel throughout the white-matter mask.
seeds = utils.seeds_from_mask(white_matter, density=[2, 2, 2], affine=affine)
In [7]:
seeds
Out[7]:
In [8]:
from dipy.tracking.local import LocalTracking
from dipy.viz import fvtk
from dipy.viz.colormap import line_colors
# Initialize LocalTracking; the actual computation happens lazily in the next step.
streamlines = LocalTracking(csa_peaks, classifier, seeds, affine, step_size=.5)
# Generate the streamlines, keeping only those with more than 10 points.
streamlines = [s for s in streamlines if s.shape[0] > 10]
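To get a feel for the result (a sketch not part of the recorded session; utils.length from dipy.tracking.utils yields each streamline's path length), you can summarize streamline lengths in millimetres:
import numpy as np
lengths = np.array(list(utils.length(streamlines)))
print(lengths.min(), lengths.mean(), lengths.max())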
In [9]:
len(streamlines)
Out[9]:
In [12]:
# Prepare the display objects.
streamlines_actor = fvtk.line(streamlines, line_colors(streamlines))
# Create the 3d display.
r = fvtk.ren()
fvtk.add(r, streamlines_actor)
In [14]:
fvtk.show(r)
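If no interactive display is available (a sketch; fvtk.record saves a screenshot of the renderer instead of opening a window):
fvtk.record(r, out_path='csa_streamlines.png', size=(800, 800))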
In [15]:
data.shape
Out[15]:
In [20]:
# Build a TrackVis header matching the diffusion volume's geometry,
# then save the streamlines in world (RAS mm) coordinates.
hdr = nib.trackvis.empty_header()
hdr['voxel_size'] = hardi_img.get_header().get_zooms()[:3]
hdr['voxel_order'] = 'RAS'
hdr['dim'] = data.shape[:3]
hdr['vox_to_ras'] = hardi_img.get_affine()
streamlines_trk = ((sl, None, None) for sl in streamlines)
csa_sl_fname = 'csa_white_matter.trk'
nib.trackvis.write(csa_sl_fname, streamlines_trk, hdr, points_space='rasmm')
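To confirm the file round-trips (a sketch using nibabel's legacy trackvis reader, matching the writer above):
streams, trk_hdr = nib.trackvis.read(csa_sl_fname, points_space='rasmm')
print(len(streams))   # should equal len(streamlines)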
In [ ]: