#!/usr/bin/env python
"""
=================
dMRI: Camino, DTI
=================

Introduction
============

This script, dmri_camino_dti.py, demonstrates how to perform basic diffusion
analysis in a Nipype pipeline::

    python dmri_camino_dti.py

We perform this analysis using the FSL course data, which can be downloaded
from: http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz

Import the necessary modules from nipype.
"""

import nipype.interfaces.io as nio  # Data i/o
import nipype.interfaces.utility as util  # utility
import nipype.pipeline.engine as pe  # pipeline engine
import nipype.interfaces.camino as camino
import nipype.interfaces.fsl as fsl
import nipype.interfaces.camino2trackvis as cam2trk
import nipype.algorithms.misc as misc
import os  # system functions

"""
We use the following functions to read the voxel and data dimensions of the
input images. This allows the pipeline to be flexible enough to accept and
process images of varying size. The SPM Face tutorial (fmri_spm_face.py) also
implements this inference of voxel size from the data.
"""

def get_vox_dims(volume):
    import nibabel as nb
    if isinstance(volume, list):
        volume = volume[0]
    nii = nb.load(volume)
    hdr = nii.header
    voxdims = hdr.get_zooms()
    return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])]


def get_data_dims(volume):
    import nibabel as nb
    if isinstance(volume, list):
        volume = volume[0]
    nii = nb.load(volume)
    hdr = nii.header
    datadims = hdr.get_data_shape()
    return [int(datadims[0]), int(datadims[1]), int(datadims[2])]


def get_affine(volume):
    import nibabel as nb
    nii = nb.load(volume)
    return nii.affine

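"""
For illustration, these helpers can be called directly on an image file; the
path and the returned values below are hypothetical::

    get_vox_dims('subj1/data.nii.gz')   # [1.75, 1.75, 2.0]
    get_data_dims('subj1/data.nii.gz')  # [128, 128, 64]
"""
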
subject_list = ['subj1']
fsl.FSLCommand.set_default_output_type('NIFTI')

"""
Map field names to individual subject runs.
"""

info = dict(dwi=[['subject_id', 'data']],
            bvecs=[['subject_id', 'bvecs']],
            bvals=[['subject_id', 'bvals']])

infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),
                     name="infosource")

"""Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``infosource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in ``subject_list``.
"""

infosource.iterables = ('subject_id', subject_list)

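"""
With more subjects, the pipeline engine repeats the entire downstream graph
once per list entry; a sketch (subject names hypothetical)::

    infosource.iterables = ('subject_id', ['subj1', 'subj2'])
"""
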
"""
Now we create a :class:`nipype.interfaces.io.DataGrabber` object and
fill in the information from above about the layout of our data. The
:class:`nipype.pipeline.engine.Node` class wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""

datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                               outfields=list(info.keys())),
                     name='datasource')

datasource.inputs.template = "%s/%s"

# This needs to point to the fdt folder you can find after extracting
# http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
datasource.inputs.base_directory = os.path.abspath('fsl_course_data/fdt/')

datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True

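"""
With these settings, the grabber combines the templates with the entries in
``info``; for ``subj1`` the three outputs resolve, relative to
``base_directory``, to::

    subj1/data.nii.gz  # dwi   (via field_template)
    subj1/bvecs        # bvecs (via the default template)
    subj1/bvals        # bvals (via the default template)
"""
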
"""
An inputnode is used to pass the data obtained by the data grabber to the
actual processing functions.
"""

inputnode = pe.Node(interface=util.IdentityInterface(fields=["dwi", "bvecs", "bvals"]),
                    name="inputnode")

"""
Setup for Diffusion Tensor Computation
--------------------------------------

In this section we create the nodes necessary for diffusion analysis.
First, the diffusion image is converted to voxel order.
"""

image2voxel = pe.Node(interface=camino.Image2Voxel(), name="image2voxel")
fsl2scheme = pe.Node(interface=camino.FSL2Scheme(), name="fsl2scheme")
fsl2scheme.inputs.usegradmod = True

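"""
Camino interfaces can also be test-run on their own, outside of a workflow; a
minimal sketch, assuming a local diffusion image (the path is hypothetical)::

    converted = camino.Image2Voxel(in_file='data.nii.gz').run()
"""
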
"""
Second, diffusion tensors are fit to the voxel-order data.
"""

dtifit = pe.Node(interface=camino.DTIFit(), name='dtifit')

"""
Next, a lookup table is generated from the scheme file and the
signal-to-noise ratio (SNR) of the unweighted (q=0) data.
"""

dtlutgen = pe.Node(interface=camino.DTLUTGen(), name="dtlutgen")
dtlutgen.inputs.snr = 16.0
dtlutgen.inputs.inversion = 1

"""
In this tutorial we implement probabilistic tractography using the PICo
algorithm. PICo tractography requires an estimate of the fibre direction and
a model of its uncertainty in each voxel; this is produced using the
following node.
"""

picopdfs = pe.Node(interface=camino.PicoPDFs(), name="picopdfs")
picopdfs.inputs.inputmodel = 'dt'

"""
An FSL BET node generates a brain mask from the diffusion image, which is
used to seed the PICo tractography.
"""

bet = pe.Node(interface=fsl.BET(), name="bet")
bet.inputs.mask = True

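"""
If the default brain extraction is too aggressive or too lenient for your
data, the fractional intensity threshold can be adjusted; a sketch (the value
is illustrative)::

    bet.inputs.frac = 0.34
"""
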
"""
Finally, tractography is performed. First, DT streamline tractography:
"""

trackdt = pe.Node(interface=camino.TrackDT(), name="trackdt")

"""
Now Camino's Probabilistic Index of Connectivity (PICo) algorithm. In this
tutorial, we will use only one iteration for time-saving purposes.
"""

trackpico = pe.Node(interface=camino.TrackPICo(), name="trackpico")
trackpico.inputs.iterations = 1

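"""
A single iteration keeps this tutorial fast; for a real analysis you would
typically draw many more probabilistic samples per seed, for example (the
value is illustrative)::

    trackpico.inputs.iterations = 1000
"""
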
"""
Currently, one of the best programs for visualizing tracts is TrackVis. For
this reason, a node is included to convert the raw tract data to the .trk
format. Solely for testing purposes, another node is added to perform the
reverse conversion.
"""

cam2trk_dt = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_dt")
cam2trk_dt.inputs.min_length = 30
cam2trk_dt.inputs.voxel_order = 'LAS'

cam2trk_pico = pe.Node(interface=cam2trk.Camino2Trackvis(), name="cam2trk_pico")
cam2trk_pico.inputs.min_length = 30
cam2trk_pico.inputs.voxel_order = 'LAS'

trk2camino = pe.Node(interface=cam2trk.Trackvis2Camino(), name="trk2camino")

"""
Tracts can also be converted to VTK and OOGL formats, for use in programs
such as Geomview and ParaView. The following node handles the OOGL
conversion; for VTK, use ``VtkStreamlines``, as sketched after it.
"""

procstreamlines = pe.Node(interface=camino.ProcStreamlines(), name="procstreamlines")
procstreamlines.inputs.outputtracts = 'oogl'

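"""
A minimal VTK export node would look like this sketch (it is not connected in
this tutorial)::

    vtkstreamlines = pe.Node(interface=camino.VtkStreamlines(),
                             name="vtkstreamlines")
"""
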
"""
We can also produce a variety of scalar values from our fitted tensors. The
following nodes generate the fractional anisotropy and diffusivity trace maps
and their associated headers.
"""

fa = pe.Node(interface=camino.ComputeFractionalAnisotropy(), name='fa')
trace = pe.Node(interface=camino.ComputeTensorTrace(), name='trace')
dteig = pe.Node(interface=camino.ComputeEigensystem(), name='dteig')

analyzeheader_fa = pe.Node(interface=camino.AnalyzeHeader(), name="analyzeheader_fa")
analyzeheader_fa.inputs.datatype = "double"
analyzeheader_trace = analyzeheader_fa.clone('analyzeheader_trace')

fa2nii = pe.Node(interface=misc.CreateNifti(), name='fa2nii')
trace2nii = fa2nii.clone("trace2nii")

"""
Since we have now created all our nodes, we can define our workflow and start
making connections. Each connection is specified as a tuple of
``(source node, destination node, [(output field, input field), ...])``.
"""

tractography = pe.Workflow(name='tractography')

tractography.connect([(inputnode, bet, [("dwi", "in_file")])])

"""
File format conversion.
"""

tractography.connect([(inputnode, image2voxel, [("dwi", "in_file")]),
                      (inputnode, fsl2scheme, [("bvecs", "bvec_file"),
                                               ("bvals", "bval_file")])
                      ])

"""
Tensor fitting.
"""

tractography.connect([(image2voxel, dtifit, [('voxel_order', 'in_file')]),
                      (fsl2scheme, dtifit, [('scheme', 'scheme_file')])
                      ])

"""
Workflow for applying DT streamline tractography.
"""

tractography.connect([(bet, trackdt, [("mask_file", "seed_file")])])
tractography.connect([(dtifit, trackdt, [("tensor_fitted", "in_file")])])

"""
Workflow for applying PICo.
"""

tractography.connect([(bet, trackpico, [("mask_file", "seed_file")])])
tractography.connect([(fsl2scheme, dtlutgen, [("scheme", "scheme_file")])])
tractography.connect([(dtlutgen, picopdfs, [("dtLUT", "luts")])])
tractography.connect([(dtifit, picopdfs, [("tensor_fitted", "in_file")])])
tractography.connect([(picopdfs, trackpico, [("pdfs", "in_file")])])

# ProcStreamlines might throw memory errors; comment out this line in that case.
tractography.connect([(trackdt, procstreamlines, [("tracked", "in_file")])])

"""
Connecting the Fractional Anisotropy and Trace nodes is simple, as they
obtain their input from the tensor fitting.

This is also where our voxel- and data-dimension functions come in. We pass
these functions, along with the original DWI image from the input node, to
the header-generating nodes. When a connection source is given as a tuple
``(field, function)``, nipype applies the function to the field's value
before passing the result on. This ensures that the output files will be
correct and readable.
"""

tractography.connect([(dtifit, fa, [("tensor_fitted", "in_file")])])
tractography.connect([(fa, analyzeheader_fa, [("fa", "in_file")])])
tractography.connect([(inputnode, analyzeheader_fa,
                       [(('dwi', get_vox_dims), 'voxel_dims'),
                        (('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(fa, fa2nii, [('fa', 'data_file')])])
tractography.connect([(inputnode, fa2nii, [(('dwi', get_affine), 'affine')])])
tractography.connect([(analyzeheader_fa, fa2nii, [('header', 'header_file')])])

tractography.connect([(dtifit, trace, [("tensor_fitted", "in_file")])])
tractography.connect([(trace, analyzeheader_trace, [("trace", "in_file")])])
tractography.connect([(inputnode, analyzeheader_trace,
                       [(('dwi', get_vox_dims), 'voxel_dims'),
                        (('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(trace, trace2nii, [('trace', 'data_file')])])
tractography.connect([(inputnode, trace2nii, [(('dwi', get_affine), 'affine')])])
tractography.connect([(analyzeheader_trace, trace2nii, [('header', 'header_file')])])

tractography.connect([(dtifit, dteig, [("tensor_fitted", "in_file")])])

tractography.connect([(trackpico, cam2trk_pico, [('tracked', 'in_file')])])
tractography.connect([(trackdt, cam2trk_dt, [('tracked', 'in_file')])])
tractography.connect([(inputnode, cam2trk_pico,
                       [(('dwi', get_vox_dims), 'voxel_dims'),
                        (('dwi', get_data_dims), 'data_dims')])])
tractography.connect([(inputnode, cam2trk_dt,
                       [(('dwi', get_vox_dims), 'voxel_dims'),
                        (('dwi', get_data_dims), 'data_dims')])])

"""
Finally, we create another, higher-level workflow to connect our tractography
workflow with the info and datagrabbing nodes declared at the beginning. Our
tutorial is now extensible to any arbitrary number of subjects by simply
adding their names to ``subject_list`` and their data to the proper folders.
Note the dotted ``inputnode.dwi`` syntax below: when the target of a
connection is itself a workflow, its inputs are addressed as
``<node name>.<field>``.
"""

workflow = pe.Workflow(name="workflow")
workflow.base_dir = os.path.abspath('camino_dti_tutorial')
workflow.connect([(infosource, datasource, [('subject_id', 'subject_id')]),
                  (datasource, tractography, [('dwi', 'inputnode.dwi'),
                                              ('bvals', 'inputnode.bvals'),
                                              ('bvecs', 'inputnode.bvecs')
                                              ])
                  ])

"""
The following code runs the whole workflow and writes a .dot and .png graph
of the processing pipeline.
"""

if __name__ == '__main__':
    workflow.run()
    workflow.write_graph()

"""
You can choose the format of the exported graph with the ``format`` option;
for example, ``workflow.write_graph(format='eps')``.
"""
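
"""
To speed up execution, ``run`` also accepts an execution plugin; a sketch
(the process count is illustrative, set it to suit your machine)::

    workflow.run(plugin='MultiProc', plugin_args={'n_procs': 4})
"""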