Skip to content

Commit 65a4faa

Browse files
author
Erik Ziegler
committed
More HTML fixes
1 parent 35ec51c commit 65a4faa

File tree

2 files changed

+19
-11
lines changed

2 files changed

+19
-11
lines changed

examples/dmri_group_connectivity_camino.py

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@
5555

5656
import nipype.interfaces.fsl as fsl
5757
import nipype.interfaces.freesurfer as fs # freesurfer
58-
import os.path as op # system functions
58+
import os.path as op # system functions
5959
import cmp
6060
from nipype.workflows.dmri.camino.group_connectivity import create_group_connectivity_pipeline
6161
from nipype.workflows.dmri.connectivity.group_connectivity import (create_merge_networks_by_group_workflow,
@@ -103,8 +103,8 @@
103103
104104
.. warning::
105105
106-
The 'info' dictionary below is used to define the input files. In this case, the diffusion weighted image contains the string 'dwi'.
107-
The same applies to the b-values and b-vector files, and this must be changed to fit your naming scheme.
106+
The 'info' dictionary below is used to define the input files. In this case, the diffusion weighted image contains the string 'dwi'.
107+
The same applies to the b-values and b-vector files, and this must be changed to fit your naming scheme.
108108
109109
"""
110110

@@ -120,9 +120,9 @@
120120
"""
121121

122122
"""
123-
Next we create and run the second-level pipeline. The purpose of this workflow is simple:
124-
It is used to merge each subject's CFF file into one, so that there is a single file containing
125-
all of the networks for each group. This can be useful for performing Network Brain Statistics
123+
The purpose of the second-level workflow is simple: It is used to merge each
124+
subject's CFF file into one, so that there is a single file containing all of the
125+
networks for each group. This can be useful for performing Network Brain Statistics
126126
using the NBS plugin in ConnectomeViewer.
127127
128128
.. seealso::
@@ -137,11 +137,14 @@
137137
title += group_id
138138
if not idx == len(group_list.keys()) - 1:
139139
title += '-'
140+
140141
info = dict(dwi=[['subject_id', 'dti']],
141142
bvecs=[['subject_id', 'bvecs']],
142143
bvals=[['subject_id', 'bvals']])
144+
143145
l1pipeline = create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, info)
144146

147+
# Here we define the parcellation scheme and the number of tracks to produce
145148
parcellation_scheme = 'NativeFreesurfer'
146149
cmp_config = cmp.configuration.PipelineConfiguration()
147150
cmp_config.parcellation_scheme = parcellation_scheme
@@ -150,6 +153,7 @@
150153
l1pipeline.run()
151154
l1pipeline.write_graph(format='eps', graph2use='flat')
152155

156+
# The second-level pipeline is created here
153157
l2pipeline = create_merge_networks_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir)
154158
l2pipeline.run()
155159
l2pipeline.write_graph(format='eps', graph2use='flat')

examples/dmri_group_connectivity_mrtrix.py

Lines changed: 9 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -100,31 +100,31 @@
100100
"""
101101
102102
.. warning::
103+
103104
The 'info' dictionary below is used to define the input files. In this case, the diffusion weighted image contains the string 'dwi'.
104105
The same applies to the b-values and b-vector files, and this must be changed to fit your naming scheme.
105106
106-
107107
"""
108108

109109
"""
110-
This line creates the processing workflow given the information input about the groups and subjects.
110+
The workflow is created given the information input about the groups and subjects.
111111
112112
.. seealso::
113113
114114
* nipype/workflows/dmri/mrtrix/group_connectivity.py
115115
* nipype/workflows/dmri/mrtrix/connectivity_mapping.py
116-
* :ref:`dmri_connectivity_advanced
116+
* :ref:`dmri_connectivity_advanced`
117117
118118
"""
119119

120120
"""
121-
These values relate to the absolute threshold used on the fractional anisotropy map. This is done
121+
We set values for absolute threshold used on the fractional anisotropy map. This is done
122122
in order to identify single-fiber voxels. In brains with more damage, however, it may be necessary
123123
to reduce the threshold, since their brains have lower average fractional anisotropy values.
124124
"""
125125

126126
"""
127-
These lines relate to inverting the b-vectors in the encoding file, and setting the
127+
We invert the b-vectors in the encoding file, and set the
128128
maximum harmonic order of the pre-tractography spherical deconvolution step. This is
129129
done to show how to set inputs that will affect both groups.
130130
"""
@@ -159,9 +159,12 @@
159159
else:
160160
l1pipeline.inputs.connectivity.mapping.threshold_FA.absolute_threshold_value = 0.7
161161

162+
# Here we invert the b-vectors in the Y direction and set the maximum harmonic order of the
163+
# spherical deconvolution step
162164
l1pipeline.inputs.connectivity.mapping.fsl2mrtrix.invert_y = True
163165
l1pipeline.inputs.connectivity.mapping.csdeconv.maximum_harmonic_order = 6
164166

167+
# Here we define the parcellation scheme and the number of tracks to produce
165168
parcellation_name = 'scale500'
166169
l1pipeline.inputs.connectivity.mapping.Parcellate.parcellation_name = parcellation_name
167170
cmp_config = cmp.configuration.PipelineConfiguration()
@@ -172,6 +175,7 @@
172175
l1pipeline.run()
173176
l1pipeline.write_graph(format='eps', graph2use='flat')
174177

178+
# The second-level pipeline is created here
175179
l2pipeline = create_merge_network_results_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir)
176180
l2pipeline.inputs.l2inputnode.network_file = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]['node_information_graphml']
177181
l2pipeline.run()

0 commit comments

Comments
 (0)