
Commit 984aa21

Author: Erik Ziegler
NetworkX and connectivity example updates

1 parent b06bb4f

File tree: 10 files changed, +370 -426 lines

examples/dmri_group_connectivity_camino.py

Lines changed: 40 additions & 40 deletions
@@ -54,7 +54,7 @@
import os.path as op  # system functions
import cmp
from nipype.workflows.dmri.camino.group_connectivity import create_group_connectivity_pipeline
from nipype.workflows.dmri.connectivity.group_connectivity import (create_merge_networks_by_group_workflow,
    create_merge_group_networks_workflow, create_average_networks_by_group_workflow)

"""
@@ -101,63 +101,63 @@
if not idx == len(group_list.keys()) - 1:
    title += '-'

"""
.. warning::

    The 'info' dictionary below is used to define the input files. In this case, the diffusion-weighted image contains the string 'dti'.
    The same applies to the b-values and b-vector files, and this must be changed to fit your naming scheme.

"""

info = dict(dwi=[['subject_id', 'dti']],
            bvecs=[['subject_id', 'bvecs']],
            bvals=[['subject_id', 'bvals']])
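# A minimal sketch (an assumption, not part of this example) of how an 'info'
# dictionary like the one above is typically wired into a nipype DataGrabber:
# each key becomes an output field, and each value fills in the path template
# per subject. The template used here is illustrative, not the pipeline's own.
import nipype.interfaces.io as nio
import nipype.pipeline.engine as pe

datasource = pe.Node(nio.DataGrabber(infields=['subject_id'],
                                     outfields=['dwi', 'bvecs', 'bvals']),
                     name='datasource')
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '%s/%s'  # hypothetical layout: <subject_id>/<file>
datasource.inputs.template_args = info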

"""
This line creates the processing workflow given the information input about the groups and subjects.

.. seealso::

    * nipype/workflows/dmri/mrtrix/group_connectivity.py
    * nipype/workflows/dmri/camino/connectivity_mapping.py
    * :ref:`dmri_connectivity`

"""

l1pipeline = create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, info)

"""
Define the parcellation scheme to use.
"""

parcellation_name = 'scale500'
cmp_config = cmp.configuration.PipelineConfiguration()
cmp_config.parcellation_scheme = "Lausanne2008"
l1pipeline.inputs.connectivity.inputnode.resolution_network_file = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]['node_information_graphml']
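# A hedged aside: the resolution network file selected above is GraphML, so it
# can be inspected directly with NetworkX (which these connectivity workflows
# already depend on) to confirm how many regions the chosen scale defines.
import networkx as nx

resolution_file = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]['node_information_graphml']
resolution_graph = nx.read_graphml(resolution_file)
print('%s defines %d regions' % (parcellation_name, resolution_graph.number_of_nodes()))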

"""
The first-level pipeline we have tweaked here is run within the for loop.
"""

l1pipeline.run()
l1pipeline.write_graph(format='eps', graph2use='flat')
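# A hedged variation (not used in this example): nipype workflows can also be
# run in parallel on a multicore machine by selecting an execution plugin, e.g.
# l1pipeline.run(plugin='MultiProc', plugin_args={'n_procs': 4})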

"""
Next we create and run the second-level pipeline. The purpose of this workflow is simple:
it is used to merge each subject's CFF file into one, so that there is a single file containing
all of the networks for each group. This can be useful for performing network-based statistics
with the NBS plugin in ConnectomeViewer.

.. seealso::

    http://www.connectomeviewer.org/documentation/users/tutorials/tut_nbs.html

"""

l2pipeline = create_merge_networks_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir)
l2pipeline.run()
l2pipeline.write_graph(format='eps', graph2use='flat')
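# A hedged sketch of reading a merged file back, assuming cfflib (the Connectome
# File Format library these workflows build on) and its load() and
# get_connectome_network() helpers; the file name below is hypothetical.
import cfflib

merged = cfflib.load('group1.cff')
networks = merged.get_connectome_network()  # one network entry per subject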

"""
Now that the for loop is complete there are two grouped CFF files, each containing the appropriate subjects.

examples/dmri_group_connectivity_mrtrix.py

Lines changed: 61 additions & 61 deletions
@@ -99,91 +99,91 @@
if not idx == len(group_list.keys()) - 1:
    title += '-'

"""
.. warning::

    The 'info' dictionary below is used to define the input files. In this case, the diffusion-weighted image contains the string 'dti'.
    The same applies to the b-values and b-vector files, and this must be changed to fit your naming scheme.

"""
info = dict(dwi=[['subject_id', 'dti']],
            bvecs=[['subject_id', 'bvecs']],
            bvals=[['subject_id', 'bvals']])

"""
This line creates the processing workflow given the information input about the groups and subjects.

.. seealso::

    * nipype/workflows/dmri/mrtrix/group_connectivity.py
    * nipype/workflows/dmri/mrtrix/connectivity_mapping.py
    * :ref:`dmri_connectivity_advanced`

"""

l1pipeline = create_group_connectivity_pipeline(group_list, group_id, data_dir, subjects_dir, output_dir, info)

"""
This is used to demonstrate the ease with which different parameters can be set for each group.
These values relate to the absolute threshold used on the fractional anisotropy map. This is done
in order to identify single-fiber voxels. In brains with more damage, however, it may be necessary
to reduce the threshold, since those brains have lower average fractional anisotropy values.
"""

if group_id == 'parkinsons':
    l1pipeline.inputs.connectivity.mapping.threshold_FA.absolute_threshold_value = 0.5
else:
    l1pipeline.inputs.connectivity.mapping.threshold_FA.absolute_threshold_value = 0.7
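# A hedged alternative (not in this example): when more groups or parameters are
# involved, a lookup table with a default keeps the per-group settings in one place.
fa_thresholds = {'parkinsons': 0.5}
l1pipeline.inputs.connectivity.mapping.threshold_FA.absolute_threshold_value = \
    fa_thresholds.get(group_id, 0.7)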

"""
These lines relate to inverting the b-vectors in the encoding file, and setting the
maximum harmonic order of the pre-tractography spherical deconvolution step. This is
done to show how to set inputs that will affect both groups.
"""

l1pipeline.inputs.connectivity.mapping.fsl2mrtrix.invert_y = True
l1pipeline.inputs.connectivity.mapping.csdeconv.maximum_harmonic_order = 6
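# General background (an aside, not from this example): a symmetric spherical
# harmonic fit of maximum order lmax has (lmax + 1) * (lmax + 2) / 2
# coefficients, so the encoding file should provide at least that many
# gradient directions for the deconvolution to be well determined.
lmax = 6
min_directions = (lmax + 1) * (lmax + 2) // 2  # lmax = 6 requires >= 28 directions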

"""
Define the parcellation scheme to use.
"""

parcellation_name = 'scale500'
l1pipeline.inputs.connectivity.mapping.Parcellate.parcellation_name = parcellation_name
cmp_config = cmp.configuration.PipelineConfiguration()
cmp_config.parcellation_scheme = "Lausanne2008"
l1pipeline.inputs.connectivity.mapping.inputnode_within.resolution_network_file = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]['node_information_graphml']

"""
Set the maximum number of tracks to obtain.
"""

l1pipeline.inputs.connectivity.mapping.probCSDstreamtrack.desired_number_of_tracks = 100000

"""
The first-level pipeline we have tweaked here is run within the for loop.
"""

l1pipeline.run()
l1pipeline.write_graph(format='eps', graph2use='flat')

"""
Next we create and run the second-level pipeline. The purpose of this workflow is simple:
it is used to merge each subject's CFF file into one, so that there is a single file containing
all of the networks for each group. This can be useful for performing network-based statistics
with the NBS plugin in ConnectomeViewer.

.. seealso::

    http://www.connectomeviewer.org/documentation/users/tutorials/tut_nbs.html

"""

l2pipeline = create_merge_network_results_by_group_workflow(group_list, group_id, data_dir, subjects_dir, output_dir)
l2pipeline.inputs.l2inputnode.network_file = cmp_config._get_lausanne_parcellation('Lausanne2008')[parcellation_name]['node_information_graphml']
l2pipeline.run()
l2pipeline.write_graph(format='eps', graph2use='flat')
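# A hedged NetworkX aside (this commit's theme): the merged network results are
# NetworkX graphs, so summary measures can be computed directly from a saved
# graph pickle. The file name below is hypothetical.
import networkx as nx

graph = nx.read_gpickle('connectome.pck')      # one subject's connectivity graph
node_strength = graph.degree(weight='weight')  # weighted degree per region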

"""
Now that the for loop is complete there are two grouped CFF files, each containing the appropriate subjects.
