diff --git a/THANKS.rst b/THANKS.rst index 4d8cdd47e7..71c4d9eeac 100644 --- a/THANKS.rst +++ b/THANKS.rst @@ -19,4 +19,3 @@ and `UL1 TR000442 University of Iowa Clinical and Translational Science Program We would also like to thank `JetBrains `__ for providing `Pycharm `__ licenses. - diff --git a/doc/_templates/indexsidebar.html b/doc/_templates/indexsidebar.html index ce03fb63f9..f771858968 100644 --- a/doc/_templates/indexsidebar.html +++ b/doc/_templates/indexsidebar.html @@ -11,4 +11,4 @@

{{ _('Links') }}

Python Versions -{% endblock %} +{% endblock %} diff --git a/doc/_templates/navbar.html b/doc/_templates/navbar.html index 9afb8ff8d5..6b2d457a1f 100644 --- a/doc/_templates/navbar.html +++ b/doc/_templates/navbar.html @@ -13,4 +13,3 @@ Developers · About · Nipy - diff --git a/doc/devel/filename_generation.rst b/doc/devel/filename_generation.rst index 5772ab33c1..dbf264c5dd 100644 --- a/doc/devel/filename_generation.rst +++ b/doc/devel/filename_generation.rst @@ -169,4 +169,3 @@ absolute path is generated and used for the ``cmdline`` when run, but In [80]: res.interface.inputs.outfile Out[80]: 'bar.nii' - diff --git a/doc/devel/gitwash/forking_hell.rst b/doc/devel/gitwash/forking_hell.rst index 1a3a163508..ac764c1c14 100644 --- a/doc/devel/gitwash/forking_hell.rst +++ b/doc/devel/gitwash/forking_hell.rst @@ -30,4 +30,3 @@ Create your own forked copy of nipype_ should find yourself at the home page for your own forked copy of nipype_. .. include:: links.inc - diff --git a/doc/devel/gitwash/git_development.rst b/doc/devel/gitwash/git_development.rst index 7b6e021752..3808ca9fa1 100644 --- a/doc/devel/gitwash/git_development.rst +++ b/doc/devel/gitwash/git_development.rst @@ -13,4 +13,3 @@ Contents: set_up_fork configure_git development_workflow - diff --git a/doc/devel/gitwash/index.rst b/doc/devel/gitwash/index.rst index 8cc6750192..aaf1fff1b3 100644 --- a/doc/devel/gitwash/index.rst +++ b/doc/devel/gitwash/index.rst @@ -14,5 +14,3 @@ Contents: patching git_development git_resources - - diff --git a/doc/devel/gitwash/set_up_fork.rst b/doc/devel/gitwash/set_up_fork.rst index 2349d5852b..3105135155 100644 --- a/doc/devel/gitwash/set_up_fork.rst +++ b/doc/devel/gitwash/set_up_fork.rst @@ -65,4 +65,3 @@ Just for your own satisfaction, show yourself that you now have a new origin git@github.com:your-user-name/nipype.git (push) .. include:: links.inc - diff --git a/nipype/caching/memory.py b/nipype/caching/memory.py index 34d5ac1927..d2946710f1 100644 --- a/nipype/caching/memory.py +++ b/nipype/caching/memory.py @@ -287,7 +287,7 @@ def clear_runs_since(self, day=None, month=None, year=None, warn=True): os.remove(log_name) def _clear_all_but(self, runs, warn=True): - """Remove all the runs appart from those given to the function + """Remove all the runs apart from those given to the function input. """ rm_all_but(self.base_dir, set(runs.keys()), warn=warn) diff --git a/nipype/interfaces/afni/base.py b/nipype/interfaces/afni/base.py index 660e913dc3..28dc1dc410 100644 --- a/nipype/interfaces/afni/base.py +++ b/nipype/interfaces/afni/base.py @@ -262,8 +262,8 @@ def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext=None) Generate a filename based on the given parameters. The filename will take the form: cwd/basename. - If change_ext is True, it will use the extentions specified in - intputs.output_type. + If change_ext is True, it will use the extensions specified in + inputs.output_type. 
Parameters ---------- diff --git a/nipype/interfaces/afni/model.py b/nipype/interfaces/afni/model.py index 2e6d2fc15a..1235f8afff 100644 --- a/nipype/interfaces/afni/model.py +++ b/nipype/interfaces/afni/model.py @@ -265,7 +265,7 @@ class DeconvolveInputSpec(AFNICommandInputSpec): class DeconvolveOutputSpec(TraitedSpec): out_file = File(desc="output statistics file", exists=True) reml_script = File( - desc="automatical generated script to run 3dREMLfit", exists=True + desc="automatically generated script to run 3dREMLfit", exists=True ) x1D = File(desc="save out X matrix", exists=True) cbucket = File(desc="output regression coefficients file (if generated)") @@ -528,7 +528,7 @@ class RemlfitInputSpec(AFNICommandInputSpec): "be included.", argstr="-Rglt %s", ) - fitts_file = File(desc="ouput dataset for REML fitted model", argstr="-Rfitts %s") + fitts_file = File(desc="output dataset for REML fitted model", argstr="-Rfitts %s") errts_file = File( desc="output dataset for REML residuals = data - fitted model", argstr="-Rerrts %s", @@ -584,7 +584,7 @@ class RemlfitOutputSpec(AFNICommandOutputSpec): "but ONLY for the GLTs added on the REMLfit command " "line itself via 'gltsym' (if generated)" ) - fitts_file = File(desc="ouput dataset for REML fitted model (if generated)") + fitts_file = File(desc="output dataset for REML fitted model (if generated)") errts_file = File( desc="output dataset for REML residuals = data - fitted model (if " "generated" ) diff --git a/nipype/interfaces/afni/preprocess.py b/nipype/interfaces/afni/preprocess.py index fad5cbdf2f..4fdb006554 100644 --- a/nipype/interfaces/afni/preprocess.py +++ b/nipype/interfaces/afni/preprocess.py @@ -821,7 +821,7 @@ class AutoTLRCInputSpec(CommandLineInputSpec): class AutoTLRC(AFNICommand): - """A minmal wrapper for the AutoTLRC script + """A minimal wrapper for the AutoTLRC script The only option currently supported is no_ss. For complete details, see the `3dQwarp Documentation. `_ @@ -1327,7 +1327,7 @@ class ECMInputSpec(CentralityInputSpec): ) fecm = traits.Bool( desc="Fast centrality method; substantial speed increase but cannot " - "accomodate thresholding; automatically selected if -thresh or " + "accommodate thresholding; automatically selected if -thresh or " "-sparsity are not set", argstr="-fecm", ) @@ -2933,7 +2933,7 @@ class TProjectInputSpec(AFNICommandInputSpec): the output dataset: * mode = ZERO -- put zero values in their place; - output datset is same length as input + output dataset is same length as input * mode = KILL -- remove those time points; output dataset is shorter than input * mode = NTRP -- censored values are replaced by interpolated @@ -3073,7 +3073,7 @@ class TProject(AFNICommand): as ``-passband``. In this way, you can bandpass time-censored data, and at the same time, remove other time series of no interest (e.g., physiological estimates, motion parameters). - Shifts voxel time series from input so that seperate slices are aligned to + Shifts voxel time series from input so that separate slices are aligned to the same temporal origin. Examples @@ -3188,7 +3188,7 @@ class TShiftOutputSpec(AFNICommandOutputSpec): class TShift(AFNICommand): - """Shifts voxel time series from input so that seperate slices are aligned + """Shifts voxel time series from input so that separate slices are aligned to the same temporal origin. For complete details, see the `3dTshift Documentation. @@ -3658,7 +3658,7 @@ class QwarpInputSpec(AFNICommandInputSpec): with 3dNwarpApply and 3dNwarpCat, for example. 
* To be clear, this is the warp from source dataset coordinates to base dataset coordinates, where the - values at each base grid point are the xyz displacments + values at each base grid point are the xyz displacements needed to move that grid point's xyz values to the corresponding xyz values in the source dataset: base( (x,y,z) + WARP(x,y,z) ) matches source(x,y,z) @@ -4034,7 +4034,7 @@ class QwarpInputSpec(AFNICommandInputSpec): The goal is greater speed, and it seems to help this" positively piggish program to be more expeditious." * However, accuracy is somewhat lower with '-duplo'," - for reasons that currenly elude Zhark; for this reason," + for reasons that currently elude Zhark; for this reason," the Emperor does not usually use '-duplo'. """, @@ -4225,21 +4225,21 @@ class QwarpInputSpec(AFNICommandInputSpec): ) hel = traits.Bool( desc="Hellinger distance: a matching function for the adventurous" - "This option has NOT be extensively tested for usefullness" + "This option has NOT be extensively tested for usefulness" "and should be considered experimental at this infundibulum.", argstr="-hel", xor=["nmi", "mi", "lpc", "lpa", "pear"], ) mi = traits.Bool( desc="Mutual Information: a matching function for the adventurous" - "This option has NOT be extensively tested for usefullness" + "This option has NOT be extensively tested for usefulness" "and should be considered experimental at this infundibulum.", argstr="-mi", xor=["mi", "hel", "lpc", "lpa", "pear"], ) nmi = traits.Bool( desc="Normalized Mutual Information: a matching function for the adventurous" - "This option has NOT been extensively tested for usefullness" + "This option has NOT been extensively tested for usefulness" "and should be considered experimental at this infundibulum.", argstr="-nmi", xor=["nmi", "hel", "lpc", "lpa", "pear"], diff --git a/nipype/interfaces/afni/utils.py b/nipype/interfaces/afni/utils.py index 9c44a40fd8..bf6f339d1d 100644 --- a/nipype/interfaces/afni/utils.py +++ b/nipype/interfaces/afni/utils.py @@ -539,27 +539,27 @@ class CatInputSpec(AFNICommandInputSpec): argstr="-sel %s", ) out_int = traits.Bool( - desc="specifiy int data type for output", + desc="specify int data type for output", argstr="-i", xor=["out_format", "out_nice", "out_double", "out_fint", "out_cint"], ) out_nice = traits.Bool( - desc="specifiy nice data type for output", + desc="specify nice data type for output", argstr="-n", xor=["out_format", "out_int", "out_double", "out_fint", "out_cint"], ) out_double = traits.Bool( - desc="specifiy double data type for output", + desc="specify double data type for output", argstr="-d", xor=["out_format", "out_nice", "out_int", "out_fint", "out_cint"], ) out_fint = traits.Bool( - desc="specifiy int, rounded down, data type for output", + desc="specify int, rounded down, data type for output", argstr="-f", xor=["out_format", "out_nice", "out_double", "out_int", "out_cint"], ) out_cint = traits.Bool( - desc="specifiy int, rounded up, data type for output", + desc="specify int, rounded up, data type for output", xor=["out_format", "out_nice", "out_double", "out_fint", "out_int"], ) @@ -693,7 +693,7 @@ class CenterMassInputSpec(CommandLineInputSpec): argstr="-set %f %f %f", ) local_ijk = traits.Bool( - desc="Output values as (i,j,k) in local orienation", argstr="-local_ijk" + desc="Output values as (i,j,k) in local orientation", argstr="-local_ijk" ) roi_vals = traits.List( traits.Int, @@ -2554,11 +2554,11 @@ class ReHoInputSpec(CommandLineInputSpec): class ReHoOutputSpec(TraitedSpec): 
out_file = File(exists=True, desc="Voxelwise regional homogeneity map") - out_vals = File(desc="Table of labelwise regional homogenity values") + out_vals = File(desc="Table of labelwise regional homogeneity values") class ReHo(AFNICommandBase): - """Compute regional homogenity for a given neighbourhood.l, + """Compute regional homogeneity for a given neighbourhood, based on a local neighborhood of that voxel. For complete details, see the `3dReHo Documentation. `_ @@ -2740,7 +2740,7 @@ class TCatSBInputSpec(AFNICommandInputSpec): class TCatSubBrick(AFNICommand): """Hopefully a temporary function to allow sub-brick selection until - afni file managment is improved. + afni file management is improved. For complete details, see the `3dTcat Documentation. `_ diff --git a/nipype/interfaces/c3.py b/nipype/interfaces/c3.py index c91c02569c..e9a3aed5eb 100644 --- a/nipype/interfaces/c3.py +++ b/nipype/interfaces/c3.py @@ -84,7 +84,7 @@ class C3dInputSpec(CommandLineInputSpec): desc=( "Write all images on the convert3d stack as multiple files." " Supports both list of output files or a pattern for the output" - " filenames (using %d substituion)." + " filenames (using %d substitution)." ), ) pix_type = traits.Enum( diff --git a/nipype/interfaces/dcmstack.py b/nipype/interfaces/dcmstack.py index bc18659c93..6909c0fc8f 100644 --- a/nipype/interfaces/dcmstack.py +++ b/nipype/interfaces/dcmstack.py @@ -55,7 +55,7 @@ class NiftiGeneratorBase(BaseInterface): embedded meta data.""" def _get_out_path(self, meta, idx=None): - """Return the output path for the gernerated Nifti.""" + """Return the output path for the generated Nifti.""" if self.inputs.out_format: out_fmt = self.inputs.out_format else: @@ -262,7 +262,7 @@ def _outputs(self): return outputs def _run_interface(self, runtime): - # If the 'meta_keys' input is a list, covert it to a dict + # If the 'meta_keys' input is a list, convert it to a dict self._make_name_map() nw = NiftiWrapper.from_filename(self.inputs.in_file) self.result = {} @@ -342,7 +342,7 @@ class MergeNiftiInputSpec(NiftiGeneratorBaseInputSpec): merge_dim = traits.Int( desc="Dimension to merge along. If not " "specified, the last singular or " - "non-existant dimension is used." + "non-existent dimension is used."
) diff --git a/nipype/interfaces/freesurfer/preprocess.py b/nipype/interfaces/freesurfer/preprocess.py index a30749cf56..4c9009cd10 100644 --- a/nipype/interfaces/freesurfer/preprocess.py +++ b/nipype/interfaces/freesurfer/preprocess.py @@ -2361,8 +2361,8 @@ def _format_arg(self, name, spec, value): def _list_outputs(self): outputs = self.output_spec().get() cwd = os.getcwd() - prefices = dict(src=self.inputs.source_file, trg=self.inputs.target_file) - suffices = dict( + prefixes = dict(src=self.inputs.source_file, trg=self.inputs.target_file) + suffixes = dict( out_reg_file=("src", "_robustreg.lta", False), registered_file=("src", "_robustreg", True), weights_file=("src", "_robustweights", True), @@ -2372,12 +2372,12 @@ def _list_outputs(self): half_source_xfm=("src", "_robustxfm.lta", False), half_targ_xfm=("trg", "_robustxfm.lta", False), ) - for name, sufftup in list(suffices.items()): + for name, sufftup in list(suffixes.items()): value = getattr(self.inputs, name) if value: if value is True: outputs[name] = fname_presuffix( - prefices[sufftup[0]], + prefixes[sufftup[0]], suffix=sufftup[1], newpath=cwd, use_ext=sufftup[2], diff --git a/nipype/interfaces/io.py b/nipype/interfaces/io.py index 78d8efc797..ea06482500 100644 --- a/nipype/interfaces/io.py +++ b/nipype/interfaces/io.py @@ -764,7 +764,7 @@ def _list_outputs(self): out_files.append(s3dst) # Otherwise, copy locally src -> dst if not s3_flag or isdefined(self.inputs.local_copy): - # Create output directory if it doesnt exist + # Create output directory if it doesn't exist if not os.path.exists(path): try: os.makedirs(path) @@ -1318,7 +1318,7 @@ class SelectFilesInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): sort_filelist = traits.Bool( True, usedefault=True, - desc="When matching mutliple files, return them" " in sorted order.", + desc="When matching multiple files, return them" " in sorted order.", ) raise_on_empty = traits.Bool( True, @@ -1346,7 +1346,7 @@ class SelectFiles(IOBase): This interface uses Python's {}-based string formatting syntax to plug values (possibly known only at workflow execution time) into string - templates and collect files from persistant storage. These templates can + templates and collect files from persistent storage. These templates can also be combined with glob wildcards (``*``, ``?``) and character ranges (``[...]``). The field names in the formatting template (i.e. the terms in braces) will become inputs fields on the interface, and the keys in the templates @@ -1513,7 +1513,7 @@ class DataFinder(IOBase): Will recursively search any subdirectories by default. This can be limited with the min/max depth options. Matched paths are available in the output 'out_paths'. Any named groups of - captured text from the regular expression are also available as ouputs of + captured text from the regular expression are also available as outputs of the same name. 
Examples @@ -1583,7 +1583,7 @@ def _run_interface(self, runtime): ] self.result = None for root_path in self.inputs.root_paths: - # Handle tilda/env variables and remove extra seperators + # Handle tilda/env variables and remove extra separators root_path = os.path.normpath( os.path.expandvars(os.path.expanduser(root_path)) ) @@ -1612,7 +1612,7 @@ def _run_interface(self, runtime): for key, vals in list(self.result.items()): self.result[key] = vals[0] else: - # sort all keys acording to out_paths + # sort all keys according to out_paths for key in list(self.result.keys()): if key == "out_paths": continue @@ -2083,7 +2083,7 @@ class XNATSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): assessor_id = Str( desc=( - "Option to customize ouputs representation in XNAT - " + "Option to customize outputs representation in XNAT - " "assessor level will be used with specified id" ), xor=["reconstruction_id"], @@ -2091,7 +2091,7 @@ class XNATSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): reconstruction_id = Str( desc=( - "Option to customize ouputs representation in XNAT - " + "Option to customize outputs representation in XNAT - " "reconstruction level will be used with specified id" ), xor=["assessor_id"], diff --git a/nipype/interfaces/matlab.py b/nipype/interfaces/matlab.py index 294abdf3ef..f68c5ea43d 100644 --- a/nipype/interfaces/matlab.py +++ b/nipype/interfaces/matlab.py @@ -191,7 +191,7 @@ def _gen_matlab_command(self, argstr, script_lines): else: prescript.insert(0, "fprintf(1,'Executing code at %s:\\n',datestr(now));") for path in paths: - # addpath() is not available after compliation + # addpath() is not available after compilation # https://www.mathworks.com/help/compiler/ismcc.html # https://www.mathworks.com/help/compiler/isdeployed.html prescript.append("if ~(ismcc || isdeployed), addpath('%s'); end;\n" % path) diff --git a/nipype/interfaces/minc/minc.py b/nipype/interfaces/minc/minc.py index 0d4c302f94..6948f0453a 100644 --- a/nipype/interfaces/minc/minc.py +++ b/nipype/interfaces/minc/minc.py @@ -350,7 +350,7 @@ class ToRawOutputSpec(TraitedSpec): class ToRaw(StdOutCommandLine): """Dump a chunk of MINC file data. This program is largely - superceded by mincextract (see Extract). + superseded by mincextract (see Extract). 
Examples -------- @@ -518,32 +518,32 @@ class ToEcatInputSpec(CommandLineInputSpec): ) ignore_patient_variable = traits.Bool( - desc="Ignore informations from the minc patient variable.", + desc="Ignore information from the minc patient variable.", argstr="-ignore_patient_variable", ) ignore_study_variable = traits.Bool( - desc="Ignore informations from the minc study variable.", + desc="Ignore information from the minc study variable.", argstr="-ignore_study_variable", ) ignore_acquisition_variable = traits.Bool( - desc="Ignore informations from the minc acquisition variable.", + desc="Ignore information from the minc acquisition variable.", argstr="-ignore_acquisition_variable", ) ignore_ecat_acquisition_variable = traits.Bool( - desc="Ignore informations from the minc ecat_acquisition variable.", + desc="Ignore information from the minc ecat_acquisition variable.", argstr="-ignore_ecat_acquisition_variable", ) ignore_ecat_main = traits.Bool( - desc="Ignore informations from the minc ecat-main variable.", + desc="Ignore information from the minc ecat-main variable.", argstr="-ignore_ecat_main", ) ignore_ecat_subheader_variable = traits.Bool( - desc="Ignore informations from the minc ecat-subhdr variable.", + desc="Ignore information from the minc ecat-subhdr variable.", argstr="-ignore_ecat_subheader_variable", ) @@ -1553,7 +1553,7 @@ class PikInputSpec(CommandLineInputSpec): ) start = traits.Int( - desc="Slice number to get. (note this is in voxel co-ordinates).", + desc="Slice number to get. (note this is in voxel coordinates).", argstr="--slice %s", ) # FIXME Int is correct? @@ -1565,7 +1565,7 @@ class PikInputSpec(CommandLineInputSpec): slice_y = traits.Bool(desc="Get a coronal (y) slice.", argstr="-y", xor=_xor_slice) slice_x = traits.Bool( desc="Get a sagittal (x) slice.", argstr="-x", xor=_xor_slice - ) # FIXME typo in man page? sagital? + ) # FIXME typo in man page? sagittal? triplanar = traits.Bool( desc="Create a triplanar view of the input file.", argstr="--triplanar" @@ -2759,7 +2759,7 @@ class NormInputSpec(CommandLineInputSpec): exists=True, ) clamp = traits.Bool( - desc="Force the ouput range between limits [default].", + desc="Force the output range between limits [default].", argstr="-clamp", usedefault=True, default_value=True, diff --git a/nipype/interfaces/petpvc.py b/nipype/interfaces/petpvc.py index f315e9fc7c..283677c59a 100644 --- a/nipype/interfaces/petpvc.py +++ b/nipype/interfaces/petpvc.py @@ -195,8 +195,8 @@ def _gen_fname( """Generate a filename based on the given parameters. The filename will take the form: cwd/basename. - If change_ext is True, it will use the extentions specified in - intputs.output_type. + If change_ext is True, it will use the extensions specified in + inputs.output_type. Parameters ---------- diff --git a/nipype/interfaces/semtools/brains/segmentation.py b/nipype/interfaces/semtools/brains/segmentation.py index 2c97b86842..79e25c2bda 100644 --- a/nipype/interfaces/semtools/brains/segmentation.py +++ b/nipype/interfaces/semtools/brains/segmentation.py @@ -114,7 +114,7 @@ class BRAINSTalairach(SEMLikeCommandLine): category: BRAINS.Segmentation - description: This program creates a VTK structured grid defining the Talairach coordinate system based on four points: AC, PC, IRP, and SLA. The resulting structred grid can be written as either a classic VTK file or the new VTK XML file format. Two representations of the resulting grid can be written. 
The first is a bounding box representation that also contains the location of the AC and PC points. The second representation is the full Talairach grid representation that includes the additional rows of boxes added to the inferior allowing full coverage of the cerebellum. + description: This program creates a VTK structured grid defining the Talairach coordinate system based on four points: AC, PC, IRP, and SLA. The resulting structured grid can be written as either a classic VTK file or the new VTK XML file format. Two representations of the resulting grid can be written. The first is a bounding box representation that also contains the location of the AC and PC points. The second representation is the full Talairach grid representation that includes the additional rows of boxes added to the inferior allowing full coverage of the cerebellum. version: 0.1 diff --git a/nipype/interfaces/semtools/brains/utilities.py b/nipype/interfaces/semtools/brains/utilities.py index bed7438271..78ee3c25c5 100644 --- a/nipype/interfaces/semtools/brains/utilities.py +++ b/nipype/interfaces/semtools/brains/utilities.py @@ -173,7 +173,7 @@ class GeneratePurePlugMaskInputSpec(CommandLineInputSpec): ) numberOfSubSamples = InputMultiPath( traits.Int, - desc="Number of continous index samples taken at each direction of lattice space for each plug volume", + desc="Number of continuous index samples taken at each direction of lattice space for each plug volume", sep=",", argstr="--numberOfSubSamples %s", ) diff --git a/nipype/interfaces/semtools/diffusion/diffusion.py b/nipype/interfaces/semtools/diffusion/diffusion.py index 8cc5a320e6..d352adf276 100644 --- a/nipype/interfaces/semtools/diffusion/diffusion.py +++ b/nipype/interfaces/semtools/diffusion/diffusion.py @@ -49,7 +49,7 @@ class dtiaverage(SEMLikeCommandLine): category: Diffusion.Diffusion Tensor Images.CommandLineOnly description: dtiaverage is a program that allows to compute the average of an arbitrary number of tensor fields (listed after the --inputs option) This program is used in our pipeline as the last step of the atlas building processing. When all the tensor fields have been deformed in the same space, to create the average tensor field (--tensor_output) we use dtiaverage. - Several average method can be used (specified by the --method option): euclidian, log-euclidian and pga. The default being euclidian. + Several average method can be used (specified by the --method option): euclidean, log-euclidean and pga. The default being euclidean. version: 1.0.0 @@ -118,7 +118,7 @@ class dtiestimInputSpec(CommandLineInputSpec): "wls", "nls", "ml", - desc="Esitmation method (lls:linear least squares, wls:weighted least squares, nls:non-linear least squares, ml:maximum likelihood)", + desc="Estimation method (lls:linear least squares, wls:weighted least squares, nls:non-linear least squares, ml:maximum likelihood)", argstr="--method %s", ) correction = traits.Enum( @@ -214,7 +214,7 @@ class dtiestim(SEMLikeCommandLine): contributor: Casey Goodlett, Francois Budin - acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependancies on boost and a fortran compiler. 
+ acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependencies on boost and a fortran compiler. """ input_spec = dtiestimInputSpec @@ -553,7 +553,7 @@ class DWIConvertInputSpec(CommandLineInputSpec): argstr="--useIdentityMeaseurementFrame ", ) useBMatrixGradientDirections = traits.Bool( - desc="Fill the nhdr header with the gradient directions and bvalues computed out of the BMatrix. Only changes behavior for Siemens data. In some cases the standard public gradients are not properly computed. The gradients can emperically computed from the private BMatrix fields. In some cases the private BMatrix is consistent with the public grandients, but not in all cases, when it exists BMatrix is usually most robust.", + desc="Fill the nhdr header with the gradient directions and bvalues computed out of the BMatrix. Only changes behavior for Siemens data. In some cases the standard public gradients are not properly computed. The gradients can empirically computed from the private BMatrix fields. In some cases the private BMatrix is consistent with the public grandients, but not in all cases, when it exists BMatrix is usually most robust.", argstr="--useBMatrixGradientDirections ", ) outputDirectory = traits.Either( diff --git a/nipype/interfaces/semtools/diffusion/gtract.py b/nipype/interfaces/semtools/diffusion/gtract.py index eb8e05f4f5..e6342c8a06 100644 --- a/nipype/interfaces/semtools/diffusion/gtract.py +++ b/nipype/interfaces/semtools/diffusion/gtract.py @@ -275,7 +275,7 @@ class gtractCoregBvaluesInputSpec(CommandLineInputSpec): argstr="--outputTransform %s", ) eddyCurrentCorrection = traits.Bool( - desc="Flag to perform eddy current corection in addition to motion correction (recommended)", + desc="Flag to perform eddy current correction in addition to motion correction (recommended)", argstr="--eddyCurrentCorrection ", ) numberOfIterations = traits.Int( @@ -501,7 +501,7 @@ class gtractCopyImageOrientationInputSpec(CommandLineInputSpec): argstr="--inputVolume %s", ) inputReferenceVolume = File( - desc="Required: input file containing orietation that will be cloned.", + desc="Required: input file containing orientation that will be cloned.", exists=True, argstr="--inputReferenceVolume %s", ) @@ -1224,7 +1224,7 @@ class gtractCoRegAnatomy(SEMLikeCommandLine): category: Diffusion.GTRACT - description: This program will register a Nrrd diffusion weighted 4D vector image to a fixed anatomical image. Two registration methods are supported for alignment with anatomical images: Rigid and B-Spline. The rigid registration performs a rigid body registration with the anatomical images and should be done as well to initialize the B-Spline transform. The B-SPline transform is the deformable transform, where the user can control the amount of deformation based on the number of control points as well as the maximum distance that these points can move. The B-Spline registration places a low dimensional grid in the image, which is deformed. This allows for some susceptibility related distortions to be removed from the diffusion weighted images. In general the amount of motion in the slice selection and read-out directions direction should be kept low. 
The distortion is in the phase encoding direction in the images. It is recommended that skull stripped (i.e. image containing only brain with skull removed) images shoud be used for image co-registration with the B-Spline transform. + description: This program will register a Nrrd diffusion weighted 4D vector image to a fixed anatomical image. Two registration methods are supported for alignment with anatomical images: Rigid and B-Spline. The rigid registration performs a rigid body registration with the anatomical images and should be done as well to initialize the B-Spline transform. The B-SPline transform is the deformable transform, where the user can control the amount of deformation based on the number of control points as well as the maximum distance that these points can move. The B-Spline registration places a low dimensional grid in the image, which is deformed. This allows for some susceptibility related distortions to be removed from the diffusion weighted images. In general the amount of motion in the slice selection and read-out directions direction should be kept low. The distortion is in the phase encoding direction in the images. It is recommended that skull stripped (i.e. image containing only brain with skull removed) images should be used for image co-registration with the B-Spline transform. version: 4.0.0 @@ -1542,7 +1542,7 @@ class gtractFiberTracking(SEMLikeCommandLine): category: Diffusion.GTRACT - description: This program implements four fiber tracking methods (Free, Streamline, GraphSearch, Guided). The output of the fiber tracking is vtkPolyData (i.e. Polylines) that can be loaded into Slicer3 for visualization. The poly data can be saved in either old VTK format files (.vtk) or in the new VTK XML format (.xml). The polylines contain point data that defines ther Tensor at each point along the fiber tract. This can then be used to rendered as glyphs in Slicer3 and can be used to define severeal scalar measures without referencing back to the anisotropy images. (1) Free tracking is a basic streamlines algorithm. This is a direct implementation of the method original proposed by Basser et al. The tracking follows the primarty eigenvector. The tracking begins with seed points in the starting region. Only those voxels above the specified anisotropy threshold in the starting region are used as seed points. Tracking terminates either as a result of maximum fiber length, low ansiotropy, or large curvature. This is a great way to explore your data. (2) The streamlines algorithm is a direct implementation of the method originally proposed by Basser et al. The tracking follows the primary eigenvector. The tracking begins with seed points in the starting region. Only those voxels above the specified anisotropy threshold in the starting region are used as seed points. Tracking terminates either by reaching the ending region or reaching some stopping criteria. Stopping criteria are specified using the following parameters: tracking threshold, curvature threshold, and max length. Only paths terminating in the ending region are kept in this method. The TEND algorithm proposed by Lazar et al. (Human Brain Mapping 18:306-321, 2003) has been instrumented. This can be enabled using the --useTend option while performing Streamlines tracking. This utilizes the entire diffusion tensor to deflect the incoming vector instead of simply following the primary eigenvector. The TEND parameters are set using the --tendF and --tendG options. 
(3) Graph Search tracking is the first step in the full GTRACT algorithm developed by Cheng et al. (NeuroImage 31(3): 1075-1085, 2006) for finding the tracks in a tensor image. This method was developed to generate fibers in a Tensor representation where crossing fibers occur. The graph search algorithm follows the primary eigenvector in non-ambigous regions and utilizes branching and a graph search algorithm in ambigous regions. Ambiguous tracking regions are defined based on two criteria: Branching Al Threshold (anisotropy values below this value and above the traching threshold) and Curvature Major Eigen (angles of the primary eigenvector direction and the current tracking direction). In regions that meet this criteria, two or three tracking paths are considered. The first is the standard primary eigenvector direction. The second is the seconadary eigenvector direction. This is based on the assumption that these regions may be prolate regions. If the Random Walk option is selected then a third direction is also considered. This direction is defined by a cone pointing from the current position to the centroid of the ending region. The interior angle of the cone is specified by the user with the Branch/Guide Angle parameter. A vector contained inside of the cone is selected at random and used as the third direction. This method can also utilize the TEND option where the primary tracking direction is that specified by the TEND method instead of the primary eigenvector. The parameter '--maximumBranchPoints' allows the tracking to have this number of branches being considered at a time. If this number of branch points is exceeded at any time, then the algorithm will revert back to a streamline alogrithm until the number of branches is reduced. This allows the user to constrain the computational complexity of the algorithm. (4) The second phase of the GTRACT algorithm is Guided Tracking. This method incorporates anatomical information about the track orientation using an initial guess of the fiber track. In the originally proposed GTRACT method, this would be created from the fibers resulting from the Graph Search tracking. However, in practice this can be created using any method and could be defined manually. To create the guide fiber the program gtractCreateGuideFiber can be used. This program will load a fiber tract that has been generated and create a centerline representation of the fiber tract (i.e. a single fiber). In this method, the fiber tracking follows the primary eigenvector direction unless it deviates from the guide fiber track by a angle greater than that specified by the '--guidedCurvatureThreshold' parameter. The user must specify the guide fiber when running this program. + description: This program implements four fiber tracking methods (Free, Streamline, GraphSearch, Guided). The output of the fiber tracking is vtkPolyData (i.e. Polylines) that can be loaded into Slicer3 for visualization. The poly data can be saved in either old VTK format files (.vtk) or in the new VTK XML format (.xml). The polylines contain point data that defines the Tensor at each point along the fiber tract. This can then be used to rendered as glyphs in Slicer3 and can be used to define several scalar measures without referencing back to the anisotropy images. (1) Free tracking is a basic streamlines algorithm. This is a direct implementation of the method original proposed by Basser et al. The tracking follows the primarty eigenvector. The tracking begins with seed points in the starting region. 
Only those voxels above the specified anisotropy threshold in the starting region are used as seed points. Tracking terminates either as a result of maximum fiber length, low ansiotropy, or large curvature. This is a great way to explore your data. (2) The streamlines algorithm is a direct implementation of the method originally proposed by Basser et al. The tracking follows the primary eigenvector. The tracking begins with seed points in the starting region. Only those voxels above the specified anisotropy threshold in the starting region are used as seed points. Tracking terminates either by reaching the ending region or reaching some stopping criteria. Stopping criteria are specified using the following parameters: tracking threshold, curvature threshold, and max length. Only paths terminating in the ending region are kept in this method. The TEND algorithm proposed by Lazar et al. (Human Brain Mapping 18:306-321, 2003) has been instrumented. This can be enabled using the --useTend option while performing Streamlines tracking. This utilizes the entire diffusion tensor to deflect the incoming vector instead of simply following the primary eigenvector. The TEND parameters are set using the --tendF and --tendG options. (3) Graph Search tracking is the first step in the full GTRACT algorithm developed by Cheng et al. (NeuroImage 31(3): 1075-1085, 2006) for finding the tracks in a tensor image. This method was developed to generate fibers in a Tensor representation where crossing fibers occur. The graph search algorithm follows the primary eigenvector in non-ambiguous regions and utilizes branching and a graph search algorithm in ambiguous regions. Ambiguous tracking regions are defined based on two criteria: Branching Al Threshold (anisotropy values below this value and above the traching threshold) and Curvature Major Eigen (angles of the primary eigenvector direction and the current tracking direction). In regions that meet this criteria, two or three tracking paths are considered. The first is the standard primary eigenvector direction. The second is the seconadary eigenvector direction. This is based on the assumption that these regions may be prolate regions. If the Random Walk option is selected then a third direction is also considered. This direction is defined by a cone pointing from the current position to the centroid of the ending region. The interior angle of the cone is specified by the user with the Branch/Guide Angle parameter. A vector contained inside of the cone is selected at random and used as the third direction. This method can also utilize the TEND option where the primary tracking direction is that specified by the TEND method instead of the primary eigenvector. The parameter '--maximumBranchPoints' allows the tracking to have this number of branches being considered at a time. If this number of branch points is exceeded at any time, then the algorithm will revert back to a streamline algorithm until the number of branches is reduced. This allows the user to constrain the computational complexity of the algorithm. (4) The second phase of the GTRACT algorithm is Guided Tracking. This method incorporates anatomical information about the track orientation using an initial guess of the fiber track. In the originally proposed GTRACT method, this would be created from the fibers resulting from the Graph Search tracking. However, in practice this can be created using any method and could be defined manually. 
To create the guide fiber the program gtractCreateGuideFiber can be used. This program will load a fiber tract that has been generated and create a centerline representation of the fiber tract (i.e. a single fiber). In this method, the fiber tracking follows the primary eigenvector direction unless it deviates from the guide fiber track by a angle greater than that specified by the '--guidedCurvatureThreshold' parameter. The user must specify the guide fiber when running this program. version: 4.0.0 @@ -1712,7 +1712,7 @@ class gtractTensorInputSpec(CommandLineInputSpec): "NOMASK", "ROIAUTO", "ROI", - desc="ROIAUTO: mask is implicitly defined using a otsu forground and hole filling algorithm. ROI: Uses the masks to define what parts of the image should be used for computing the transform. NOMASK: no mask used", + desc="ROIAUTO: mask is implicitly defined using a otsu foreground and hole filling algorithm. ROI: Uses the masks to define what parts of the image should be used for computing the transform. NOMASK: no mask used", argstr="--maskProcessingMode %s", ) maskVolume = File( diff --git a/nipype/interfaces/semtools/diffusion/maxcurvature.py b/nipype/interfaces/semtools/diffusion/maxcurvature.py index c4f170e9cb..be6bfd10e6 100644 --- a/nipype/interfaces/semtools/diffusion/maxcurvature.py +++ b/nipype/interfaces/semtools/diffusion/maxcurvature.py @@ -51,7 +51,7 @@ class maxcurvature(SEMLikeCommandLine): contributor: Casey Goodlett - acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependancies on boost and a fortran compiler. + acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependencies on boost and a fortran compiler. """ input_spec = maxcurvatureInputSpec diff --git a/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py b/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py index cbf58623dc..6544282a00 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py +++ b/nipype/interfaces/semtools/diffusion/tractography/commandlineonly.py @@ -47,7 +47,7 @@ class fiberstats(SEMLikeCommandLine): contributor: Casey Goodlett - acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependancies on boost and a fortran compiler. 
+ acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependencies on boost and a fortran compiler. """ input_spec = fiberstatsInputSpec diff --git a/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py b/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py index caddd16e22..cd8f1a5cd3 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py +++ b/nipype/interfaces/semtools/diffusion/tractography/fibertrack.py @@ -93,7 +93,7 @@ class fibertrack(SEMLikeCommandLine): contributor: Casey Goodlett - acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependancies on boost and a fortran compiler. + acknowledgements: Hans Johnson(1,3,4); Kent Williams(1); (1=University of Iowa Department of Psychiatry, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering) provided conversions to make DTIProcess compatible with Slicer execution, and simplified the stand-alone build requirements by removing the dependencies on boost and a fortran compiler. """ input_spec = fibertrackInputSpec diff --git a/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py b/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py index 67026cb890..5cd092caa6 100644 --- a/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py +++ b/nipype/interfaces/semtools/diffusion/tractography/ukftractography.py @@ -126,7 +126,7 @@ class UKFTractographyInputSpec(CommandLineInputSpec): ) Rs = traits.Float(desc="Measurement noise", argstr="--Rs %f") maxBranchingAngle = traits.Float( - desc="Maximum branching angle, in degrees. When using multiple tensors, a new branch will be created when the tensors' major directions form an angle between (minBranchingAngle, maxBranchingAngle). Branching is supressed when this maxBranchingAngle is set to 0.0", + desc="Maximum branching angle, in degrees. When using multiple tensors, a new branch will be created when the tensors' major directions form an angle between (minBranchingAngle, maxBranchingAngle). Branching is suppressed when this maxBranchingAngle is set to 0.0", argstr="--maxBranchingAngle %f", ) minBranchingAngle = traits.Float( @@ -159,7 +159,7 @@ class UKFTractography(SEMLikeCommandLine): category: Diffusion.Tractography - description: This module traces fibers in a DWI Volume using the multiple tensor unscented Kalman Filter methology. For more informations check the documentation. + description: This module traces fibers in a DWI Volume using the multiple tensor unscented Kalman Filter methology. For more information check the documentation. 
version: 1.0 diff --git a/nipype/interfaces/semtools/filtering/featuredetection.py b/nipype/interfaces/semtools/filtering/featuredetection.py index 37a44ae4d5..df22f88810 100644 --- a/nipype/interfaces/semtools/filtering/featuredetection.py +++ b/nipype/interfaces/semtools/filtering/featuredetection.py @@ -21,10 +21,10 @@ class GenerateSummedGradientImageInputSpec(CommandLineInputSpec): inputVolume1 = File( - desc="input volume 1, usally t1 image", exists=True, argstr="--inputVolume1 %s" + desc="input volume 1, usually t1 image", exists=True, argstr="--inputVolume1 %s" ) inputVolume2 = File( - desc="input volume 2, usally t2 image", exists=True, argstr="--inputVolume2 %s" + desc="input volume 2, usually t2 image", exists=True, argstr="--inputVolume2 %s" ) outputFileName = traits.Either( traits.Bool, @@ -309,10 +309,10 @@ class ErodeImage(SEMLikeCommandLine): class GenerateBrainClippedImageInputSpec(CommandLineInputSpec): inputImg = File( - desc="input volume 1, usally t1 image", exists=True, argstr="--inputImg %s" + desc="input volume 1, usually t1 image", exists=True, argstr="--inputImg %s" ) inputMsk = File( - desc="input volume 2, usally t2 image", exists=True, argstr="--inputMsk %s" + desc="input volume 2, usually t2 image", exists=True, argstr="--inputMsk %s" ) outputFileName = traits.Either( traits.Bool, @@ -402,7 +402,7 @@ class NeighborhoodMedian(SEMLikeCommandLine): class GenerateTestImageInputSpec(CommandLineInputSpec): inputVolume = File( - desc="input volume 1, usally t1 image", exists=True, argstr="--inputVolume %s" + desc="input volume 1, usually t1 image", exists=True, argstr="--inputVolume %s" ) outputVolume = traits.Either( traits.Bool, diff --git a/nipype/interfaces/semtools/legacy/registration.py b/nipype/interfaces/semtools/legacy/registration.py index cb65aa12f5..959a1b1dc0 100644 --- a/nipype/interfaces/semtools/legacy/registration.py +++ b/nipype/interfaces/semtools/legacy/registration.py @@ -20,7 +20,9 @@ class scalartransformInputSpec(CommandLineInputSpec): - input_image = File(desc="Image to tranform", exists=True, argstr="--input_image %s") + input_image = File( + desc="Image to transform", exists=True, argstr="--input_image %s" + ) output_image = traits.Either( traits.Bool, File(), @@ -35,7 +37,7 @@ class scalartransformInputSpec(CommandLineInputSpec): desc="Output file for transformation parameters", argstr="--transformation %s", ) - invert = traits.Bool(desc="Invert tranform before applying.", argstr="--invert ") + invert = traits.Bool(desc="Invert transform before applying.", argstr="--invert ") deformation = File( desc="Deformation field.", exists=True, argstr="--deformation %s" ) diff --git a/nipype/interfaces/semtools/registration/brainsfit.py b/nipype/interfaces/semtools/registration/brainsfit.py index b319ce1c86..56c9da54f2 100644 --- a/nipype/interfaces/semtools/registration/brainsfit.py +++ b/nipype/interfaces/semtools/registration/brainsfit.py @@ -291,7 +291,7 @@ class BRAINSFitInputSpec(CommandLineInputSpec): argstr="--outputTransform %s", ) initializeRegistrationByCurrentGenericTransform = traits.Bool( - desc="If this flag is ON, the current generic composite transform, resulted from the linear registration stages, is set to initialize the follow nonlinear registration process. However, by the default behaviour, the moving image is first warped based on the existant transform before it is passed to the BSpline registration filter. 
It is done to speed up the BSpline registration by reducing the computations of composite transform Jacobian.", + desc="If this flag is ON, the current generic composite transform, resulted from the linear registration stages, is set to initialize the follow nonlinear registration process. However, by the default behaviour, the moving image is first warped based on the existent transform before it is passed to the BSpline registration filter. It is done to speed up the BSpline registration by reducing the computations of composite transform Jacobian.", argstr="--initializeRegistrationByCurrentGenericTransform ", ) failureExitCode = traits.Int( @@ -327,7 +327,7 @@ class BRAINSFitInputSpec(CommandLineInputSpec): argstr="--maximumNumberOfCorrections %d", ) gui = traits.Bool( - desc="Display intermediate image volumes for debugging. NOTE: This is not part of the standard build sytem, and probably does nothing on your installation.", + desc="Display intermediate image volumes for debugging. NOTE: This is not part of the standard build system, and probably does nothing on your installation.", argstr="--gui ", ) promptUser = traits.Bool( @@ -392,7 +392,7 @@ class BRAINSFit(SEMLikeCommandLine): category: Registration - description: Register a three-dimensional volume to a reference volume (Mattes Mutual Information by default). Full documentation avalable here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSFit. Method described in BRAINSFit: Mutual Information Registrations of Whole-Brain 3D Images, Using the Insight Toolkit, Johnson H.J., Harris G., Williams K., The Insight Journal, 2007. http://hdl.handle.net/1926/1291 + description: Register a three-dimensional volume to a reference volume (Mattes Mutual Information by default). Full documentation available here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BRAINSFit. Method described in BRAINSFit: Mutual Information Registrations of Whole-Brain 3D Images, Using the Insight Toolkit, Johnson H.J., Harris G., Williams K., The Insight Journal, 2007. http://hdl.handle.net/1926/1291 version: 3.0.0 diff --git a/nipype/interfaces/semtools/registration/brainsresample.py b/nipype/interfaces/semtools/registration/brainsresample.py index e8ac045936..a77a52dca3 100644 --- a/nipype/interfaces/semtools/registration/brainsresample.py +++ b/nipype/interfaces/semtools/registration/brainsresample.py @@ -75,7 +75,7 @@ class BRAINSResampleInputSpec(CommandLineInputSpec): defaultValue = traits.Float(desc="Default voxel value", argstr="--defaultValue %f") gridSpacing = InputMultiPath( traits.Int, - desc="Add warped grid to output image to help show the deformation that occured with specified spacing. A spacing of 0 in a dimension indicates that grid lines should be rendered to fall exactly (i.e. do not allow displacements off that plane). This is useful for makeing a 2D image of grid lines from the 3D space", + desc="Add warped grid to output image to help show the deformation that occurred with specified spacing. A spacing of 0 in a dimension indicates that grid lines should be rendered to fall exactly (i.e. do not allow displacements off that plane). 
This is useful for making a 2D image of grid lines from the 3D space", sep=",", argstr="--gridSpacing %s", ) diff --git a/nipype/interfaces/semtools/registration/specialized.py b/nipype/interfaces/semtools/registration/specialized.py index 0726ab807c..85f8509a5b 100644 --- a/nipype/interfaces/semtools/registration/specialized.py +++ b/nipype/interfaces/semtools/registration/specialized.py @@ -86,7 +86,7 @@ class VBRAINSDemonWarpInputSpec(CommandLineInputSpec): argstr="--registrationFilterType %s", ) smoothDisplacementFieldSigma = traits.Float( - desc="A gaussian smoothing value to be applied to the deformation feild at each iteration.", + desc="A gaussian smoothing value to be applied to the deformation field at each iteration.", argstr="--smoothDisplacementFieldSigma %f", ) numberOfPyramidLevels = traits.Int( @@ -346,7 +346,7 @@ class BRAINSDemonWarpInputSpec(CommandLineInputSpec): argstr="--registrationFilterType %s", ) smoothDisplacementFieldSigma = traits.Float( - desc="A gaussian smoothing value to be applied to the deformation feild at each iteration.", + desc="A gaussian smoothing value to be applied to the deformation field at each iteration.", argstr="--smoothDisplacementFieldSigma %f", ) numberOfPyramidLevels = traits.Int( @@ -403,7 +403,7 @@ class BRAINSDemonWarpInputSpec(CommandLineInputSpec): "ROIAUTO", "ROI", "BOBF", - desc="What mode to use for using the masks: NOMASK|ROIAUTO|ROI|BOBF. If ROIAUTO is choosen, then the mask is implicitly defined using a otsu forground and hole filling algorithm. Where the Region Of Interest mode uses the masks to define what parts of the image should be used for computing the deformation field. Brain Only Background Fill uses the masks to pre-process the input images by clipping and filling in the background with a predefined value.", + desc="What mode to use for using the masks: NOMASK|ROIAUTO|ROI|BOBF. If ROIAUTO is chosen, then the mask is implicitly defined using a otsu foreground and hole filling algorithm. Where the Region Of Interest mode uses the masks to define what parts of the image should be used for computing the deformation field. Brain Only Background Fill uses the masks to pre-process the input images by clipping and filling in the background with a predefined value.", argstr="--maskProcessingMode %s", ) fixedBinaryVolume = File( diff --git a/nipype/interfaces/semtools/segmentation/specialized.py b/nipype/interfaces/semtools/segmentation/specialized.py index 0b1f46f420..a7744775c4 100644 --- a/nipype/interfaces/semtools/segmentation/specialized.py +++ b/nipype/interfaces/semtools/segmentation/specialized.py @@ -37,11 +37,11 @@ class BRAINSCutInputSpec(CommandLineInputSpec): desc="print out some debugging information", argstr="--verbose %d" ) multiStructureThreshold = traits.Bool( - desc="multiStructureThreshold module to deal with overlaping area", + desc="multiStructureThreshold module to deal with overlapping area", argstr="--multiStructureThreshold ", ) histogramEqualization = traits.Bool( - desc="A Histogram Equalization process could be added to the creating/applying process from Subject To Atlas. Default is false, which genreate input vectors without Histogram Equalization. ", + desc="A Histogram Equalization process could be added to the creating/applying process from Subject To Atlas. Default is false, which generate input vectors without Histogram Equalization. 
", argstr="--histogramEqualization ", ) computeSSEOn = traits.Bool( @@ -144,7 +144,7 @@ class BRAINSROIAutoInputSpec(CommandLineInputSpec): argstr="--closingSize %f", ) ROIAutoDilateSize = traits.Float( - desc="This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", + desc="This flag is only relevant when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", argstr="--ROIAutoDilateSize %f", ) outputVolumePixelType = traits.Enum( @@ -178,7 +178,7 @@ class BRAINSROIAuto(SEMLikeCommandLine): category: Segmentation.Specialized - description: This program is used to create a mask over the most prominant forground region in an image. This is accomplished via a combination of otsu thresholding and a closing operation. More documentation is available here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ForegroundMasking. + description: This program is used to create a mask over the most prominent foreground region in an image. This is accomplished via a combination of otsu thresholding and a closing operation. More documentation is available here: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/ForegroundMasking. version: 2.4.1 @@ -269,7 +269,7 @@ class BRAINSConstellationDetectorInputSpec(CommandLineInputSpec): argstr="--outputVerificationScript %s", ) mspQualityLevel = traits.Int( - desc=", Flag cotrols how agressive the MSP is estimated. 0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds), NOTE: -1= Prealigned so no estimate!., ", + desc=", Flag controls how aggressive the MSP is estimated. 0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds), NOTE: -1= Prealigned so no estimate!., ", argstr="--mspQualityLevel %d", ) otsuPercentileThreshold = traits.Float( @@ -391,7 +391,7 @@ class BRAINSConstellationDetectorInputSpec(CommandLineInputSpec): traits.Bool, Directory(), hash_files=False, - desc=", The directory for the debuging images to be written., ", + desc=", The directory for the debugging images to be written., ", argstr="--resultsDir %s", ) writedebuggingImagesLevel = traits.Int( @@ -457,7 +457,7 @@ class BRAINSConstellationDetectorOutputSpec(TraitedSpec): exists=True, ) resultsDir = Directory( - desc=", The directory for the debuging images to be written., ", + desc=", The directory for the debugging images to be written., ", exists=True, ) @@ -467,7 +467,7 @@ class BRAINSConstellationDetector(SEMLikeCommandLine): category: Segmentation.Specialized - description: This program will find the mid-sagittal plane, a constellation of landmarks in a volume, and create an AC/PC aligned data set with the AC point at the center of the voxel lattice (labeled at the origin of the image physical space.) Part of this work is an extention of the algorithms originally described by Dr. Babak A. Ardekani, Alvin H. 
Bachman, Model-based automatic detection of the anterior and posterior commissures on MRI scans, NeuroImage, Volume 46, Issue 3, 1 July 2009, Pages 677-682, ISSN 1053-8119, DOI: 10.1016/j.neuroimage.2009.02.030. (http://www.sciencedirect.com/science/article/B6WNP-4VRP25C-4/2/8207b962a38aa83c822c6379bc43fe4c) + description: This program will find the mid-sagittal plane, a constellation of landmarks in a volume, and create an AC/PC aligned data set with the AC point at the center of the voxel lattice (labeled at the origin of the image physical space.) Part of this work is an extension of the algorithms originally described by Dr. Babak A. Ardekani, Alvin H. Bachman, Model-based automatic detection of the anterior and posterior commissures on MRI scans, NeuroImage, Volume 46, Issue 3, 1 July 2009, Pages 677-682, ISSN 1053-8119, DOI: 10.1016/j.neuroimage.2009.02.030. (http://www.sciencedirect.com/science/article/B6WNP-4VRP25C-4/2/8207b962a38aa83c822c6379bc43fe4c) version: 1.0 @@ -626,7 +626,7 @@ class BinaryMaskEditorBasedOnLandmarks(SEMLikeCommandLine): class BRAINSMultiSTAPLEInputSpec(CommandLineInputSpec): inputCompositeT1Volume = File( - desc="Composite T1, all label maps transofrmed into the space for this image.", + desc="Composite T1, all label maps transformed into the space for this image.", exists=True, argstr="--inputCompositeT1Volume %s", ) @@ -724,7 +724,7 @@ class BRAINSABCInputSpec(CommandLineInputSpec): traits.Bool, Directory(), hash_files=False, - desc="Ouput directory", + desc="Output directory", argstr="--outputDir %s", ) atlasToSubjectTransformType = traits.Enum( @@ -832,7 +832,7 @@ class BRAINSABCInputSpec(CommandLineInputSpec): ) numberOfSubSamplesInEachPlugArea = InputMultiPath( traits.Int, - desc="Number of continous index samples taken at each direction of lattice space for each plug volume.", + desc="Number of continuous index samples taken at each direction of lattice space for each plug volume.", sep=",", argstr="--numberOfSubSamplesInEachPlugArea %s", ) @@ -872,7 +872,7 @@ class BRAINSABCOutputSpec(TraitedSpec): desc="(optional) Filename to which save the final state of the registration", exists=True, ) - outputDir = Directory(desc="Ouput directory", exists=True) + outputDir = Directory(desc="Output directory", exists=True) atlasToSubjectTransform = File( desc="The transform from atlas to the subject", exists=True ) diff --git a/nipype/interfaces/semtools/testing/generateaveragelmkfile.py b/nipype/interfaces/semtools/testing/generateaveragelmkfile.py index 7138dc37d3..fe3aa71521 100644 --- a/nipype/interfaces/semtools/testing/generateaveragelmkfile.py +++ b/nipype/interfaces/semtools/testing/generateaveragelmkfile.py @@ -29,14 +29,14 @@ class GenerateAverageLmkFileInputSpec(CommandLineInputSpec): traits.Bool, File(), hash_files=False, - desc="Ouput landmark file name that includes average values for landmarks (.fcsv or .wts)", + desc="Output landmark file name that includes average values for landmarks (.fcsv or .wts)", argstr="--outputLandmarkFile %s", ) class GenerateAverageLmkFileOutputSpec(TraitedSpec): outputLandmarkFile = File( - desc="Ouput landmark file name that includes average values for landmarks (.fcsv or .wts)", + desc="Output landmark file name that includes average values for landmarks (.fcsv or .wts)", exists=True, ) diff --git a/nipype/interfaces/semtools/utilities/brains.py b/nipype/interfaces/semtools/utilities/brains.py index 5ff0f9aa35..b5964e3555 100644 --- a/nipype/interfaces/semtools/utilities/brains.py +++ 
b/nipype/interfaces/semtools/utilities/brains.py @@ -52,7 +52,7 @@ class BRAINSConstellationModelerInputSpec(CommandLineInputSpec): argstr="--resultsDir %s", ) mspQualityLevel = traits.Int( - desc=", Flag cotrols how agressive the MSP is estimated. 0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds)., ", + desc=", Flag controls how aggressive the MSP is estimated. 0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds)., ", argstr="--mspQualityLevel %d", ) rescaleIntensities = traits.Bool( @@ -736,7 +736,7 @@ class BRAINSClipInferior(SEMLikeCommandLine): class GenerateLabelMapFromProbabilityMapInputSpec(CommandLineInputSpec): inputVolumes = InputMultiPath( File(exists=True), - desc="The Input probaiblity images to be computed for lable maps", + desc="The Input probaiblity images to be computed for label maps", argstr="--inputVolumes %s...", ) outputLabelVolume = traits.Either( @@ -805,7 +805,7 @@ class BRAINSAlignMSPInputSpec(CommandLineInputSpec): argstr="--writedebuggingImagesLevel %d", ) mspQualityLevel = traits.Int( - desc=", Flag cotrols how agressive the MSP is estimated. 0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds)., ", + desc=", Flag controls how aggressive the MSP is estimated. 0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds)., ", argstr="--mspQualityLevel %d", ) rescaleIntensities = traits.Bool( @@ -857,11 +857,11 @@ class BRAINSAlignMSPOutputSpec(TraitedSpec): class BRAINSAlignMSP(SEMLikeCommandLine): - """title: Align Mid Saggital Brain (BRAINS) + """title: Align Mid Sagittal Brain (BRAINS) category: Utilities.BRAINS - description: Resample an image into ACPC alignement ACPCDetect + description: Resample an image into ACPC alignment ACPCDetect """ input_spec = BRAINSAlignMSPInputSpec @@ -886,7 +886,7 @@ class BRAINSLandmarkInitializerInputSpec(CommandLineInputSpec): argstr="--inputMovingLandmarkFilename %s", ) inputWeightFilename = File( - desc="Input weight file name for landmarks. Higher weighted landmark will be considered more heavily. Weights are propotional, that is the magnitude of weights will be normalized by its minimum and maximum value. ", + desc="Input weight file name for landmarks. Higher weighted landmark will be considered more heavily. Weights are proportional, that is the magnitude of weights will be normalized by its minimum and maximum value. ", exists=True, argstr="--inputWeightFilename %s", ) @@ -991,7 +991,7 @@ class BRAINSSnapShotWriterInputSpec(CommandLineInputSpec): ) inputPlaneDirection = InputMultiPath( traits.Int, - desc="Plane to display. In general, 0=saggital, 1=coronal, and 2=axial plane.", + desc="Plane to display. In general, 0=sagittal, 1=coronal, and 2=axial plane.", sep=",", argstr="--inputPlaneDirection %s", ) diff --git a/nipype/interfaces/slicer/generate_classes.py b/nipype/interfaces/slicer/generate_classes.py index f71d963142..195579aaec 100644 --- a/nipype/interfaces/slicer/generate_classes.py +++ b/nipype/interfaces/slicer/generate_classes.py @@ -2,7 +2,7 @@ """This script generates Slicer Interfaces based on the CLI modules XML. CLI modules are selected from the hardcoded list below and generated code is placed in the cli_modules.py file (and imported in __init__.py). 
For this to work -correctly you must have your CLI executabes in $PATH""" +correctly you must have your CLI executables in $PATH""" import xml.dom.minidom import subprocess import os @@ -121,7 +121,7 @@ def generate_all_classes( modules_list=[], launcher=[], redirect_x=False, mipav_hacks=False ): """modules_list contains all the SEM compliant tools that should have wrappers created for them. - launcher containtains the command line prefix wrapper arugments needed to prepare + launcher containtains the command line prefix wrapper arguments needed to prepare a proper environment for each of the modules. """ all_code = {} diff --git a/nipype/interfaces/slicer/registration/brainsfit.py b/nipype/interfaces/slicer/registration/brainsfit.py index e26c7036a2..ddbb330a63 100644 --- a/nipype/interfaces/slicer/registration/brainsfit.py +++ b/nipype/interfaces/slicer/registration/brainsfit.py @@ -104,7 +104,7 @@ class BRAINSFitInputSpec(CommandLineInputSpec): "NOMASK", "ROIAUTO", "ROI", - desc="What mode to use for using the masks. If ROIAUTO is choosen, then the mask is implicitly defined using a otsu forground and hole filling algorithm. The Region Of Interest mode (choose ROI) uses the masks to define what parts of the image should be used for computing the transform.", + desc="What mode to use for using the masks. If ROIAUTO is chosen, then the mask is implicitly defined using a otsu foreground and hole filling algorithm. The Region Of Interest mode (choose ROI) uses the masks to define what parts of the image should be used for computing the transform.", argstr="--maskProcessingMode %s", ) fixedBinaryVolume = File( @@ -208,7 +208,7 @@ class BRAINSFitInputSpec(CommandLineInputSpec): ) transformType = InputMultiPath( traits.Str, - desc="Specifies a list of registration types to be used. The valid types are, Rigid, ScaleVersor3D, ScaleSkewVersor3D, Affine, and BSpline. Specifiying more than one in a comma separated list will initialize the next stage with the previous results. If registrationClass flag is used, it overrides this parameter setting.", + desc="Specifies a list of registration types to be used. The valid types are, Rigid, ScaleVersor3D, ScaleSkewVersor3D, Affine, and BSpline. Specifying more than one in a comma separated list will initialize the next stage with the previous results. If registrationClass flag is used, it overrides this parameter setting.", sep=",", argstr="--transformType %s", ) @@ -234,7 +234,7 @@ class BRAINSFitInputSpec(CommandLineInputSpec): argstr="--medianFilterSize %s", ) removeIntensityOutliers = traits.Float( - desc="The half percentage to decide outliers of image intensities. The default value is zero, which means no outlier removal. If the value of 0.005 is given, the moduel will throw away 0.005 % of both tails, so 0.01% of intensities in total would be ignored in its statistic calculation. ", + desc="The half percentage to decide outliers of image intensities. The default value is zero, which means no outlier removal. If the value of 0.005 is given, the module will throw away 0.005 % of both tails, so 0.01% of intensities in total would be ignored in its statistic calculation. ", argstr="--removeIntensityOutliers %f", ) useCachingOfBSplineWeightsMode = traits.Enum( @@ -251,11 +251,11 @@ class BRAINSFitInputSpec(CommandLineInputSpec): argstr="--useExplicitPDFDerivativesMode %s", ) ROIAutoDilateSize = traits.Float( - desc="This flag is only relavent when using ROIAUTO mode for initializing masks. 
It defines the final dilation size to capture a bit of background outside the tissue region. At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", + desc="This flag is only relevant when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", argstr="--ROIAutoDilateSize %f", ) ROIAutoClosingSize = traits.Float( - desc="This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the hole closing size in mm. It is rounded up to the nearest whole pixel size in each direction. The default is to use a closing size of 9mm. For mouse data this value may need to be reset to 0.9 or smaller.", + desc="This flag is only relevant when using ROIAUTO mode for initializing masks. It defines the hole closing size in mm. It is rounded up to the nearest whole pixel size in each direction. The default is to use a closing size of 9mm. For mouse data this value may need to be reset to 0.9 or smaller.", argstr="--ROIAutoClosingSize %f", ) relaxationFactor = traits.Float( @@ -295,7 +295,7 @@ class BRAINSFitInputSpec(CommandLineInputSpec): argstr="--projectedGradientTolerance %f", ) gui = traits.Bool( - desc="Display intermediate image volumes for debugging. NOTE: This is not part of the standard build sytem, and probably does nothing on your installation.", + desc="Display intermediate image volumes for debugging. NOTE: This is not part of the standard build system, and probably does nothing on your installation.", argstr="--gui ", ) promptUser = traits.Bool( diff --git a/nipype/interfaces/slicer/segmentation/specialized.py b/nipype/interfaces/slicer/segmentation/specialized.py index 3abab602dc..da0bff4dd1 100644 --- a/nipype/interfaces/slicer/segmentation/specialized.py +++ b/nipype/interfaces/slicer/segmentation/specialized.py @@ -28,7 +28,7 @@ class RobustStatisticsSegmenterInputSpec(CommandLineInputSpec): argstr="--intensityHomogeneity %f", ) curvatureWeight = traits.Float( - desc="Given sphere 1.0 score and extreme rough bounday/surface 0 score, what is the expected smoothness of the object?", + desc="Given sphere 1.0 score and extreme rough boundary/surface 0 score, what is the expected smoothness of the object?", argstr="--curvatureWeight %f", ) labelValue = traits.Int( @@ -255,7 +255,7 @@ class BRAINSROIAutoInputSpec(CommandLineInputSpec): argstr="--closingSize %f", ) ROIAutoDilateSize = traits.Float( - desc="This flag is only relavent when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", + desc="This flag is only relevant when using ROIAUTO mode for initializing masks. It defines the final dilation size to capture a bit of background outside the tissue region. 
At setting of 10mm has been shown to help regularize a BSpline registration type so that there is some background constraints to match the edges of the head better.", argstr="--ROIAutoDilateSize %f", ) outputVolumePixelType = traits.Enum( @@ -288,7 +288,7 @@ class BRAINSROIAuto(SEMLikeCommandLine): category: Segmentation.Specialized - description: This tool uses a combination of otsu thresholding and a closing operations to identify the most prominant foreground region in an image. + description: This tool uses a combination of otsu thresholding and a closing operations to identify the most prominent foreground region in an image. version: 2.4.1 diff --git a/nipype/interfaces/slicer/surface.py b/nipype/interfaces/slicer/surface.py index d2ebe4d15f..6d7a7b2382 100644 --- a/nipype/interfaces/slicer/surface.py +++ b/nipype/interfaces/slicer/surface.py @@ -351,7 +351,7 @@ class ModelMaker(SEMLikeCommandLine): category: Surface Models - description: Create 3D surface models from segmented data.

Models are imported into Slicer under a model hierarchy node in a MRML scene. The model colors are set by the color table associated with the input volume (these colours will only be visible if you load the model scene file).

Create Multiple:

If you specify a list of Labels, it will over ride any start/end label settings.

If you clickGenerate Allit will over ride the list of lables and any start/end label settings.

Model Maker Settings:

You can set the number of smoothing iterations, target reduction in number of polygons (decimal percentage). Use 0 and 1 if you wish no smoothing nor decimation.
You can set the flags to split normals or generate point normals in this pane as well.
You can save a copy of the models after intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation); these models are not saved in the mrml file, turn off deleting temporary files first in the python window:
slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff()

+ description: Create 3D surface models from segmented data.

Models are imported into Slicer under a model hierarchy node in a MRML scene. The model colors are set by the color table associated with the input volume (these colors will only be visible if you load the model scene file).

Create Multiple:

If you specify a list of Labels, it will override any start/end label settings.

If you click Generate All, it will override the list of labels and any start/end label settings.

Model Maker Settings:

You can set the number of smoothing iterations and the target reduction in the number of polygons (decimal percentage). Use 0 and 1 if you want neither smoothing nor decimation.
You can set the flags to split normals or generate point normals in this pane as well.
You can save a copy of the models after intermediate steps (marching cubes, smoothing, and decimation if not joint smoothing, otherwise just after decimation); these models are not saved in the MRML file. To keep them, turn off deleting temporary files first in the Python window:
slicer.modules.modelmaker.cliModuleLogic().DeleteTemporaryFilesOff()

version: 4.1 diff --git a/nipype/interfaces/utility/tests/test_wrappers.py b/nipype/interfaces/utility/tests/test_wrappers.py index fda81b2f5b..1e2ce8a953 100644 --- a/nipype/interfaces/utility/tests/test_wrappers.py +++ b/nipype/interfaces/utility/tests/test_wrappers.py @@ -95,7 +95,7 @@ def test_function_with_imports(tmpdir): def test_aux_connect_function(tmpdir): - """This tests excution nodes with multiple inputs and auxiliary + """This tests execution nodes with multiple inputs and auxiliary function inside the Workflow connect function. """ tmpdir.chdir() diff --git a/nipype/pipeline/engine/report_template.html b/nipype/pipeline/engine/report_template.html index 3fb66b4a02..86b2745122 100644 --- a/nipype/pipeline/engine/report_template.html +++ b/nipype/pipeline/engine/report_template.html @@ -261,4 +261,3 @@

- diff --git a/nipype/pipeline/engine/tests/test_join.py b/nipype/pipeline/engine/tests/test_join.py index 17b462367b..9606587c13 100644 --- a/nipype/pipeline/engine/tests/test_join.py +++ b/nipype/pipeline/engine/tests/test_join.py @@ -40,7 +40,7 @@ class IncrementInputSpec(nib.TraitedSpec): class IncrementOutputSpec(nib.TraitedSpec): - output1 = nib.traits.Int(desc="ouput") + output1 = nib.traits.Int(desc="output") class IncrementInterface(nib.SimpleInterface): @@ -63,7 +63,7 @@ class SumInputSpec(nib.TraitedSpec): class SumOutputSpec(nib.TraitedSpec): - output1 = nib.traits.Int(desc="ouput") + output1 = nib.traits.Int(desc="output") operands = nib.traits.List(nib.traits.Int, desc="operands") @@ -91,7 +91,7 @@ class SetInputSpec(nib.TraitedSpec): class SetOutputSpec(nib.TraitedSpec): - output1 = nib.traits.Int(desc="ouput") + output1 = nib.traits.Int(desc="output") class SetInterface(nib.BaseInterface): diff --git a/nipype/utils/config.py b/nipype/utils/config.py index 9c7505455d..98e1be31a9 100644 --- a/nipype/utils/config.py +++ b/nipype/utils/config.py @@ -214,7 +214,7 @@ def get_data(self, key): return None def save_data(self, key, value): - """Store config flie""" + """Store config file""" datadict = {} if os.path.exists(self.data_file): with SoftFileLock("%s.lock" % self.data_file): diff --git a/nipype/utils/docparse.py b/nipype/utils/docparse.py index 576a235892..45fbca5df7 100644 --- a/nipype/utils/docparse.py +++ b/nipype/utils/docparse.py @@ -191,7 +191,7 @@ def build_doc(doc, opts): ------- newdoc : string The docstring with flags replaced with attribute names and - formated to match nipy standards (as best we can). + formatted to match nipy standards (as best we can). """ @@ -206,7 +206,7 @@ def build_doc(doc, opts): # Probably an empty line continue # For lines we care about, the first item is the flag - if "," in linelist[0]: # sometimes flags are only seperated by comma + if "," in linelist[0]: # sometimes flags are only separated by comma flag = linelist[0].split(",")[0] else: flag = linelist[0] @@ -223,7 +223,7 @@ def build_doc(doc, opts): # For all the docs I've looked at, the flags all have # indentation (spaces) at the start of the line. # Other parts of the docs, like 'usage' statements - # start with alpha-numeric characters. We only care + # start with alphanumeric characters. We only care # about the flags. flags_doc.append(line) return format_params(newdoc, flags_doc) @@ -246,7 +246,7 @@ def get_doc(cmd, opt_map, help_flag=None, trap_error=True): Returns ------- doc : string - The formated docstring + The formatted docstring """ res = CommandLine( diff --git a/nipype/utils/spm_docs.py b/nipype/utils/spm_docs.py index 6864992e0e..758d1fbb39 100644 --- a/nipype/utils/spm_docs.py +++ b/nipype/utils/spm_docs.py @@ -38,7 +38,7 @@ def grab_doc(task_name): def _strip_header(doc): """Strip Matlab header and splash info off doc. - Searches for the tag 'NIPYPE' in the doc and returns everyting after that. + Searches for the tag 'NIPYPE' in the doc and returns everything after that. """ hdr = "NIPYPE" diff --git a/nipype/utils/tmpdirs.py b/nipype/utils/tmpdirs.py index 4752514e8f..70709ae209 100644 --- a/nipype/utils/tmpdirs.py +++ b/nipype/utils/tmpdirs.py @@ -16,7 +16,7 @@ class TemporaryDirectory(object): with TemporaryDirectory() as tmpdir: ... - Upon exiting the context, the directory and everthing contained + Upon exiting the context, the directory and everything contained in it are removed. 
""" diff --git a/tools/feedstock.sh b/tools/feedstock.sh index 0ca82d1751..831f04cf39 100755 --- a/tools/feedstock.sh +++ b/tools/feedstock.sh @@ -13,7 +13,7 @@ # GITHUB_TOKEN: Pre-established token for user or bot # # One of: -# CIRCLE_BRANCH: Name of release branch (rel/) +# CIRCLE_BRANCH: Name of release branch (rel/) # CIRCLE_TAG: Name of release tag () # # Depends: