diff --git a/README.rst b/README.rst
index c7c55318..4dfb207d 100644
--- a/README.rst
+++ b/README.rst
@@ -47,21 +47,46 @@ specification,
.. code-block:: yaml
- task_name: Registration
- nipype_module: nipype.interfaces.ants.registration
- output_requirements:
- output_warped_image: ["fixed_image", "moving_image", "output_transform_prefix"]
- output_templates:
- output_warped_image: "{output_transform_prefix}warped"
- doctest:
- fixed_image: test.nii.gz
- moving_image: test.nii.gz
- cmdline: >-
- antsRegistration --output [ output_, output_warped_image.nii.gz ]
- --metric Mattes[ test.nii, test.nii, 1, 32, Random, 0.05 ]
- tests_inputs: []
- tests_outputs:
- - AttributeError
+ task_name: n4_bias_field_correction
+ nipype_name: N4BiasFieldCorrection
+ nipype_module: nipype.interfaces.ants.segmentation
+ inputs:
+ omit:
+ # list[str] - fields to omit from the Pydra interface
+ rename:
+ # dict[str, str] - fields to rename in the Pydra interface
+ types:
+ # dict[str, type] - override inferred types (use "mime-like" string for file-format types,
+ # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred
+ # from the nipype interface, but you may want to be more specific, particularly
+ # for file types, where specifying the format also specifies the file that will be
+ # passed to the field in the automatically generated unittests.
+ input_image: medimage/nifti1
+ mask_image: medimage/nifti1
+ weight_image: medimage/nifti1
+ bias_image: medimage/nifti1
+ metadata:
+ # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
+ outputs:
+ omit:
+ # list[str] - fields to omit from the Pydra interface
+ rename:
+ # dict[str, str] - fields to rename in the Pydra interface
+ types:
+ # dict[str, type] - override inferred types (use "mime-like" string for file-format types,
+ # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred
+ # from the nipype interface, but you may want to be more specific, particularly
+ # for file types, where specifying the format also specifies the file that will be
+ # passed to the field in the automatically generated unittests.
+ output_image: medimage/nifti1
+ bias_image: medimage/nifti1
+ callables:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set to the `callable` attribute of output fields
+ templates:
+ # dict[str, str] - `output_file_template` values to be provided to output fields
+ requirements:
+ # dict[str, list[str]] - input fields that are required to be provided for the output field to be present
*Detailed description of the different options to go here*
diff --git a/conftest.py b/conftest.py
index cdf2e046..9cad16c8 100644
--- a/conftest.py
+++ b/conftest.py
@@ -4,6 +4,7 @@
import tempfile
import pytest
from click.testing import CliRunner
+from fileformats.generic import File
PKG_DIR = Path(__file__).parent
@@ -12,6 +13,11 @@
EXAMPLE_WORKFLOWS_DIR = EXAMPLE_SPECS_DIR / "workflow"
+@pytest.fixture
+def gen_test_conftest():
+ return PKG_DIR / "scripts" / "pkg_gen" / "resources" / "conftest.py"
+
+
@pytest.fixture(params=[str(p.stem) for p in (EXAMPLE_TASKS_DIR).glob("*.yaml")])
def task_spec_file(request):
return (EXAMPLE_TASKS_DIR / request.param).with_suffix(".yaml")
diff --git a/example-specs/task/ants_n4_bias_field_correction.yaml b/example-specs/task/ants_n4_bias_field_correction.yaml
new file mode 100644
index 00000000..dfa4ab6b
--- /dev/null
+++ b/example-specs/task/ants_n4_bias_field_correction.yaml
@@ -0,0 +1,333 @@
+# This file is used to manually specify the semi-automatic conversion of
+# 'nipype.interfaces.ants.segmentation.N4BiasFieldCorrection' from Nipype to Pydra.
+#
+# Please fill-in/edit the fields below where appropriate
+#
+# Inputs
+# ------
+# dimension : enum
+# image dimension (2, 3 or 4)
+# input_image : file
+# input for bias correction. Negative values or values close to zero should be processed prior to correction
+# mask_image : file
+# image to specify region to perform final bias correction in
+# weight_image : file
+# image for relative weighting (e.g. probability map of the white matter) of voxels during the B-spline fitting.
+# output_image : str
+# output file name
+# bspline_fitting_distance : float
+#
+# bspline_order : int
+#
+# shrink_factor : int
+#
+# n_iterations : list
+#
+# convergence_threshold : float
+#
+# save_bias : bool
+# True if the estimated bias should be saved to file.
+# bias_image : file
+# Filename for the estimated bias.
+# copy_header : bool
+# copy headers of the original image into the output (corrected) file
+# rescale_intensities : bool
+# [NOTE: Only ANTs>=2.1.0] At each iteration, a new intensity mapping is calculated and applied but there is nothing which constrains the new intensity range to be within certain values. The result is that the range can "drift" from the original at each iteration. This option rescales to the [min,max] range of the original image intensities within the user-specified mask.
+# histogram_sharpening : tuple
+# Three-values tuple of histogram sharpening parameters (FWHM, wienerNoise, numberOfHistogramBins). These options describe the histogram sharpening parameters, i.e. the deconvolution step parameters described in the original N3 algorithm. The default values have been shown to work fairly well.
+# num_threads : int
+# Number of ITK threads to use
+# args : str
+# Additional parameters to the command
+# environ : dict
+# Environment variables
+#
+# Outputs
+# -------
+# output_image : file
+# Warped image
+# bias_image : file
+# Estimated bias
+#
+# Docs
+# ----
+#
+# Bias field correction.
+#
+# N4 is a variant of the popular N3 (nonparameteric nonuniform normalization)
+# retrospective bias correction algorithm. Based on the assumption that the
+# corruption of the low frequency bias field can be modeled as a convolution of
+# the intensity histogram by a Gaussian, the basic algorithmic protocol is to
+# iterate between deconvolving the intensity histogram by a Gaussian, remapping
+# the intensities, and then spatially smoothing this result by a B-spline modeling
+# of the bias field itself. The modifications from and improvements obtained over
+# the original N3 algorithm are described in [Tustison2010]_.
+#
+# .. [Tustison2010] N. Tustison et al.,
+# N4ITK: Improved N3 Bias Correction, IEEE Transactions on Medical Imaging,
+# 29(6):1310-1320, June 2010.
+#
+# Examples
+# --------
+#
+# >>> import copy
+# >>> from nipype.interfaces.ants import N4BiasFieldCorrection
+# >>> n4 = N4BiasFieldCorrection()
+# >>> n4.inputs.dimension = 3
+# >>> n4.inputs.input_image = 'structural.nii'
+# >>> n4.inputs.bspline_fitting_distance = 300
+# >>> n4.inputs.shrink_factor = 3
+# >>> n4.inputs.n_iterations = [50,50,30,20]
+# >>> n4.cmdline
+# 'N4BiasFieldCorrection --bspline-fitting [ 300 ]
+# -d 3 --input-image structural.nii
+# --convergence [ 50x50x30x20 ] --output structural_corrected.nii
+# --shrink-factor 3'
+#
+# >>> n4_2 = copy.deepcopy(n4)
+# >>> n4_2.inputs.convergence_threshold = 1e-6
+# >>> n4_2.cmdline
+# 'N4BiasFieldCorrection --bspline-fitting [ 300 ]
+# -d 3 --input-image structural.nii
+# --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii
+# --shrink-factor 3'
+#
+# >>> n4_3 = copy.deepcopy(n4_2)
+# >>> n4_3.inputs.bspline_order = 5
+# >>> n4_3.cmdline
+# 'N4BiasFieldCorrection --bspline-fitting [ 300, 5 ]
+# -d 3 --input-image structural.nii
+# --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii
+# --shrink-factor 3'
+#
+# >>> n4_4 = N4BiasFieldCorrection()
+# >>> n4_4.inputs.input_image = 'structural.nii'
+# >>> n4_4.inputs.save_bias = True
+# >>> n4_4.inputs.dimension = 3
+# >>> n4_4.cmdline
+# 'N4BiasFieldCorrection -d 3 --input-image structural.nii
+# --output [ structural_corrected.nii, structural_bias.nii ]'
+#
+# >>> n4_5 = N4BiasFieldCorrection()
+# >>> n4_5.inputs.input_image = 'structural.nii'
+# >>> n4_5.inputs.dimension = 3
+# >>> n4_5.inputs.histogram_sharpening = (0.12, 0.02, 200)
+# >>> n4_5.cmdline
+# 'N4BiasFieldCorrection -d 3 --histogram-sharpening [0.12,0.02,200]
+# --input-image structural.nii --output structural_corrected.nii'
+#
+#
+task_name: n4_bias_field_correction
+nipype_name: N4BiasFieldCorrection
+nipype_module: nipype.interfaces.ants.segmentation
+inputs:
+ omit:
+ # list[str] - fields to omit from the Pydra interface
+ rename:
+ # dict[str, str] - fields to rename in the Pydra interface
+ types:
+ # dict[str, type] - override inferred types (use "mime-like" string for file-format types,
+ # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred
+ # from the nipype interface, but you may want to be more specific, particularly
+ # for file types, where specifying the format also specifies the file that will be
+ # passed to the field in the automatically generated unittests.
+ input_image: medimage/nifti1
+ mask_image: medimage/nifti1
+ weight_image: medimage/nifti1
+ bias_image: medimage/nifti1
+ metadata:
+ # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
+outputs:
+ omit:
+ # list[str] - fields to omit from the Pydra interface
+ rename:
+ # dict[str, str] - fields to rename in the Pydra interface
+ types:
+ # dict[str, type] - override inferred types (use "mime-like" string for file-format types,
+ # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred
+ # from the nipype interface, but you may want to be more specific, particularly
+ # for file types, where specifying the format also specifies the file that will be
+ # passed to the field in the automatically generated unittests.
+ output_image: medimage/nifti1
+ bias_image: medimage/nifti1
+ callables:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set to the `callable` attribute of output fields
+ templates:
+ # dict[str, str] - `output_file_template` values to be provided to output fields
+ requirements:
+ # dict[str, list[str]] - input fields that are required to be provided for the output field to be present
+tests:
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ dimension: '3'
+ input_image:
+ bspline_fitting_distance: '300'
+ shrink_factor: '3'
+ n_iterations: '[50,50,30,20]'
+ imports: &id001
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ - module: copy
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ convergence_threshold: 1e-6
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ bspline_order: '5'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ input_image:
+ save_bias: 'True'
+ dimension: '3'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ input_image:
+ dimension: '3'
+ histogram_sharpening: (0.12, 0.02, 200)
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+doctests:
+- cmdline: N4BiasFieldCorrection --bspline-fitting [ 300 ] -d 3 --input-image structural.nii --convergence [ 50x50x30x20 ] --output structural_corrected.nii --shrink-factor 3
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ dimension: '3'
+ input_image:
+ bspline_fitting_distance: '300'
+ shrink_factor: '3'
+ n_iterations: '[50,50,30,20]'
+ imports: *id001
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: N4BiasFieldCorrection --bspline-fitting [ 300 ] -d 3 --input-image structural.nii --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii --shrink-factor 3
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ convergence_threshold: 1e-6
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: N4BiasFieldCorrection --bspline-fitting [ 300, 5 ] -d 3 --input-image structural.nii --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii --shrink-factor 3
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ bspline_order: '5'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: N4BiasFieldCorrection -d 3 --input-image structural.nii --output [ structural_corrected.nii, structural_bias.nii ]
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ input_image:
+ save_bias: 'True'
+ dimension: '3'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: N4BiasFieldCorrection -d 3 --histogram-sharpening [0.12,0.02,200] --input-image structural.nii --output structural_corrected.nii
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ input_image:
+ dimension: '3'
+ histogram_sharpening: (0.12, 0.02, 200)
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/example-specs/task/ants_registration.yaml b/example-specs/task/ants_registration.yaml
new file mode 100644
index 00000000..2cce5cfd
--- /dev/null
+++ b/example-specs/task/ants_registration.yaml
@@ -0,0 +1,997 @@
+# This file is used to manually specify the semi-automatic conversion of
+# 'nipype.interfaces.ants.registration.Registration' from Nipype to Pydra.
+#
+# Please fill-in/edit the fields below where appropriate
+#
+# Inputs
+# ------
+# dimension : enum
+# image dimension (2 or 3)
+# fixed_image : inputmultiobject
+# Image to which the moving_image should be transformed (usually a structural image)
+# fixed_image_mask : file
+# Mask used to limit metric sampling region of the fixed image in all stages
+# fixed_image_masks : inputmultiobject
+# Masks used to limit metric sampling region of the fixed image, defined per registration stage (Use "NULL" to omit a mask at a given stage)
+# moving_image : inputmultiobject
+# Image that will be registered to the space of fixed_image. This is the image on which the transformations will be applied to
+# moving_image_mask : file
+# mask used to limit metric sampling region of the moving image in all stages
+# moving_image_masks : inputmultiobject
+# Masks used to limit metric sampling region of the moving image, defined per registration stage (Use "NULL" to omit a mask at a given stage)
+# save_state : file
+# Filename for saving the internal restorable state of the registration
+# restore_state : file
+# Filename for restoring the internal restorable state of the registration
+# initial_moving_transform : inputmultiobject
+# A transform or a list of transforms that should be applied before the registration begins. Note that, when a list is given, the transformations are applied in reverse order.
+# invert_initial_moving_transform : inputmultiobject
+# One boolean or a list of booleans that indicate whether the inverse(s) of the transform(s) defined in initial_moving_transform should be used.
+# initial_moving_transform_com : enum
+# Align the moving_image and fixed_image before registration using the geometric center of the images (=0), the image intensities (=1), or the origin of the images (=2).
+# metric_item_trait : enum
+#
+# metric_stage_trait : traitcompound
+#
+# metric : list
+# the metric(s) to use for each stage. Note that multiple metrics per stage are not supported in ANTS 1.9.1 and earlier.
+# metric_weight_item_trait : float
+#
+# metric_weight_stage_trait : traitcompound
+#
+# metric_weight : list
+# the metric weight(s) for each stage. The weights must sum to 1 per stage.
+# radius_bins_item_trait : int
+#
+# radius_bins_stage_trait : traitcompound
+#
+# radius_or_number_of_bins : list
+# the number of bins in each stage for the MI and Mattes metric, the radius for other metrics
+# sampling_strategy_item_trait : enum
+#
+# sampling_strategy_stage_trait : traitcompound
+#
+# sampling_strategy : list
+# the metric sampling strategy (strategies) for each stage
+# sampling_percentage_item_trait : traitcompound
+#
+# sampling_percentage_stage_trait : traitcompound
+#
+# sampling_percentage : list
+# the metric sampling percentage(s) to use for each stage
+# use_estimate_learning_rate_once : list
+#
+# use_histogram_matching : traitcompound
+# Histogram match the images before registration.
+# interpolation : enum
+#
+# interpolation_parameters : traitcompound
+#
+# write_composite_transform : bool
+#
+# collapse_output_transforms : bool
+# Collapse output transforms. Specifically, enabling this option combines all adjacent linear transforms and composes all adjacent displacement field transforms before writing the results to disk.
+# initialize_transforms_per_stage : bool
+# Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. Translation -> Rigid -> Affine).
+# float : bool
+# Use float instead of double for computations.
+# transforms : list
+#
+# transform_parameters : list
+#
+# restrict_deformation : list
+# This option allows the user to restrict the optimization of the displacement field, translation, rigid or affine transform on a per-component basis. For example, if one wants to limit the deformation or rotation of 3-D volume to the first two dimensions, this is possible by specifying a weight vector of '1x1x0' for a deformation field or '1x1x0x1x1x0' for a rigid transformation. Low-dimensional restriction only works if there are no preceding transformations.
+# number_of_iterations : list
+#
+# smoothing_sigmas : list
+#
+# sigma_units : list
+# units for smoothing sigmas
+# shrink_factors : list
+#
+# convergence_threshold : list
+#
+# convergence_window_size : list
+#
+# output_transform_prefix : str
+#
+# output_warped_image : traitcompound
+#
+# output_inverse_warped_image : traitcompound
+#
+# winsorize_upper_quantile : range
+# The Upper quantile to clip image ranges
+# winsorize_lower_quantile : range
+# The Lower quantile to clip image ranges
+# random_seed : int
+# Fixed seed for random number generation
+# verbose : bool
+#
+# num_threads : int
+# Number of ITK threads to use
+# args : str
+# Additional parameters to the command
+# environ : dict
+# Environment variables
+#
+# Outputs
+# -------
+# forward_transforms : list
+# List of output transforms for forward registration
+# reverse_forward_transforms : list
+# List of output transforms for forward registration reversed for antsApplyTransform
+# reverse_transforms : list
+# List of output transforms for reverse registration
+# forward_invert_flags : list
+# List of flags corresponding to the forward transforms
+# reverse_forward_invert_flags : list
+# List of flags corresponding to the forward transforms reversed for antsApplyTransform
+# reverse_invert_flags : list
+# List of flags corresponding to the reverse transforms
+# composite_transform : file
+# Composite transform file
+# inverse_composite_transform : file
+# Inverse composite transform file
+# warped_image : file
+# Outputs warped image
+# inverse_warped_image : file
+# Outputs the inverse of the warped image
+# save_state : file
+# The saved registration state to be restored
+# metric_value : float
+# the final value of metric
+# elapsed_time : float
+# the total elapsed time as reported by ANTs
+#
+# Docs
+# ----
+# ANTs Registration command for registration of images
+#
+# `antsRegistration <http://stnava.github.io/ANTs/>`_ registers a ``moving_image`` to a ``fixed_image``,
+# using a predefined (sequence of) cost function(s) and transformation operations.
+# The cost function is defined using one or more 'metrics', specifically
+# local cross-correlation (``CC``), Mean Squares (``MeanSquares``), Demons (``Demons``),
+# global correlation (``GC``), or Mutual Information (``Mattes`` or ``MI``).
+#
+# ANTS can use both linear (``Translation``, ``Rigid``, ``Affine``, ``CompositeAffine``,
+# or ``Translation``) and non-linear transformations (``BSpline``, ``GaussianDisplacementField``,
+# ``TimeVaryingVelocityField``, ``TimeVaryingBSplineVelocityField``, ``SyN``, ``BSplineSyN``,
+# ``Exponential``, or ``BSplineExponential``). Usually, registration is done in multiple
+# *stages*. For example first an Affine, then a Rigid, and ultimately a non-linear
+# (Syn)-transformation.
+#
+# antsRegistration can be initialized using one or more transforms from moving_image
+# to fixed_image with the ``initial_moving_transform``-input. For example, when you
+# already have a warpfield that corrects for geometrical distortions in an EPI (functional) image,
+# that you want to apply before an Affine registration to a structural image.
+# You could put this transform into 'initial_moving_transform'.
+#
+# The Registration-interface can output the resulting transform(s) that map moving_image to
+# fixed_image in a single file as a ``composite_transform`` (if ``write_composite_transform``
+# is set to ``True``), or a list of transforms as ``forwards_transforms``. It can also output
+# inverse transforms (from ``fixed_image`` to ``moving_image``) in a similar fashion using
+# ``inverse_composite_transform``. Note that the order of ``forward_transforms`` is in 'natural'
+# order: the first element should be applied first, the last element should be applied last.
+#
+# Note, however, that ANTS tools always apply lists of transformations in reverse order (the last
+# transformation in the list is applied first). Therefore, if the output forward_transforms
+# is a list, one can not directly feed it into, for example, ``ants.ApplyTransforms``. To
+# make ``ants.ApplyTransforms`` apply the transformations in the same order as ``ants.Registration``,
+# you have to provide the list of transformations in reverse order from ``forward_transforms``.
+# ``reverse_forward_transforms`` outputs ``forward_transforms`` in reverse order and can be used for
+# this purpose. Note also that, because ``composite_transform`` is always a single file, this
+# output is preferred for most use-cases.
+#
+# More information can be found in the `ANTS
+# manual <http://stnava.github.io/ANTs/>`_.
+#
+# See below for some useful examples.
+#
+# Examples
+# --------
+#
+# Set up a Registration node with some default settings. This Node registers
+# 'fixed1.nii' to 'moving1.nii' by first fitting a linear 'Affine' transformation, and
+# then a non-linear 'SyN' transformation, both using the Mutual Information-cost
+# metric.
+#
+# The registration is initialized by first applying the (linear) transform
+# trans.mat.
+#
+# >>> import copy, pprint
+# >>> from nipype.interfaces.ants import Registration
+# >>> reg = Registration()
+# >>> reg.inputs.fixed_image = 'fixed1.nii'
+# >>> reg.inputs.moving_image = 'moving1.nii'
+# >>> reg.inputs.output_transform_prefix = "output_"
+# >>> reg.inputs.initial_moving_transform = 'trans.mat'
+# >>> reg.inputs.transforms = ['Affine', 'SyN']
+# >>> reg.inputs.transform_parameters = [(2.0,), (0.25, 3.0, 0.0)]
+# >>> reg.inputs.number_of_iterations = [[1500, 200], [100, 50, 30]]
+# >>> reg.inputs.dimension = 3
+# >>> reg.inputs.write_composite_transform = True
+# >>> reg.inputs.collapse_output_transforms = False
+# >>> reg.inputs.initialize_transforms_per_stage = False
+# >>> reg.inputs.metric = ['Mattes']*2
+# >>> reg.inputs.metric_weight = [1]*2 # Default (value ignored currently by ANTs)
+# >>> reg.inputs.radius_or_number_of_bins = [32]*2
+# >>> reg.inputs.sampling_strategy = ['Random', None]
+# >>> reg.inputs.sampling_percentage = [0.05, None]
+# >>> reg.inputs.convergence_threshold = [1.e-8, 1.e-9]
+# >>> reg.inputs.convergence_window_size = [20]*2
+# >>> reg.inputs.smoothing_sigmas = [[1,0], [2,1,0]]
+# >>> reg.inputs.sigma_units = ['vox'] * 2
+# >>> reg.inputs.shrink_factors = [[2,1], [3,2,1]]
+# >>> reg.inputs.use_estimate_learning_rate_once = [True, True]
+# >>> reg.inputs.use_histogram_matching = [True, True] # This is the default
+# >>> reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
+# >>> reg.cmdline
+# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
+# >>> reg.run() # doctest: +SKIP
+#
+# Same as reg1, but first invert the initial transform ('trans.mat') before applying it.
+#
+# >>> reg.inputs.invert_initial_moving_transform = True
+# >>> reg1 = copy.deepcopy(reg)
+# >>> reg1.inputs.winsorize_lower_quantile = 0.025
+# >>> reg1.cmdline
+# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 1.0 ] --write-composite-transform 1'
+# >>> reg1.run() # doctest: +SKIP
+#
+# Clip extremely high intensity data points using winsorize_upper_quantile. All data points
+# higher than the 0.975 quantile are set to the value of the 0.975 quantile.
+#
+# >>> reg2 = copy.deepcopy(reg)
+# >>> reg2.inputs.winsorize_upper_quantile = 0.975
+# >>> reg2.cmdline
+# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 0.975 ] --write-composite-transform 1'
+#
+# Clip extremely low intensity data points using winsorize_lower_quantile. All data points
+# lower than the 0.025 quantile are set to the original value at the 0.025 quantile.
+#
+#
+# >>> reg3 = copy.deepcopy(reg)
+# >>> reg3.inputs.winsorize_lower_quantile = 0.025
+# >>> reg3.inputs.winsorize_upper_quantile = 0.975
+# >>> reg3.cmdline
+# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 0.975 ] --write-composite-transform 1'
+#
+# Use float instead of double for computations (saves memory usage)
+#
+# >>> reg3a = copy.deepcopy(reg)
+# >>> reg3a.inputs.float = True
+# >>> reg3a.cmdline
+# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 1 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
+#
+# Force to use double instead of float for computations (more precision and memory usage).
+#
+# >>> reg3b = copy.deepcopy(reg)
+# >>> reg3b.inputs.float = False
+# >>> reg3b.cmdline
+# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 0 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
+#
+# 'collapse_output_transforms' can be used to put all transformation in a single 'composite_transform'-
+# file. Note that forward_transforms will now be an empty list.
+#
+# >>> # Test collapse transforms flag
+# >>> reg4 = copy.deepcopy(reg)
+# >>> reg4.inputs.save_state = 'trans.mat'
+# >>> reg4.inputs.restore_state = 'trans.mat'
+# >>> reg4.inputs.initialize_transforms_per_stage = True
+# >>> reg4.inputs.collapse_output_transforms = True
+# >>> outputs = reg4._list_outputs()
+# >>> pprint.pprint(outputs) # doctest: +ELLIPSIS,
+# {'composite_transform': '...data/output_Composite.h5',
+#  'elapsed_time': <undefined>,
+# 'forward_invert_flags': [],
+# 'forward_transforms': [],
+# 'inverse_composite_transform': '...data/output_InverseComposite.h5',
+#  'inverse_warped_image': <undefined>,
+#  'metric_value': <undefined>,
+# 'reverse_forward_invert_flags': [],
+# 'reverse_forward_transforms': [],
+# 'reverse_invert_flags': [],
+# 'reverse_transforms': [],
+# 'save_state': '...data/trans.mat',
+# 'warped_image': '...data/output_warped_image.nii.gz'}
+# >>> reg4.cmdline
+# 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
+#
+#
+# >>> # Test collapse transforms flag
+# >>> reg4b = copy.deepcopy(reg4)
+# >>> reg4b.inputs.write_composite_transform = False
+# >>> outputs = reg4b._list_outputs()
+# >>> pprint.pprint(outputs) # doctest: +ELLIPSIS,
+# {'composite_transform': <undefined>,
+#  'elapsed_time': <undefined>,
+# 'forward_invert_flags': [False, False],
+# 'forward_transforms': ['...data/output_0GenericAffine.mat',
+# '...data/output_1Warp.nii.gz'],
+#  'inverse_composite_transform': <undefined>,
+#  'inverse_warped_image': <undefined>,
+#  'metric_value': <undefined>,
+# 'reverse_forward_invert_flags': [False, False],
+# 'reverse_forward_transforms': ['...data/output_1Warp.nii.gz',
+# '...data/output_0GenericAffine.mat'],
+# 'reverse_invert_flags': [True, False],
+# 'reverse_transforms': ['...data/output_0GenericAffine.mat', '...data/output_1InverseWarp.nii.gz'],
+# 'save_state': '...data/trans.mat',
+# 'warped_image': '...data/output_warped_image.nii.gz'}
+# >>> reg4b.aggregate_outputs() # doctest: +SKIP
+# >>> reg4b.cmdline
+# 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 0'
+#
+# One can use multiple similarity metrics in a single registration stage. The Node below first
+# performs a linear registration using only the Mutual Information ('Mattes')-metric.
+# In a second stage, it performs a non-linear registration ('Syn') using both a
+# Mutual Information and a local cross-correlation ('CC')-metric. Both metrics are weighted
+# equally ('metric_weight' is .5 for both). The Mutual Information metric uses 32 bins.
+# The local cross-correlations (correlations between every voxel's neighborhoods) is computed
+# with a radius of 4.
+#
+# >>> # Test multiple metrics per stage
+# >>> reg5 = copy.deepcopy(reg)
+# >>> reg5.inputs.fixed_image = 'fixed1.nii'
+# >>> reg5.inputs.moving_image = 'moving1.nii'
+# >>> reg5.inputs.metric = ['Mattes', ['Mattes', 'CC']]
+# >>> reg5.inputs.metric_weight = [1, [.5,.5]]
+# >>> reg5.inputs.radius_or_number_of_bins = [32, [32, 4] ]
+# >>> reg5.inputs.sampling_strategy = ['Random', None] # use default strategy in second stage
+# >>> reg5.inputs.sampling_percentage = [0.05, [0.05, 0.10]]
+# >>> reg5.cmdline
+# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed1.nii, moving1.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
+#
+# ANTS Registration can also use multiple modalities to perform the registration. Here it is assumed
+# that fixed1.nii and fixed2.nii are in the same space, and so are moving1.nii and
+# moving2.nii. First, a linear registration is performed matching fixed1.nii to moving1.nii,
+# then a non-linear registration is performed to match fixed2.nii to moving2.nii, starting from
+# the transformation of the first step.
+#
+# >>> # Test multiple inputs
+# >>> reg6 = copy.deepcopy(reg5)
+# >>> reg6.inputs.fixed_image = ['fixed1.nii', 'fixed2.nii']
+# >>> reg6.inputs.moving_image = ['moving1.nii', 'moving2.nii']
+# >>> reg6.cmdline
+# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed2.nii, moving2.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
+#
+# Different methods can be used for the interpolation when applying transformations.
+#
+# >>> # Test Interpolation Parameters (BSpline)
+# >>> reg7a = copy.deepcopy(reg)
+# >>> reg7a.inputs.interpolation = 'BSpline'
+# >>> reg7a.inputs.interpolation_parameters = (3,)
+# >>> reg7a.cmdline
+# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation BSpline[ 3 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
+#
+# >>> # Test Interpolation Parameters (MultiLabel/Gaussian)
+# >>> reg7b = copy.deepcopy(reg)
+# >>> reg7b.inputs.interpolation = 'Gaussian'
+# >>> reg7b.inputs.interpolation_parameters = (1.0, 1.0)
+# >>> reg7b.cmdline
+# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Gaussian[ 1.0, 1.0 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
+#
+# BSplineSyN non-linear registration with custom parameters.
+#
+# >>> # Test Extended Transform Parameters
+# >>> reg8 = copy.deepcopy(reg)
+# >>> reg8.inputs.transforms = ['Affine', 'BSplineSyN']
+# >>> reg8.inputs.transform_parameters = [(2.0,), (0.25, 26, 0, 3)]
+# >>> reg8.cmdline
+# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform BSplineSyN[ 0.25, 26, 0, 3 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
+#
+# Mask the fixed image in the second stage of the registration (but not the first).
+#
+# >>> # Test masking
+# >>> reg9 = copy.deepcopy(reg)
+# >>> reg9.inputs.fixed_image_masks = ['NULL', 'fixed1.nii']
+# >>> reg9.cmdline
+# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ NULL, NULL ] --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ fixed1.nii, NULL ] --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
+#
+# Here we use both a warpfield and a linear transformation, before registration commences. Note that
+# the first transformation that needs to be applied ('ants_Warp.nii.gz') is last in the list of
+# 'initial_moving_transform'.
+#
+# >>> # Test initialization with multiple transforms matrices (e.g., unwarp and affine transform)
+# >>> reg10 = copy.deepcopy(reg)
+# >>> reg10.inputs.initial_moving_transform = ['func_to_struct.mat', 'ants_Warp.nii.gz']
+# >>> reg10.inputs.invert_initial_moving_transform = [False, False]
+# >>> reg10.cmdline
+# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ func_to_struct.mat, 0 ] [ ants_Warp.nii.gz, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1'
+#
+task_name: registration
+nipype_name: Registration
+nipype_module: nipype.interfaces.ants.registration
+inputs:
+ omit:
+ # list[str] - fields to omit from the Pydra interface
+ rename:
+ # dict[str, str] - fields to rename in the Pydra interface
+ types:
+ # dict[str, type] - override inferred types (use "mime-like" string for file-format types,
+ # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred
+ # from the nipype interface, but you may want to be more specific, particularly
+ # for file types, where specifying the format also specifies the file that will be
+ # passed to the field in the automatically generated unittests.
+ fixed_image: medimage/nifti1+list-of
+ fixed_image_mask: generic/file
+ fixed_image_masks: generic/file+list-of
+ moving_image: medimage/nifti1+list-of
+ moving_image_mask: generic/file
+ moving_image_masks: generic/file+list-of
+ save_state: datascience/text-matrix
+ restore_state: datascience/text-matrix
+ initial_moving_transform: datascience/text-matrix+list-of
+    invert_initial_moving_transform: generic/file+list-of  # NOTE(review): nipype declares this trait as a list of booleans, not files — this override looks wrong; verify against nipype.interfaces.ants.registration
+ metadata:
+ # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
+outputs:
+ omit:
+ # list[str] - fields to omit from the Pydra interface
+ rename:
+ # dict[str, str] - fields to rename in the Pydra interface
+ types:
+ # dict[str, type] - override inferred types (use "mime-like" string for file-format types,
+ # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred
+ # from the nipype interface, but you may want to be more specific, particularly
+ # for file types, where specifying the format also specifies the file that will be
+ # passed to the field in the automatically generated unittests.
+ composite_transform: generic/file
+ inverse_composite_transform: generic/file
+ warped_image: generic/file
+ inverse_warped_image: generic/file
+ save_state: datascience/text-matrix
+ callables:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set to the `callable` attribute of output fields
+ templates:
+ # dict[str, str] - `output_file_template` values to be provided to output fields
+ requirements:
+ # dict[str, list[str]] - input fields that are required to be provided for the output field to be present
+tests:
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ fixed_image:
+ moving_image:
+ output_transform_prefix: '"output_"'
+ initial_moving_transform:
+ transforms: '["Affine", "SyN"]'
+ transform_parameters: '[(2.0,), (0.25, 3.0, 0.0)]'
+ number_of_iterations: '[[1500, 200], [100, 50, 30]]'
+ dimension: '3'
+ write_composite_transform: 'True'
+ collapse_output_transforms: 'False'
+ initialize_transforms_per_stage: 'False'
+ metric: '["Mattes"]*2'
+    metric_weight: '[1]*2' # Default (value ignored currently by ANTs)
+ radius_or_number_of_bins: '[32]*2'
+ sampling_strategy: '["Random", None]'
+ sampling_percentage: '[0.05, None]'
+ convergence_threshold: '[1.e-8, 1.e-9]'
+ convergence_window_size: '[20]*2'
+ smoothing_sigmas: '[[1,0], [2,1,0]]'
+ sigma_units: '["vox"] * 2'
+ shrink_factors: '[[2,1], [3,2,1]]'
+ use_estimate_learning_rate_once: '[True, True]'
+    use_histogram_matching: '[True, True]' # This is the default
+ output_warped_image: '"output_warped_image.nii.gz"'
+ imports: &id001
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ - module: copy
+ - module: pprint
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ invert_initial_moving_transform:
+ winsorize_lower_quantile: '0.025'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ winsorize_upper_quantile: '0.975'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ winsorize_lower_quantile: '0.025'
+ winsorize_upper_quantile: '0.975'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ float: 'True'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ float: 'False'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ save_state:
+ restore_state:
+ initialize_transforms_per_stage: 'True'
+ collapse_output_transforms: 'True'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ write_composite_transform: 'False'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ fixed_image:
+ moving_image:
+ metric: '["Mattes", ["Mattes", "CC"]]'
+ metric_weight: '[1, [.5,.5]]'
+ radius_or_number_of_bins: '[32, [32, 4] ]'
+    sampling_strategy: '["Random", None]' # use default strategy in second stage
+ sampling_percentage: '[0.05, [0.05, 0.10]]'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ fixed_image:
+ moving_image:
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ interpolation: '"BSpline"'
+    interpolation_parameters: '(3,)'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ interpolation: '"Gaussian"'
+ interpolation_parameters: (1.0, 1.0)
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ transforms: '["Affine", "BSplineSyN"]'
+ transform_parameters: '[(2.0,), (0.25, 26, 0, 3)]'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ fixed_image_masks:
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ initial_moving_transform:
+ invert_initial_moving_transform:
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+doctests:
+- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ fixed_image:
+ moving_image:
+ output_transform_prefix: '"output_"'
+ initial_moving_transform:
+ transforms: '["Affine", "SyN"]'
+ transform_parameters: '[(2.0,), (0.25, 3.0, 0.0)]'
+ number_of_iterations: '[[1500, 200], [100, 50, 30]]'
+ dimension: '3'
+ write_composite_transform: 'True'
+ collapse_output_transforms: 'False'
+ initialize_transforms_per_stage: 'False'
+ metric: '["Mattes"]*2'
+ metric_weight: '[1]*2 # Default (value ignored currently by ANTs)'
+ radius_or_number_of_bins: '[32]*2'
+ sampling_strategy: '["Random", None]'
+ sampling_percentage: '[0.05, None]'
+ convergence_threshold: '[1.e-8, 1.e-9]'
+ convergence_window_size: '[20]*2'
+ smoothing_sigmas: '[[1,0], [2,1,0]]'
+ sigma_units: '["vox"] * 2'
+ shrink_factors: '[[2,1], [3,2,1]]'
+ use_estimate_learning_rate_once: '[True, True]'
+ use_histogram_matching: '[True, True] # This is the default'
+ output_warped_image: '"output_warped_image.nii.gz"'
+ imports: *id001
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 1.0 ] --write-composite-transform 1
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ invert_initial_moving_transform:
+ winsorize_lower_quantile: '0.025'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 0.975 ] --write-composite-transform 1
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ winsorize_upper_quantile: '0.975'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 0.975 ] --write-composite-transform 1
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ winsorize_lower_quantile: '0.025'
+ winsorize_upper_quantile: '0.975'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 1 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ float: 'True'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 0 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ float: 'False'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ save_state:
+ restore_state:
+ initialize_transforms_per_stage: 'True'
+ collapse_output_transforms: 'True'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 0
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ write_composite_transform: 'False'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed1.nii, moving1.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ fixed_image:
+ moving_image:
+ metric: '["Mattes", ["Mattes", "CC"]]'
+ metric_weight: '[1, [.5,.5]]'
+ radius_or_number_of_bins: '[32, [32, 4] ]'
+ sampling_strategy: '["Random", None] # use default strategy in second stage'
+ sampling_percentage: '[0.05, [0.05, 0.10]]'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed2.nii, moving2.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ fixed_image:
+ moving_image:
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation BSpline[ 3 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ interpolation: '"BSpline"'
+ interpolation_parameters: (3,)
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Gaussian[ 1.0, 1.0 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ interpolation: '"Gaussian"'
+ interpolation_parameters: (1.0, 1.0)
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform BSplineSyN[ 0.25, 26, 0, 3 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ transforms: '["Affine", "BSplineSyN"]'
+ transform_parameters: '[(2.0,), (0.25, 26, 0, 3)]'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ NULL, NULL ] --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ fixed1.nii, NULL ] --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ fixed_image_masks:
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
+- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ func_to_struct.mat, 0 ] [ ants_Warp.nii.gz, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ initial_moving_transform:
+ invert_initial_moving_transform:
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/example-specs/task/ants_registration_Registration.yaml b/example-specs/task/ants_registration_Registration.yaml
deleted file mode 100644
index 8f848476..00000000
--- a/example-specs/task/ants_registration_Registration.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
-task_name: Registration
-nipype_module: nipype.interfaces.ants.registration
-output_requirements:
- output_warped_image: ["fixed_image", "moving_image", "output_transform_prefix"]
-output_templates:
- output_warped_image: "{output_transform_prefix}warped"
-doctest:
- fixed_image: test.nii.gz
- moving_image: test.nii.gz
- cmdline: >-
- antsRegistration --output [ output_, output_warped_image.nii.gz ]
- --metric Mattes[ test.nii, test.nii, 1, 32, Random, 0.05 ]
- tests_inputs: []
- tests_outputs:
- - AttributeError
-
\ No newline at end of file
diff --git a/example-specs/task/ants_segmentation_N4BiasFieldCorrection.yaml b/example-specs/task/ants_segmentation_N4BiasFieldCorrection.yaml
deleted file mode 100644
index 7d2b8fdf..00000000
--- a/example-specs/task/ants_segmentation_N4BiasFieldCorrection.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-nipype_module: nipype.interfaces.ants.segmentation
-task_name: N4BiasFieldCorrection
-output_requirements:
- output_image: []
- bias_image: ["save_bias"]
-output_templates:
- output_image: ""
-doctest:
- input_image: test.nii.gz
- cmdline:
-tests_inputs: []
-tests_outputs: [] # - AttributeError
diff --git a/example-specs/task/apply_vol_transform.yaml b/example-specs/task/apply_vol_transform.yaml
new file mode 100644
index 00000000..020a38f0
--- /dev/null
+++ b/example-specs/task/apply_vol_transform.yaml
@@ -0,0 +1,159 @@
+# This file is used to manually specify the semi-automatic conversion of
+# 'nipype.interfaces.freesurfer.preprocess.ApplyVolTransform' from Nipype to Pydra.
+#
+# Please fill-in/edit the fields below where appropriate
+#
+# Inputs
+# ------
+# source_file : file
+# Input volume you wish to transform
+# transformed_file : file
+# Output volume
+# target_file : file
+# Output template volume
+# tal : bool
+# map to a sub FOV of MNI305 (with --reg only)
+# tal_resolution : float
+# Resolution to sample when using tal
+# fs_target : bool
+# use orig.mgz from subject in regfile as target
+# reg_file : file
+# tkRAS-to-tkRAS matrix (tkregister2 format)
+# lta_file : file
+# Linear Transform Array file
+# lta_inv_file : file
+# LTA, invert
+# fsl_reg_file : file
+# fslRAS-to-fslRAS matrix (FSL format)
+# xfm_reg_file : file
+# ScannerRAS-to-ScannerRAS matrix (MNI format)
+# reg_header : bool
+# ScannerRAS-to-ScannerRAS matrix = identity
+# mni_152_reg : bool
+# target MNI152 space
+# subject : str
+# set matrix = identity and use subject for any templates
+# inverse : bool
+# sample from target to source
+# interp : enum
+# Interpolation method ( or nearest)
+# no_resample : bool
+# Do not resample; just change vox2ras matrix
+# m3z_file : file
+# This is the morph to be applied to the volume. Unless the morph is in mri/transforms (eg.: for talairach.m3z computed by reconall), you will need to specify the full path to this morph and use the --noDefM3zPath flag.
+# no_ded_m3z_path : bool
+# To be used with the m3z flag. Instructs the code not to look for them3z morph in the default location (SUBJECTS_DIR/subj/mri/transforms), but instead just use the path indicated in --m3z.
+# invert_morph : bool
+# Compute and use the inverse of the non-linear morph to resample the input volume. To be used by --m3z.
+# subjects_dir : directory
+# subjects directory
+# args : str
+# Additional parameters to the command
+# environ : dict
+# Environment variables
+#
+# Outputs
+# -------
+# transformed_file : file
+# Path to output file if used normally
+#
+# Docs
+# ----
+# Use FreeSurfer mri_vol2vol to apply a transform.
+#
+# Examples
+# --------
+#
+# >>> from nipype.interfaces.freesurfer import ApplyVolTransform
+# >>> applyreg = ApplyVolTransform()
+# >>> applyreg.inputs.source_file = 'structural.nii'
+# >>> applyreg.inputs.reg_file = 'register.dat'
+# >>> applyreg.inputs.transformed_file = 'struct_warped.nii'
+# >>> applyreg.inputs.fs_target = True
+# >>> applyreg.cmdline
+# 'mri_vol2vol --fstarg --reg register.dat --mov structural.nii --o struct_warped.nii'
+#
+#
+task_name: apply_vol_transform
+nipype_name: ApplyVolTransform
+nipype_module: nipype.interfaces.freesurfer.preprocess
+inputs:
+ omit:
+ # list[str] - fields to omit from the Pydra interface
+ rename:
+ # dict[str, str] - fields to rename in the Pydra interface
+ types:
+ # dict[str, type] - override inferred types (use "mime-like" string for file-format types,
+ # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred
+ # from the nipype interface, but you may want to be more specific, particularly
+ # for file types, where specifying the format also specifies the file that will be
+ # passed to the field in the automatically generated unittests.
+ source_file: medimage/nifti1
+ target_file: medimage/nifti1
+ reg_file: datascience/dat-file
+ lta_file: generic/file
+ lta_inv_file: generic/file
+ fsl_reg_file: generic/file
+ xfm_reg_file: generic/file
+ m3z_file: generic/file
+ metadata:
+ # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
+outputs:
+ omit:
+ # list[str] - fields to omit from the Pydra interface
+ rename:
+ # dict[str, str] - fields to rename in the Pydra interface
+ types:
+ # dict[str, type] - override inferred types (use "mime-like" string for file-format types,
+ # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred
+ # from the nipype interface, but you may want to be more specific, particularly
+ # for file types, where specifying the format also specifies the file that will be
+ # passed to the field in the automatically generated unittests.
+ transformed_file: medimage/nifti1
+ callables:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set to the `callable` attribute of output fields
+ templates:
+ # dict[str, str] - `output_file_template` values to be provided to output fields
+ transformed_file: '"struct_warped.nii"'
+ requirements:
+ # dict[str, list[str]] - input fields that are required to be provided for the output field to be present
+tests:
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ source_file:
+ reg_file:
+ transformed_file: '"struct_warped.nii"'
+ fs_target: 'True'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+doctests:
+- cmdline: mri_vol2vol --fstarg --reg register.dat --mov structural.nii --o struct_warped.nii
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ source_file:
+ reg_file:
+ transformed_file: '"struct_warped.nii"'
+ fs_target: 'True'
+ imports:
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive: ''''
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/example-specs/task/extract_roi.yaml b/example-specs/task/extract_roi.yaml
new file mode 100644
index 00000000..57391826
--- /dev/null
+++ b/example-specs/task/extract_roi.yaml
@@ -0,0 +1,146 @@
+# This file is used to manually specify the semi-automatic conversion of
+# 'nipype.interfaces.fsl.utils.ExtractROI' from Nipype to Pydra.
+#
+# Please fill-in/edit the fields below where appropriate
+#
+# Inputs
+# ------
+# in_file : file
+# input file
+# roi_file : file
+# output file
+# x_min : int
+#
+# x_size : int
+#
+# y_min : int
+#
+# y_size : int
+#
+# z_min : int
+#
+# z_size : int
+#
+# t_min : int
+#
+# t_size : int
+#
+# crop_list : list
+# list of two tuples specifying crop options
+# output_type : enum
+# FSL output type
+# args : str
+# Additional parameters to the command
+# environ : dict
+# Environment variables
+#
+# Outputs
+# -------
+# roi_file : file
+#
+#
+# Docs
+# ----
+# Uses FSL Fslroi command to extract region of interest (ROI)
+# from an image.
+#
+# You can a) take a 3D ROI from a 3D data set (or if it is 4D, the
+# same ROI is taken from each time point and a new 4D data set is
+# created), b) extract just some time points from a 4D data set, or
+# c) control time and space limits to the ROI. Note that the
+# arguments are minimum index and size (not maximum index). So to
+# extract voxels 10 to 12 inclusive you would specify 10 and 3 (not
+# 10 and 12).
+#
+#
+# Examples
+# --------
+#
+# >>> from nipype.interfaces.fsl import ExtractROI
+# >>> from nipype.testing import anatfile
+# >>> fslroi = ExtractROI(in_file=anatfile, roi_file='bar.nii', t_min=0,
+# ... t_size=1)
+# >>> fslroi.cmdline == 'fslroi %s bar.nii 0 1' % anatfile
+# True
+#
+#
+#
+task_name: extract_roi
+nipype_name: ExtractROI
+nipype_module: nipype.interfaces.fsl.utils
+inputs:
+ omit:
+ # list[str] - fields to omit from the Pydra interface
+ rename:
+ # dict[str, str] - fields to rename in the Pydra interface
+ types:
+ # dict[str, type] - override inferred types (use "mime-like" string for file-format types,
+ # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred
+ # from the nipype interface, but you may want to be more specific, particularly
+ # for file types, where specifying the format also specifies the file that will be
+ # passed to the field in the automatically generated unittests.
+ in_file: medimage/nifti1
+ metadata:
+ # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1)
+outputs:
+ omit:
+ # list[str] - fields to omit from the Pydra interface
+ rename:
+ # dict[str, str] - fields to rename in the Pydra interface
+ types:
+ # dict[str, type] - override inferred types (use "mime-like" string for file-format types,
+ # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred
+ # from the nipype interface, but you may want to be more specific, particularly
+ # for file types, where specifying the format also specifies the file that will be
+ # passed to the field in the automatically generated unittests.
+ roi_file: medimage/nifti1
+ callables:
+ # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py`
+ # to set to the `callable` attribute of output fields
+ templates:
+ # dict[str, str] - `output_file_template` values to be provided to output fields
+ roi_file: '"bar.nii"'
+ requirements:
+ # dict[str, list[str]] - input fields that are required to be provided for the output field to be present
+tests:
+- inputs:
+ # dict[str, str] - values to provide to inputs fields in the task initialisation
+ # (if not specified, will try to choose a sensible value)
+ in_file:
+ roi_file: '"bar.nii"'
+ t_min: '0'
+ t_size: '1'
+ imports: &id001
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ - module: nipype.testing
+ name: anatfile
+ alias:
+ expected_outputs:
+ # dict[str, str] - expected values for selected outputs, noting that tests will typically
+ # be terminated before they complete for time-saving reasons, and therefore
+ # these values will be ignored, when running in CI
+ timeout: 10
+ # int - the value to set for the timeout in the generated test,
+ # after which the test will be considered to have been initialised
+ # successfully. Set to 0 to disable the timeout (warning, this could
+ # lead to the unittests taking a very long time to complete)
+ xfail: true
+ # bool - whether the unittest is expected to fail or not. Set to false
+ # when you are satisfied with the edits you have made to this file
+doctests:
+- cmdline:
+ # str - the expected cmdline output
+ inputs:
+ # dict[str, str] - name-value pairs for inputs to be provided to the doctest.
+ # If the field is of file-format type and the value is None, then the
+ # '.mock()' method of the corresponding class is used instead.
+ in_file:
+ roi_file: '"bar.nii"'
+ t_min: '0'
+ t_size: '1'
+ imports: *id001
+ # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item
+ # consisting of 'module', 'name', and optionally 'alias' keys
+ directive:
+ # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS
diff --git a/example-specs/task/freesurfer_preprocess_ApplyVolTransform.yaml b/example-specs/task/freesurfer_preprocess_ApplyVolTransform.yaml
deleted file mode 100644
index 14fe49e4..00000000
--- a/example-specs/task/freesurfer_preprocess_ApplyVolTransform.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
-nipype_module: nipype.interfaces.freesurfer.preprocess
-task_name: ApplyVolTransform
-output_requirements: []
-output_templates:
- transformed_file: "{source_file}_warped"
-doctest:
- source_file: test.nii.gz
- cmdline: mri_vol2vol
-tests_inputs: []
-tests_outputs: []
\ No newline at end of file
diff --git a/example-specs/task/fsl_utils_ExtractROI.yaml b/example-specs/task/fsl_utils_ExtractROI.yaml
deleted file mode 100644
index 37c6f5df..00000000
--- a/example-specs/task/fsl_utils_ExtractROI.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-task_name: ExtractROI
-nipype_module: nipype.interfaces.fsl.utils
-output_requirements:
- roi_file: [in_file]
-output_templates:
- roi_file: "{in_file}_trim"
-inputs_drop:
- - crop_list
-doctest:
- in_file: test.nii.gz
- t_min: 0
- t_size: 3
- roi_file: test_trim.nii.gz
- cmdline: fslroi test.nii.gz test_trim.nii.gz 0 3
-tests_inputs: []
- # - in_file: test.nii.gz
- # t_min: 0
- # t_size: 1
-tests_outputs: []
- # - roi_file
\ No newline at end of file
diff --git a/nipype-interfaces-to-import.yaml b/nipype-interfaces-to-import.yaml
index 0d335188..535ae29b 100644
--- a/nipype-interfaces-to-import.yaml
+++ b/nipype-interfaces-to-import.yaml
@@ -220,17 +220,12 @@ interfaces:
camino2trackvis/convert:
- Camino2Trackvis
- Trackvis2Camino
- cat12/base:
- - Cell
- - NestedCell
cat12/preprocess:
- CAT12Segment
- CAT12SANLMDenoising
- - Cell2Str
cat12/surface:
- ExtractAdditionalSurfaceParameters
- ExtractROIBasedSurfaceMeasures
- - Cell2Str
cmtk/cmtk:
- CreateMatrix
- ROIGen
@@ -354,7 +349,6 @@ interfaces:
- DICOMConvert
- Resample
- ReconAll
- - BBRegisterInputSpec6
- BBRegister
- ApplyVolTransform
- Smooth
@@ -453,54 +447,30 @@ interfaces:
- Classifier
- Cleaner
fsl/maths:
- - MathsInput
- - MathsOutput
- MathsCommand
- - ChangeDataTypeInput
- ChangeDataType
- Threshold
- - StdImageInput
- StdImage
- - MeanImageInput
- MeanImage
- - MaxImageInput
- MaxImage
- - PercentileImageInput
- PercentileImage
- - MaxnImageInput
- MaxnImage
- - MinImageInput
- MinImage
- - MedianImageInput
- MedianImage
- - AR1ImageInput
- AR1Image
- - IsotropicSmoothInput
- IsotropicSmooth
- - ApplyMaskInput
- ApplyMask
- - KernelInput
- - DilateInput
- DilateImage
- - ErodeInput
- ErodeImage
- - SpatialFilterInput
- SpatialFilter
- - UnaryMathsInput
- UnaryMaths
- - BinaryMathsInput
- BinaryMaths
- - MultiImageMathsInput
- MultiImageMaths
- - TemporalFilterInput
- TemporalFilter
fsl/model:
- Level1Design
- FEAT
- FEATModel
- - FILMGLSInputSpec505
- - FILMGLSInputSpec507
- - FILMGLSOutputSpec507
- FILMGLS
- FEATRegister
- FLAMEO
@@ -634,35 +604,22 @@ interfaces:
niftyseg/em:
- EM
niftyseg/label_fusion:
- - LabelFusionInput
- - LabelFusionOutput
- LabelFusion
- CalcTopNCC
niftyseg/lesions:
- FillLesions
niftyseg/maths:
- - MathsInput
- - MathsOutput
- MathsCommand
- - UnaryMathsInput
- UnaryMaths
- - BinaryMathsInput
- BinaryMaths
- - BinaryMathsInputInteger
- BinaryMathsInteger
- - TupleMathsInput
- TupleMaths
- - MergeInput
- Merge
niftyseg/patchmatch:
- PatchMatch
niftyseg/stats:
- - StatsInput
- - StatsOutput
- StatsCommand
- - UnaryStatsInput
- UnaryStats
- - BinaryStatsInput
- BinaryStats
nilearn:
- NilearnBaseInterface
@@ -922,11 +879,7 @@ interfaces:
- CalcCoregAffine
- ApplyTransform
- Reslice
- - ApplyInverseDeformationInput
- - ApplyInverseDeformationOutput
- ApplyInverseDeformation
- - ResliceToReferenceInput
- - ResliceToReferenceOutput
- ResliceToReference
- DicomImport
vista/vista:
diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py
index 7cbeb531..f6449790 100644
--- a/nipype2pydra/task.py
+++ b/nipype2pydra/task.py
@@ -1,49 +1,391 @@
import os
from pathlib import Path
import typing as ty
-from types import ModuleType
import re
+from importlib import import_module
+from types import ModuleType
import inspect
import black
-import traits
+import traits.trait_types
+import json
import attrs
+from attrs.converters import default_if_none
import nipype.interfaces.base
from nipype.interfaces.base import traits_extension
from pydra.engine import specs
from pydra.engine.helpers import ensure_list
-from .utils import import_module_from_path
+from .utils import import_module_from_path, is_fileset
+from fileformats.core import from_mime
+from fileformats.generic import File
+
+
+T = ty.TypeVar("T")
+
+
+def from_dict_converter(
+ obj: ty.Union[T, dict], klass: ty.Type[T], allow_none=False
+) -> T:
+ if obj is None:
+ if allow_none:
+ converted = None
+ else:
+ converted = klass()
+ elif isinstance(obj, dict):
+ converted = klass(**obj)
+ elif isinstance(obj, klass):
+ converted = obj
+ else:
+ raise TypeError(
+ f"Input must be of type {klass} or dict, not {type(obj)}: {obj}"
+ )
+ return converted
+
+
+def str_to_type(type_str: str) -> type:
+ """Resolve a string representation of a type into a valid type"""
+ if "/" in type_str:
+ tp = from_mime(type_str)
+ try:
+ # If datatype is a field, use its primitive instead
+ tp = tp.primitive # type: ignore
+ except AttributeError:
+ pass
+ elif "." in type_str:
+ parts = type_str.split(".")
+ module = import_module(".".join(parts[:-1]))
+ tp = getattr(module, parts[-1])
+ if not inspect.isclass(tp):
+ raise TypeError(f"Designated type at {type_str} is not a class {tp}")
+ elif re.match(r"^\w+$", type_str):
+ tp = eval(type_str)
+ else:
+ raise ValueError(f"Cannot parse {type_str} to a type safely")
+ return tp
+
+
+def types_converter(types: ty.Dict[str, ty.Union[str, type]]) -> ty.Dict[str, type]:
+ if types is None:
+ return {}
+ converted = {}
+ for name, tp_or_str in types.items():
+ if isinstance(tp_or_str, str):
+ tp = str_to_type(tp_or_str)
+ converted[name] = tp
+ return converted
+
+
+@attrs.define
+class ImportStatement:
+ module: str
+ name: ty.Optional[str] = None
+ alias: ty.Optional[str] = None
+
+
+def from_list_to_imports(
+ obj: ty.Union[ty.List[ImportStatement], list]
+) -> ty.List[ImportStatement]:
+ if obj is None:
+ return []
+ return [from_dict_converter(t, ImportStatement) for t in obj]
+
+
+@attrs.define
+class SpecConverter:
+ omit: ty.List[str] = attrs.field(
+ factory=list,
+ converter=default_if_none(factory=list), # type: ignore
+ metadata={"help": "fields to omit from the Pydra interface"},
+ )
+ rename: ty.Dict[str, str] = attrs.field(
+ factory=dict,
+ converter=default_if_none(factory=dict), # type: ignore
+ metadata={"help": "fields to rename in the Pydra interface"},
+ )
+ types: ty.Dict[str, type] = attrs.field(
+ converter=types_converter,
+ factory=dict,
+ metadata={
+ "help": """override inferred types (use \"mime-like\" string for file-format types,
+ e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred
+ from the nipype interface, but you may want to be more specific, particularly
+ for file types, where specifying the format also specifies the file that will be
+ passed to the field in the automatically generated unittests."""
+ },
+ )
+
+
+@attrs.define
+class InputsConverter(SpecConverter):
+ """Specification of how to conver Nipype inputs into Pydra inputs
+
+ Parameters
+ ----------
+ omit : list[str], optional
+ input fields to omit from the Pydra interface
+ rename : dict[str, str], optional
+ input fields to rename in the Pydra interface
+ types : dict[str, type], optional
+ Override inferred type (use mime-type string for file-format types).
+ Most of the time the correct type will be inferred from the nipype interface,
+ but you may want to be more specific, typically for the case of file types
+ where specifying the format will change the type of file that will be
+ passed to the field in the automatically generated unittests.
+ metadata: dict[str, dict[str, Any]], optional
+ additional metadata to set on any of the input fields (e.g. out_file: position: 1)
+ """
+
+ metadata: ty.Dict[str, ty.Dict[str, ty.Any]] = attrs.field(
+ factory=dict,
+ converter=default_if_none(factory=dict), # type: ignore
+ metadata={
+ "help": "additional metadata to set on any of the input fields (e.g. out_file: position: 1)"
+ },
+ )
+
+
+@attrs.define
+class OutputsConverter(SpecConverter):
+ """Specification of how to conver Nipype outputs into Pydra outputs
+
+ Parameters
+ ----------
+ omit : list[str], optional
+ input fields to omit from the Pydra interface
+ rename : dict[str, str], optional
+ input fields to rename in the Pydra interface
+ types : dict[str, type], optional
+ types to set explicitly (i.e. instead of determining from nipype interface),
+ particularly relevant for file-types, where specifying the format will determine
+ the type of file that is passed to the field in the automatically generated unittests
+ callables : dict[str, str or callable], optional
+ names of methods/callable classes defined in the adjacent `*_callables.py`
+ to set to the `callable` attribute of output fields
+ templates : dict[str, str], optional
+ `output_file_template` values to be provided to output fields
+ requirements : dict[str, list[str]]
+ input fields that are required to be provided for the output field to be present
+ """
+
+ callables: ty.Dict[str, str] = attrs.field(
+ factory=dict,
+ converter=default_if_none(factory=dict), # type: ignore
+ metadata={
+ "help": """names of methods/callable classes defined in the adjacent `*_callables.py`
+ to set to the `callable` attribute of output fields"""
+ },
+ )
+ templates: ty.Dict[str, str] = attrs.field(
+ factory=dict,
+ converter=default_if_none(factory=dict), # type: ignore
+ metadata={
+ "help": "`output_file_template` values to be provided to output fields"
+ },
+ )
+ requirements: ty.Dict[str, ty.List[str]] = attrs.field(
+ factory=dict,
+ converter=default_if_none(factory=dict), # type: ignore
+ metadata={
+ "help": "input fields that are required to be provided for the output field to be present"
+ },
+ )
+
+ @callables.validator
+ def callables_validator(self, _, output_callables: dict):
+ overlapping = set(output_callables.keys()) & set(self.templates.keys())
+ if overlapping:
+ raise ValueError(
+ f"callables and templates have overlapping same keys: {overlapping}"
+ )
+
+
+@attrs.define
+class TestGenerator:
+ """Specifications for the automatically generated test for the generated Nipype spec
+
+ Parameters
+ ----------
+ inputs : dict[str, str], optional
+ values to provide to specific inputs fields (if not provided, a sensible value
+ within the valid range will be provided)
+ imports : list[ImportStatement or dict]
+ list import statements required by the test, with each list item
+ consisting of 'module', 'name', and optionally 'alias' keys
+ expected_outputs: dict[str, str], optional
+        expected values for selected outputs, noting that tests will typically
+ be terminated before they complete for time-saving reasons and will therefore
+ be ignored
+ timeout: int, optional
+        the time to wait in order to be satisfied that the tool has been initialised
+ and performs any internal validation before exiting
+ """
+
+ inputs: ty.Dict[str, str] = attrs.field(
+ factory=dict,
+ converter=default_if_none(factory=dict), # type: ignore
+ metadata={
+ "help": """values to provide to inputs fields in the task initialisation
+ (if not specified, will try to choose a sensible value)"""
+ },
+ )
+ imports: ty.List[ImportStatement] = attrs.field(
+ factory=list,
+ converter=from_list_to_imports,
+ metadata={
+ "help": """list import statements required by the test, with each list item
+ consisting of 'module', 'name', and optionally 'alias' keys"""
+ },
+ )
+ expected_outputs: ty.Dict[str, str] = attrs.field(
+ factory=dict,
+ converter=default_if_none(factory=dict), # type: ignore
+ metadata={
+ "help": """expected values for selected outputs, noting that tests will typically
+ be terminated before they complete for time-saving reasons, and therefore
+ these values will be ignored, when running in CI"""
+ },
+ )
+ timeout: int = attrs.field(
+ default=10,
+ metadata={
+ "help": """the value to set for the timeout in the generated test,
+ after which the test will be considered to have been initialised
+ successfully. Set to 0 to disable the timeout (warning, this could
+ lead to the unittests taking a very long time to complete)"""
+ },
+ )
+ xfail: bool = attrs.field(
+ default=True,
+ metadata={
+ "help": """whether the unittest is expected to fail or not. Set to false
+ when you are satisfied with the edits you have made to this file"""
+ },
+ )
+
+
+@attrs.define
+class DocTestGenerator:
+ """Specifies how the doctest should be constructed
+
+ Parameters
+ ----------
+ cmdline: str
+ the expected cmdline output
+ inputs : dict[str, str or None]
+ name-value pairs for inputs to be provided to the doctest. If the value is None
+ then the ".mock()" method of the corresponding class is used instead.
+ imports : list[ImportStatement or dict]
+ list import statements required by the test, with each list item
+ consisting of 'module', 'name', and optionally 'alias' keys
+ directive : str
+ any doctest directive to be applied to the cmdline line
+ """
+
+ cmdline: str = attrs.field(metadata={"help": "the expected cmdline output"})
+ inputs: ty.Dict[str, str] = attrs.field(
+ factory=dict,
+ metadata={
+ "help": """name-value pairs for inputs to be provided to the doctest.
+ If the field is of file-format type and the value is None, then the
+ '.mock()' method of the corresponding class is used instead."""
+ },
+ )
+ imports: ty.List[ImportStatement] = attrs.field(
+ factory=list,
+ converter=from_list_to_imports,
+ metadata={
+ "help": """list import statements required by the test, with each list item
+ consisting of 'module', 'name', and optionally 'alias' keys"""
+ },
+ )
+ directive: str = attrs.field(
+ default=None,
+ metadata={
+ "help": "any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS"
+ },
+ )
+
+
+def from_dict_to_inputs(obj: ty.Union[InputsConverter, dict]) -> InputsConverter:
+ return from_dict_converter(obj, InputsConverter)
+
+
+def from_dict_to_outputs(obj: ty.Union[OutputsConverter, dict]) -> OutputsConverter:
+ return from_dict_converter(obj, OutputsConverter)
+
+
+def from_list_to_tests(
+ obj: ty.Union[ty.List[TestGenerator], list]
+) -> ty.List[TestGenerator]:
+ if obj is None:
+ return []
+ return [from_dict_converter(t, TestGenerator) for t in obj]
+
+
+def from_list_to_doctests(
+ obj: ty.Union[ty.List[DocTestGenerator], list]
+) -> ty.List[DocTestGenerator]:
+ if obj is None:
+ return []
+ return [from_dict_converter(t, DocTestGenerator) for t in obj]
+
@attrs.define
class TaskConverter:
+ """Specifies how the semi-automatic conversion from Nipype to Pydra should
+ be performed
+ Parameters
+ ----------
task_name: str
- nipype_module: ModuleType = attrs.field(converter=import_module_from_path)
- output_requirements: dict = attrs.field(factory=dict)
- inputs_metadata: dict = attrs.field(factory=dict)
- inputs_drop: dict = attrs.field(factory=dict)
- output_templates: dict = attrs.field(factory=dict)
- output_callables: dict = attrs.field(factory=dict)
- doctest: dict = attrs.field(factory=dict)
- tests_inputs: list = attrs.field(factory=list)
- tests_outputs: list = attrs.field(factory=list)
+ name of the Pydra task
+ nipype_module: str or ModuleType
+ the nipype module or module path containing the Nipype interface
+ nipype_name: str, optional
+ the name of the task in the nipype module, defaults to the output task_name
+ output_module: str
+ relative path to the package root to write the output module to ('.' delimited)
+ inputs: InputsConverter or dict
+        specifications for the conversion of inputs
+ outputs: OutputsConverter or dict
+        specifications for the conversion of outputs
+ tests: ty.List[TestGenerator] or list, optional
+        specifications for how to construct the test. A default test is generated if no
+ specs are provided
+ doctests: ty.List[DocTestGenerator] or list, optional
+        specifications for how to construct the doctest. Doctest is omitted if not
+ provided
+ callables_module: ModuleType or str, optional
+ a module, or path to a module, containing any required callables
+ """
+
+ task_name: str
+ nipype_name: str
+ nipype_module: ModuleType = attrs.field(
+ converter=lambda m: import_module(m) if not isinstance(m, ModuleType) else m
+ )
output_module: str = attrs.field(default=None)
+ inputs: InputsConverter = attrs.field(
+ factory=InputsConverter, converter=from_dict_to_inputs
+ )
+ outputs: OutputsConverter = attrs.field( # type: ignore
+ factory=OutputsConverter,
+ converter=from_dict_to_outputs,
+ )
callables_module: ModuleType = attrs.field(
converter=import_module_from_path, default=None
)
-
- @output_callables.validator
- def output_callables_validator(self, _, output_callables: dict):
- if not output_callables.keys().isdisjoint(self.output_templates.keys()):
- raise Exception("output_callables and output_templates have the same keys")
+ tests: ty.List[TestGenerator] = attrs.field( # type: ignore
+ factory=list, converter=from_list_to_tests
+ )
+ doctests: ty.List[DocTestGenerator] = attrs.field(
+ factory=list, converter=from_list_to_doctests
+ )
def __attrs_post_init__(self):
if self.output_module is None:
if self.nipype_module.__name__.startswith("nipype.interfaces."):
- self.output_module = (
- "pydra.tasks."
- + self.nipype_module.__name__[len("nipype.interfaces.") :]
- + "." + self.task_name.lower()
- )
+ pkg_name = self.nipype_module.__name__.split(".")[2]
+ self.output_module = f"pydra.tasks.{pkg_name}.auto.{self.task_name}"
else:
raise RuntimeError(
"Output-module needs to be explicitly provided to task converter "
@@ -54,15 +396,15 @@ def __attrs_post_init__(self):
@property
def nipype_interface(self) -> nipype.interfaces.base.BaseInterface:
- return getattr(self.nipype_module, self.task_name)
+ return getattr(self.nipype_module, self.nipype_name)
@property
def nipype_input_spec(self) -> nipype.interfaces.base.BaseInterfaceInputSpec:
- return self.nipype_interface.input_spec()
+ return self.nipype_interface.input_spec() if self.nipype_interface.input_spec else None
@property
def nipype_output_spec(self) -> nipype.interfaces.base.BaseTraitedSpec:
- return self.nipype_interface.output_spec()
+ return self.nipype_interface.output_spec() if self.nipype_interface.output_spec else None
def generate(self, package_root: Path):
"""creating pydra input/output spec from nipype specs
@@ -71,16 +413,40 @@ def generate(self, package_root: Path):
input_fields, inp_templates = self.convert_input_fields()
output_fields = self.convert_output_spec(fields_from_template=inp_templates)
- output_file = Path(package_root).joinpath(*self.output_module.split(".")).with_suffix(".py")
+ nonstd_types = set()
+
+ def add_nonstd_types(tp):
+ if ty.get_origin(tp) in (list, ty.Union):
+ for tp_arg in ty.get_args(tp):
+ add_nonstd_types(tp_arg)
+ elif tp.__module__ not in ["builtins", "pathlib", "typing"]:
+ nonstd_types.add(tp)
+
+ for f in input_fields:
+ add_nonstd_types(f[1])
+
+ output_file = (
+ Path(package_root)
+ .joinpath(*self.output_module.split("."))
+ .with_suffix(".py")
+ )
testdir = output_file.parent / "tests"
- testdir.mkdir(parents=True)
+ testdir.mkdir(parents=True, exist_ok=True)
- self.write_task(output_file, input_fields, output_fields)
+ self.write_task(
+ output_file,
+ input_fields=input_fields,
+ output_fields=output_fields,
+ nonstd_types=nonstd_types,
+ )
- filename_test = testdir / f"test_spec_{self.task_name.lower()}.py"
- filename_test_run = testdir / f"test_run_{self.task_name.lower()}.py"
- self.write_test(filename_test=filename_test)
- self.write_test(filename_test=filename_test_run, run=True)
+ filename_test = testdir / f"test_{self.task_name.lower()}.py"
+ # filename_test_run = testdir / f"test_run_{self.task_name.lower()}.py"
+ self.write_tests(
+ filename_test,
+ input_fields=input_fields,
+ nonstd_types=nonstd_types,
+ )
def convert_input_fields(self):
"""creating fields list for pydra input spec"""
@@ -90,7 +456,7 @@ def convert_input_fields(self):
for name, fld in self.nipype_input_spec.traits().items():
if name in self.TRAITS_IRREL:
continue
- if name in self.inputs_drop:
+ if name in self.inputs.omit:
continue
fld_pdr, pos = self.pydra_fld_input(fld, name)
meta_pdr = fld_pdr[-1]
@@ -106,8 +472,8 @@ def convert_input_fields(self):
def pydra_fld_input(self, field, nm):
"""converting a single nipype field to one element of fields for pydra input_spec"""
tp_pdr = self.pydra_type_converter(field, spec_type="input", name=nm)
- if nm in self.inputs_metadata:
- metadata_extra_spec = self.inputs_metadata[nm]
+ if nm in self.inputs.metadata:
+ metadata_extra_spec = self.inputs.metadata[nm]
else:
metadata_extra_spec = {}
@@ -133,21 +499,29 @@ def pydra_fld_input(self, field, nm):
if getattr(field, "name_template"):
template = getattr(field, "name_template")
name_source = ensure_list(getattr(field, "name_source"))
-
- metadata_pdr["output_file_template"] = self.string_formats(
- argstr=template, name=name_source[0]
- )
+ if name_source:
+ tmpl = self.string_formats(
+ argstr=template, name=name_source[0]
+ )
+ else:
+ tmpl = template
+ metadata_pdr["output_file_template"] = tmpl
if tp_pdr in [specs.File, specs.Directory]:
tp_pdr = str
elif getattr(field, "genfile"):
- if nm in self.output_templates:
- metadata_pdr["output_file_template"] = self.output_templates[nm]
+ if nm in self.outputs.templates:
+ try:
+ metadata_pdr["output_file_template"] = self.outputs.templates[nm]
+ except KeyError:
+ raise Exception(
+ f"{nm} is has genfile=True and therefore needs an 'output_file_template' value"
+ )
if tp_pdr in [
specs.File,
specs.Directory,
]: # since this is a template, the file doesn't exist
- tp_pdr = str
- elif nm not in self.output_callables:
+ tp_pdr = Path
+ elif nm not in self.outputs.callables:
raise Exception(
f"the filed {nm} has genfile=True, but no output template or callables_module provided"
)
@@ -164,8 +538,10 @@ def pydra_fld_input(self, field, nm):
def convert_output_spec(self, fields_from_template):
"""creating fields list for pydra input spec"""
fields_pdr_l = []
+ if not self.nipype_output_spec:
+ return fields_pdr_l
for name, fld in self.nipype_output_spec.traits().items():
- if name in self.output_requirements and name not in fields_from_template:
+ if name in self.outputs.requirements and name not in fields_from_template:
fld_pdr = self.pydra_fld_output(fld, name)
fields_pdr_l.append((name,) + fld_pdr)
return fields_pdr_l
@@ -181,14 +557,14 @@ def pydra_fld_output(self, field, name):
if val:
metadata_pdr[key_nm_pdr] = val
- if self.output_requirements[name]:
- if all([isinstance(el, list) for el in self.output_requirements[name]]):
- requires_l = self.output_requirements[name]
+ if self.outputs.requirements[name]:
+ if all([isinstance(el, list) for el in self.outputs.requirements[name]]):
+ requires_l = self.outputs.requirements[name]
nested_flag = True
elif all(
- [isinstance(el, (str, dict)) for el in self.output_requirements[name]]
+ [isinstance(el, (str, dict)) for el in self.outputs.requirements[name]]
):
- requires_l = [self.output_requirements[name]]
+ requires_l = [self.outputs.requirements[name]]
nested_flag = False
else:
Exception("has to be either list of list or list of str/dict")
@@ -205,16 +581,16 @@ def pydra_fld_output(self, field, name):
if nested_flag is False:
metadata_pdr["requires"] = metadata_pdr["requires"][0]
- if name in self.output_templates:
+ if name in self.outputs.templates:
metadata_pdr["output_file_template"] = self.interface_spec[
"output_templates"
][name]
- elif name in self.output_callables:
- metadata_pdr["callable"] = self.output_callables[name]
+ elif name in self.outputs.callables:
+ metadata_pdr["callable"] = self.outputs.callables[name]
return (tp_pdr, metadata_pdr)
def function_callables(self):
- if not self.output_callables:
+ if not self.outputs.callables:
return ""
python_functions_spec = (
Path(os.path.dirname(__file__)) / "../specs/callables.py"
@@ -224,7 +600,7 @@ def function_callables(self):
"specs/callables.py file is needed if output_callables in the spec files"
)
fun_str = ""
- fun_names = list(set(self.output_callables.values()))
+ fun_names = list(set(self.outputs.callables.values()))
fun_names.sort()
for fun_nm in fun_names:
fun = getattr(self.callables_module, fun_nm)
@@ -237,6 +613,11 @@ def pydra_type_converter(self, field, spec_type, name):
raise Exception(
f"spec_type has to be input or output, but {spec_type} provided"
)
+ types_dict = self.inputs.types if spec_type == "input" else self.outputs.types
+ try:
+ return types_dict[name]
+ except KeyError:
+ pass
tp = field.trait_type
if isinstance(tp, traits.trait_types.Int):
tp_pdr = int
@@ -250,7 +631,7 @@ def pydra_type_converter(self, field, spec_type, name):
tp_pdr = dict
elif isinstance(tp, traits_extension.InputMultiObject):
if isinstance(field.inner_traits[0].trait_type, traits_extension.File):
- tp_pdr = specs.MultiInputFile
+ tp_pdr = ty.List[File]
else:
tp_pdr = specs.MultiInputObj
elif isinstance(tp, traits_extension.OutputMultiObject):
@@ -261,7 +642,7 @@ def pydra_type_converter(self, field, spec_type, name):
elif isinstance(tp, traits.trait_types.List):
if isinstance(field.inner_traits[0].trait_type, traits_extension.File):
if spec_type == "input":
- tp_pdr = specs.MultiInputFile
+ tp_pdr = ty.List[File]
else:
tp_pdr = specs.MultiOutputFile
else:
@@ -272,124 +653,200 @@ def pydra_type_converter(self, field, spec_type, name):
): # TODO check the hash_file metadata in nipype
tp_pdr = specs.File
else:
- tp_pdr = str
+ tp_pdr = Path
else:
tp_pdr = ty.Any
return tp_pdr
def string_formats(self, argstr, name):
- import re
-
- if "%s" in argstr:
- argstr_new = argstr.replace("%s", f"{{{name}}}")
- elif "%d" in argstr:
- argstr_new = argstr.replace("%d", f"{{{name}}}")
- elif "%f" in argstr:
- argstr_new = argstr.replace("%f", f"{{{name}}}")
- elif "%g" in argstr:
- argstr_new = argstr.replace("%g", f"{{{name}}}")
- elif len(re.findall("%[0-9.]+f", argstr)) == 1:
- old_format = re.findall("%[0-9.]+f", argstr)[0]
- argstr_new = argstr.replace(old_format, f"{{{name}:{old_format[1:]}}}")
- else:
- raise Exception(f"format from {argstr} is not supported TODO")
- return argstr_new
-
- def write_task(self, filename, input_fields, output_fields):
+ keys = re.findall(r"(%[0-9\.]*(?:s|d|i|g|f))", argstr)
+ new_argstr = argstr
+ for i, key in enumerate(keys):
+ repl = f"{name}" if len(keys) == 1 else f"{name}[{i}]"
+ match = re.match(r"%([0-9\.]+)f", key)
+ if match:
+ repl += ":" + match.group(1)
+ new_argstr = new_argstr.replace(key, r"{" + repl + r"}", 1)
+ return new_argstr
+
+ def write_task(self, filename, input_fields, nonstd_types, output_fields):
"""writing pydra task to the dile based on the input and output spec"""
def types_to_names(spec_fields):
spec_fields_str = []
for el in spec_fields:
el = list(el)
- try:
- el[1] = el[1].__name__
- # add 'TYPE_' to the beginning of the name
- el[1] = "TYPE_" + el[1]
- except (AttributeError):
- el[1] = el[1]._name
- # add 'TYPE_' to the beginning of the name
- el[1] = "TYPE_" + el[1]
+ tp_str = str(el[1])
+ if tp_str.startswith(" ty.List[str]:
+ """Constructs a list of imports to include at start of file"""
+ stmts: ty.Dict[str, str] = {}
+
+ def add_import(stmt):
+ match = re.match(r".*\s+as\s+(\w+)\s*", stmt)
+ if not match:
+ match = re.match(r".*import\s+(\w+)\s*$", stmt)
+ if not match:
+                raise ValueError(f"Unrecognised import statement {stmt}")
+ token = match.group(1)
+ try:
+ prev_stmt = stmts[token]
+ except KeyError:
+ pass
else:
- tests_inp_error.append((tests_inputs[i], out))
-
- spec_str = "import os, pytest \nfrom pathlib import Path\n"
- spec_str += f"from {self.output_module} import {self.task_name} \n\n"
- if run:
- pass
- spec_str += f"@pytest.mark.parametrize('inputs, outputs', {tests_inp_outp})\n"
- spec_str += f"def test_{self.task_name.lower()}(test_data, inputs, outputs):\n"
- spec_str += " in_file = Path(test_data) / 'test.nii.gz'\n"
- spec_str += " if inputs is None: inputs = {{}}\n"
- spec_str += " for key, val in inputs.items():\n"
- spec_str += " try: inputs[key] = eval(val)\n"
- spec_str += " except: pass\n"
- spec_str += f" task = {self.task_name}(in_file=in_file, **inputs)\n"
- spec_str += (
- " assert set(task.generated_output_names) == "
- "set(['return_code', 'stdout', 'stderr'] + outputs)\n"
- )
-
- if run:
+ if prev_stmt != stmt:
+ raise ValueError(
+ f"Cannot add import statement {stmt} as it clashes with "
+ f"previous import {prev_stmt}"
+ )
+ stmts[token] = stmt
+
+ for b in base:
+ add_import(b)
+
+ if re.match(r".*(?>> task = {self.task_name}()\n"
- for key, val in self.doctest.items():
- if type(val) is str:
- doctest += f' >>> task.inputs.{key} = "{val}"\n'
- else:
- doctest += f" >>> task.inputs.{key} = {val}\n"
- doctest += " >>> task.cmdline\n"
- doctest += f" '{cmdline}'"
- doctest += '\n """\n'
- return doctest
+ doctest_str = ""
+ for doctest in self.doctests:
+ doctest_str += f" >>> task = {self.task_name}()\n"
+ for field in input_fields:
+ nm, tp = field[:2]
+ try:
+ val = doctest.inputs[nm]
+ except KeyError:
+ if is_fileset(tp):
+ val = f"{tp.__name__}.mock()"
+ else:
+ val = attrs.NOTHING
+ else:
+ if type(val) is str:
+ val = f'"{val}"'
+ if val is not attrs.NOTHING:
+ doctest_str += f" >>> task.inputs.{nm} = {val}\n"
+ doctest_str += " >>> task.cmdline\n"
+ doctest_str += f" '{doctest.cmdline}'"
+ doctest_str += "\n\n\n"
+
+ imports = self.construct_imports(nonstd_types, doctest_str)
+ if imports:
+ doctest_str = " >>> " + "\n >>> ".join(imports) + "\n\n" + doctest_str
+
+ return " Examples\n -------\n\n" + doctest_str
INPUT_KEYS = [
"allowed_values",
@@ -455,17 +914,74 @@ def create_doctest(self):
"trait_modified",
]
- TYPE_REPLACE = [
- ("'TYPE_File'", "specs.File"),
- ("'TYPE_bool'", "bool"),
- ("'TYPE_str'", "str"),
- ("'TYPE_Any'", "ty.Any"),
- ("'TYPE_int'", "int"),
- ("'TYPE_float'", "float"),
- ("'TYPE_list'", "list"),
- ("'TYPE_dict'", "dict"),
- ("'TYPE_MultiInputObj'", "specs.MultiInputObj"),
- ("'TYPE_MultiOutputObj'", "specs.MultiOutputObj"),
- ("'TYPE_MultiInputFile'", "specs.MultiInputFile"),
- ("'TYPE_MultiOutputFile'", "specs.MultiOutputFile"),
- ]
\ No newline at end of file
+ TIMEOUT_PASS = """import time
+from traceback import format_exc
+import threading
+from dataclasses import dataclass
+from _pytest.runner import TestReport
+
+
+def pass_after_timeout(seconds, poll_interval=0.1):
+ \"\"\"Cancel the test after a certain period, after which it is assumed that the arguments
+    passed to the underlying command have passed its internal validation (so we don't have
+ to wait until the tool completes)
+
+ Parameters
+ ----------
+ seconds : int
+ the number of seconds to wait until cancelling the test (and marking it as passed)
+ \"\"\"
+
+ def decorator(test_func):
+ def wrapper(*args, **kwargs):
+ @dataclass
+ class TestState:
+ \"\"\"A way of passing a reference to the result that can be updated by
+ the test thread\"\"\"
+
+ result = None
+ trace_back = None
+
+ state = TestState()
+
+ def test_runner():
+ try:
+ state.result = test_func(*args, **kwargs)
+ except Exception:
+ state.trace_back = format_exc()
+ raise
+
+ thread = threading.Thread(target=test_runner)
+ thread.start()
+
+ # Calculate the end time for the timeout
+ end_time = time.time() + seconds
+
+ while thread.is_alive() and time.time() < end_time:
+ time.sleep(poll_interval)
+
+ if thread.is_alive():
+ thread.join()
+ return state.result
+
+ if state.trace_back:
+ raise state.trace_back
+
+ outcome = "passed after timeout"
+ rep = TestReport.from_item_and_call(
+ item=args[0],
+ when="call",
+ excinfo=None,
+ outcome=outcome,
+ sections=None,
+ duration=0,
+ keywords=None,
+ )
+ args[0].ihook.pytest_runtest_logreport(report=rep)
+
+ return state.result
+
+ return wrapper
+
+ return decorator
+"""
diff --git a/nipype2pydra/utils.py b/nipype2pydra/utils.py
index 35e048aa..868c3270 100644
--- a/nipype2pydra/utils.py
+++ b/nipype2pydra/utils.py
@@ -3,8 +3,10 @@
from types import ModuleType
import sys
import os
+import inspect
from contextlib import contextmanager
from pathlib import Path
+from fileformats.core import FileSet
from importlib import import_module
@@ -63,3 +65,39 @@ def add_to_sys_path(path: Path):
yield sys.path
finally:
sys.path.pop(0)
+
+
+def is_fileset(tp: type):
+ return (
+ inspect.isclass(tp)
+ and type(tp) is not ty.GenericAlias
+ and issubclass(tp, FileSet)
+ )
+
+
+def to_snake_case(name: str) -> str:
+ """
+ Converts a PascalCase string to a snake_case one
+ """
+ snake_str = ""
+
+ # Loop through each character in the input string
+ for i, char in enumerate(name):
+ # If the current character is uppercase and it's not the first character or
+ # followed by another uppercase character, add an underscore before it and
+ # convert it to lowercase
+ if (
+ i > 0
+ and (char.isupper() or char.isdigit())
+ and (
+ not (name[i - 1].isupper() or name[i - 1].isdigit())
+ or ((i + 1) < len(name) and (name[i + 1].islower() or name[i + 1].islower()))
+ )
+ ):
+ snake_str += "_"
+ snake_str += char.lower()
+ else:
+ # Otherwise, just add the character as it is
+ snake_str += char.lower()
+
+ return snake_str
diff --git a/pyproject.toml b/pyproject.toml
index b58a5c0a..c030eda3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,6 +13,10 @@ dependencies = [
"nipype",
"pydra",
"PyYAML>=6.0",
+ "fileformats >=0.8",
+ "fileformats-medimage >=0.4",
+ "fileformats-datascience",
+ "traits",
]
license = {file = "LICENSE"}
authors = [
diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py
index 2bd6b2c2..b79be4b1 100644
--- a/scripts/pkg_gen/create_packages.py
+++ b/scripts/pkg_gen/create_packages.py
@@ -2,16 +2,38 @@
import typing as ty
import tempfile
import re
+from importlib import import_module
+from copy import copy
import subprocess as sp
import shutil
import tarfile
from pathlib import Path
+import attrs
+from warnings import warn
import requests
import click
import yaml
+import fileformats.core.utils
+import fileformats.core.mixin
+from fileformats.generic import File
+from fileformats.medimage import Nifti1, NiftiGz, Bval, Bvec
+from fileformats.application import Dicom, Xml
+from fileformats.text import TextFile
+from fileformats.datascience import TextMatrix, DatFile
+import nipype.interfaces.base.core
+from nipype2pydra.task import (
+ InputsConverter,
+ OutputsConverter,
+ TestGenerator,
+ DocTestGenerator,
+)
+from nipype2pydra.utils import to_snake_case
+
RESOURCES_DIR = Path(__file__).parent / "resources"
+EXPECTED_FORMATS = [Nifti1, NiftiGz, TextFile, TextMatrix, DatFile, Xml]
+
def download_tasks_template(output_path: Path):
"""Downloads the latest pydra-tasks-template to the output path"""
@@ -23,9 +45,7 @@ def download_tasks_template(output_path: Path):
response = requests.get(release_url, headers=headers)
if response.status_code != 200:
- raise RuntimeError(
- f"Did not find release at '{release_url}'"
- )
+ raise RuntimeError(f"Did not find release at '{release_url}'")
data = response.json()
tarball_url = data["tarball_url"]
@@ -45,10 +65,11 @@ def download_tasks_template(output_path: Path):
@click.argument("output_dir", type=click.Path(path_type=Path))
@click.option("--work-dir", type=click.Path(path_type=Path), default=None)
@click.option("--task-template", type=click.Path(path_type=Path), default=None)
+@click.option("--packages-to-import", type=click.Path(path_type=Path), default=None)
def generate_packages(
- output_dir: Path, work_dir: ty.Optional[Path], task_template: ty.Optional[Path]
+ output_dir: Path, work_dir: ty.Optional[Path], task_template: ty.Optional[Path],
+ packages_to_import: ty.Optional[Path]
):
-
if work_dir is None:
work_dir = Path(tempfile.mkdtemp())
@@ -56,11 +77,14 @@ def generate_packages(
task_template_tar = work_dir / "task-template.tar.gz"
download_tasks_template(task_template_tar)
extract_dir = work_dir / "task_template"
- with tarfile.open(task_template_tar, 'r:gz') as tar:
+ with tarfile.open(task_template_tar, "r:gz") as tar:
tar.extractall(path=extract_dir)
task_template = extract_dir / next(extract_dir.iterdir())
- with open(Path(__file__).parent.parent.parent / "nipype-interfaces-to-import.yaml") as f:
+ if packages_to_import is None:
+ packages_to_import = Path(__file__).parent.parent.parent / "nipype-interfaces-to-import.yaml"
+
+ with open(packages_to_import) as f:
to_import = yaml.load(f, Loader=yaml.SafeLoader)
# Wipe output dir
@@ -68,69 +92,290 @@ def generate_packages(
shutil.rmtree(output_dir)
output_dir.mkdir()
+ not_interfaces = []
+ unmatched_formats = []
+ ambiguous_formats = []
+ has_doctests = set()
+
for pkg in to_import["packages"]:
+ pkg_dir = initialise_task_repo(output_dir, task_template, pkg)
- pkg_dir = output_dir / f"pydra-{pkg}"
- pkg_dir.mkdir()
+ spec_dir = pkg_dir / "nipype-auto-conv" / "specs"
+ spec_dir.mkdir(parents=True, exist_ok=True)
- def copy_ignore(_, names):
- return [n for n in names if n in (".git", "__pycache__", ".pytest_cache")]
+ # Loop through all nipype modules and create specs for their auto-conversion
+ for module, interfaces in to_import["interfaces"].items():
+ if module.split("/")[0] != pkg:
+ continue
- shutil.copytree(task_template, pkg_dir, ignore=copy_ignore)
+ # Loop through all interfaces in module
+ for interface in interfaces:
+ spec_name = to_snake_case(interface)
+ callables_fspath = spec_dir / f"{spec_name}_callables.py"
+ spec_stub = {}
- auto_conv_dir = pkg_dir / "nipype-auto-conv"
- specs_dir = auto_conv_dir / "specs"
- specs_dir.mkdir(parents=True)
- shutil.copy(RESOURCES_DIR / "nipype-auto-convert.py", auto_conv_dir / "generate")
- os.chmod(auto_conv_dir / "generate", 0o755) # make executable
+ # Import interface from module
+ nipype_module_str = "nipype.interfaces." + ".".join(module.split("/"))
+ nipype_module = import_module(nipype_module_str)
+ nipype_interface = getattr(nipype_module, interface)
+ if not issubclass(
+ nipype_interface, nipype.interfaces.base.core.Interface
+ ):
+ not_interfaces.append(f"{module}.{interface}")
+ continue
- gh_workflows_dir = pkg_dir / ".github" / "workflows"
- gh_workflows_dir.mkdir(parents=True)
- shutil.copy(RESOURCES_DIR / "pythonpackage.yaml", gh_workflows_dir / "pythonpackage.yaml")
+ (
+ preamble,
+ input_helps,
+ output_helps,
+ file_inputs,
+ file_outputs,
+ genfile_outputs,
+ multi_inputs,
+ ) = parse_nipype_interface(nipype_interface)
- # Add "pydra.tasks..auto to gitignore"
- with open(pkg_dir / ".gitignore", "a") as f:
- f.write("\npydra/tasks/{pkg}/auto")
+ # Create "stubs" for each of the available fields
+ def fields_stub(name, category_class, values=None):
+ """Used, in conjunction with some find/replaces after dumping, to
+ insert comments into the YAML file"""
+ dct = {}
+ for field in attrs.fields(category_class):
+ field_name = f"{name}.{field.name}"
+ try:
+ val = values[field.name]
+ except (KeyError, TypeError):
+ val = (
+ field.default
+ if (
+ field.default != attrs.NOTHING
+ and not isinstance(field.default, attrs.Factory)
+ )
+ else None
+ )
+ else:
+ if isinstance(val, ty.Iterable) and not val:
+ val = None
+ dct[field_name] = val
+ return dct
- # rename tasks directory
- (pkg_dir / "pydra" / "tasks" / "CHANGEME").rename(pkg_dir / "pydra" / "tasks" / pkg)
+ input_types = {i: File for i in file_inputs}
+ output_types = {o: File for o in file_outputs}
+ output_templates = {}
- # Replace "CHANGEME" string with pkg name
- for fspath in pkg_dir.glob("**/*"):
- if fspath.is_dir():
- continue
- with open(fspath) as f:
- contents = f.read()
- contents = re.sub(r"(?>>" in doctest_str:
+ try:
+ cmdline, inpts, directive, imports = extract_doctest_inputs(
+ doctest_str, interface
+ )
+ except ValueError:
+ intf_name = f"{module.replace('/', '.')}.{interface}"
+ warn(
+ f"Could not parse doctest for {intf_name}:\n{doctest_str}"
+ )
+ continue
+
+ def guess_type(fspath):
+ try:
+ fspath = re.search(
+ r"""['"]([^'"]*)['"]""", fspath
+ ).group(1)
+ except AttributeError:
+ return File
+ possible_formats = []
+ for frmt in fileformats.core.FileSet.all_formats:
+ if not frmt.ext or None in frmt.alternate_exts:
+ continue
+ if frmt.matching_exts(fspath):
+ possible_formats.append(frmt)
+ if not possible_formats:
+ if fspath.endswith(".dcm"):
+ return Dicom
+ if fspath == "bvals":
+ return Bval
+ if fspath == "bvecs":
+ return Bvec
+ unmatched_formats.append(
+ f"{module}.{interface}: {fspath}"
+ )
+ return File
+
+ for expected in EXPECTED_FORMATS:
+ if expected in possible_formats:
+ return expected
+ if len(possible_formats) > 1:
+ non_adjacent = [
+ f
+ for f in possible_formats
+ if not issubclass(
+ f, fileformats.core.mixin.WithAdjacentFiles
+ )
+ ]
+ if non_adjacent:
+ possible_formats = non_adjacent
+ if len(possible_formats) > 1:
+ possible_formats = sorted(
+ possible_formats, key=lambda f: f.__name__
+ )
+ ambiguous_formats.append(possible_formats)
+ return possible_formats[0]
+
+ def combine_types(type_, prev_type):
+ if type_ is File:
+ return prev_type
+ if prev_type is not File:
+ if ty.get_origin(prev_type) is ty.Union:
+ prev_types = ty.get_args(prev_type)
+ else:
+ prev_types = [prev_type]
+ return ty.Union.__getitem__(
+ (type_,) + tuple(prev_types)
+ )
+ return type_
+
+ test_inpts: ty.Dict[str, ty.Optional[ty.Type]] = {}
+ for name, val in inpts.items():
+ if name in file_inputs:
+ guessed_type = guess_type(val)
+ input_types[name] = combine_types(
+ guessed_type, input_types[name]
+ )
+ test_inpts[name] = None
+ else:
+ test_inpts[name] = val
+ if name in file_outputs:
+ guessed_type = guess_type(val)
+ output_types[name] = combine_types(
+ guessed_type, output_types[name]
+ )
+ if name in genfile_outputs:
+ output_templates[name] = val
+
+ tests.append(
+ fields_stub(
+ "test",
+ TestGenerator,
+ {"inputs": test_inpts, "imports": imports},
+ )
+ )
+ doctests.append(
+ fields_stub(
+ "doctest",
+ DocTestGenerator,
+ {
+ "cmdline": cmdline,
+ "inputs": copy(test_inpts),
+ "imports": imports,
+ "directive": directive,
+ },
+ )
+ )
+ has_doctests.add(f"{module.replace('/', '.')}.{interface}")
+
+ # Add default template names for fields not explicitly listed in doctests
+ for outpt in genfile_outputs:
+ if outpt not in output_templates:
+ try:
+ frmt = output_types[outpt]
+ except KeyError:
+ ext = ""
+ else:
+ if getattr(frmt, "_name", None) == "Union":
+ ext = ty.get_args(frmt)[0].strext
+ else:
+ ext = frmt.strext
+ output_templates[outpt] = outpt + ext
+
+ # convert to multi-input types to lists
+ input_types = {
+ n: ty.List[t] if n in multi_inputs else t
+ for n, t in input_types.items()
+ }
- for module, interfaces in to_import["interfaces"].items():
- if module.split("/")[0] != pkg:
- continue
- module_spec_dir = specs_dir.joinpath(*module.split("/"))
- module_spec_dir.mkdir(parents=True)
- for interface in interfaces:
- callables_fspath = module_spec_dir / f"{interface}_callables.py"
spec_stub = {
- "task_name": interface,
- "nipype_module": "nipype.interfaces." + ".".join(module.split("/")),
- "output_requirements": "# dict[output-field, list[input-field]] : the required input fields for output-field",
- "inputs_metadata": "# dict[input-field, dict[str, Any]] : additional metadata to be inserted into input field",
- "inputs_drop": "# list[input-field] : input fields to drop from the spec",
- "output_templates": "# dict[input-field, str] : \"output_file_template\" to provide to input field",
- "output_callables": f"# dict[output-field, str] : name of function defined in {callables_fspath.name} that retrieves value for output",
- "doctest": "# dict[str, Any]: key-value pairs to provide as inputs to the doctest + the expected value of \"cmdline\" as special key-value pair",
- "tests_inputs": "# List of inputs to pass to tests",
- "tests_outputs": "# list of outputs expected from tests",
+ "task_name": to_snake_case(interface),
+ "nipype_name": interface,
+ "nipype_module": nipype_module_str,
+ "inputs": fields_stub(
+ "inputs",
+ InputsConverter,
+ {
+ "types": {
+ n: fileformats.core.utils.to_mime(t, official=False)
+ for n, t in input_types.items()
+ }
+ },
+ ),
+ "outputs": fields_stub(
+ "outputs",
+ OutputsConverter,
+ {
+ "types": {
+ n: fileformats.core.utils.to_mime(t, official=False)
+ for n, t in output_types.items()
+ },
+ "templates": output_templates,
+ },
+ ),
+ "tests": tests,
+ "doctests": doctests,
}
- yaml_str = yaml.dump(spec_stub, indent=2, sort_keys=False)
- # strip inserted line-breaks in long strings (so they can be converted to in-line comments)
- yaml_str = re.sub(r"\n ", " ", yaml_str)
- # extract comments after they have been dumped as strings
- yaml_str = re.sub(r"'#(.*)'", r" # \1", yaml_str)
- with open(module_spec_dir / (interface + ".yaml"), "w") as f:
- f.write(yaml_str)
+ yaml_str = yaml.dump(spec_stub, indent=2, sort_keys=False, width=4096)
+ # Strip explicit nulls from dumped YAML
+ yaml_str = yaml_str.replace(" null", "")
+ # Inject comments into dumped YAML
+ for category_name, category_class in [
+ ("inputs", InputsConverter),
+ ("outputs", OutputsConverter),
+ ("test", TestGenerator),
+ ("doctest", DocTestGenerator),
+ ]:
+ for field in attrs.fields(category_class):
+ tp = field.type
+ if tp.__module__ == "builtins":
+ tp_name = tp.__name__
+ else:
+ tp_name = str(tp).lower().replace("typing.", "")
+ comment = f" # {tp_name} - " + field.metadata["help"].replace(
+ "\n ", "\n # "
+ )
+ yaml_str = re.sub(
+ f" {category_name}.{field.name}:" + r"(.*)",
+ f" {field.name}:" + r"\1" + f"\n{comment}",
+ yaml_str,
+ )
+ # Add comments to input and output fields, with their type and description
+ for inpt, desc in input_helps.items():
+ yaml_str = re.sub(f" ({inpt}):(.*)", r" \1:\2\n # ##PLACEHOLDER##", yaml_str)
+ yaml_str = yaml_str.replace("##PLACEHOLDER##", desc)
+ for outpt, desc in output_helps.items():
+ yaml_str = re.sub(f" ({outpt}):(.*)", r" \1:\2\n # ##PLACEHOLDER##", yaml_str)
+ yaml_str = yaml_str.replace("##PLACEHOLDER##", desc)
+
+ with open(spec_dir / (spec_name + ".yaml"), "w") as f:
+ f.write(preamble + yaml_str)
with open(callables_fspath, "w") as f:
f.write(
f'"""Module to put any functions that are referred to in {interface}.yaml"""\n'
@@ -138,9 +383,258 @@ def copy_ignore(_, names):
sp.check_call("git init", shell=True, cwd=pkg_dir)
sp.check_call("git add --all", shell=True, cwd=pkg_dir)
- sp.check_call('git commit -m"initial commit of generated stubs"', shell=True, cwd=pkg_dir)
+ sp.check_call(
+ 'git commit -m"initial commit of generated stubs"', shell=True, cwd=pkg_dir
+ )
sp.check_call("git tag 0.1.0", shell=True, cwd=pkg_dir)
+ unmatched_extensions = set(
+ File.decompose_fspath(
+ f.split(":")[1].strip(), mode=File.ExtensionDecomposition.single
+ )[2]
+ for f in unmatched_formats
+ )
+
+ print("Unmatched test input formats")
+ print("\n".join(unmatched_formats))
+ print("Unmatched format extensions")
+ print("\n".join(sorted(unmatched_extensions)))
+ print("\nAmbiguous formats")
+ print("\n".join(str(p) for p in ambiguous_formats))
+ print("\nWith doctests")
+ print("\n".join(sorted(has_doctests)))
+
+
+def initialise_task_repo(output_dir, task_template: Path, pkg: str) -> Path:
+ """Copy the task template to the output directory and customise it for the given
+ package name and return the created package directory"""
+
+ pkg_dir = output_dir / f"pydra-{pkg}"
+
+ def copy_ignore(_, names):
+ return [n for n in names if n in (".git", "__pycache__", ".pytest_cache")]
+
+ shutil.copytree(task_template, pkg_dir, ignore=copy_ignore)
+
+ # Setup script to auto-convert nipype interfaces
+ auto_conv_dir = pkg_dir / "nipype-auto-conv"
+ specs_dir = auto_conv_dir / "specs"
+ specs_dir.mkdir(parents=True)
+ shutil.copy(RESOURCES_DIR / "nipype-auto-convert.py", auto_conv_dir / "generate")
+ os.chmod(auto_conv_dir / "generate", 0o755) # make executable
+ shutil.copy(
+ RESOURCES_DIR / "nipype-auto-convert-requirements.txt",
+ auto_conv_dir / "requirements.txt",
+ )
+
+ # Setup GitHub workflows
+ gh_workflows_dir = pkg_dir / ".github" / "workflows"
+ gh_workflows_dir.mkdir(parents=True, exist_ok=True)
+ shutil.copy(
+ RESOURCES_DIR / "gh_workflows" / "pythonpackage.yaml",
+ gh_workflows_dir / "pythonpackage.yaml",
+ )
+
+ # Add modified README
+ os.unlink(pkg_dir / "README.md")
+ shutil.copy(RESOURCES_DIR / "README.rst", pkg_dir / "README.rst")
+ with open(pkg_dir / "pyproject.toml") as f:
+ pyproject_toml = f.read()
+ pyproject_toml = pyproject_toml.replace("README.md", "README.rst")
+ with open(pkg_dir / "pyproject.toml", "w") as f:
+ f.write(pyproject_toml)
+
+ # Add "pydra.tasks..auto to gitignore"
+ with open(pkg_dir / ".gitignore", "a") as f:
+ f.write(f"\n/pydra/tasks/{pkg}/auto" f"\n/pydra/tasks/{pkg}/_version.py\n")
+
+ # rename tasks directory
+ (pkg_dir / "pydra" / "tasks" / "CHANGEME").rename(pkg_dir / "pydra" / "tasks" / pkg)
+
+ # Add in modified __init__.py
+ shutil.copy(
+ RESOURCES_DIR / "pkg_init.py", pkg_dir / "pydra" / "tasks" / pkg / "__init__.py"
+ )
+
+ # Replace "CHANGEME" string with pkg name
+ for fspath in pkg_dir.glob("**/*"):
+ if fspath.is_dir():
+ continue
+ with open(fspath) as f:
+ contents = f.read()
+ contents = re.sub(r"(? ty.Tuple[
+ str,
+ ty.Dict[str, str],
+ ty.Dict[str, str],
+ ty.List[str],
+ ty.List[str],
+ ty.List[str],
+ ty.List[str],
+]:
+ """Generate preamble comments at start of file with args and doc strings"""
+ input_helps = {}
+ file_inputs = []
+ genfile_outputs = []
+ multi_inputs = []
+ if nipype_interface.input_spec:
+ for inpt_name, inpt in nipype_interface.input_spec().traits().items():
+ if inpt_name in ("trait_added", "trait_modified"):
+ continue
+ inpt_desc = inpt.desc.replace("\n", " ") if inpt.desc else ""
+ inpt_mdata = f"type={type(inpt.trait_type).__name__.lower()}|default={inpt.default!r}"
+ if isinstance(inpt.trait_type, nipype.interfaces.base.core.traits.Enum):
+ inpt_mdata += f"|allowed[{','.join(sorted(repr(v) for v in inpt.trait_type.values))}]"
+ input_helps[inpt_name] = f"{inpt_mdata}: {inpt_desc}"
+ if inpt.genfile:
+ genfile_outputs.append(inpt_name)
+ elif type(inpt.trait_type).__name__ == "File":
+ file_inputs.append(inpt_name)
+ elif type(inpt.trait_type).__name__ == "InputMultiObject":
+ file_inputs.append(inpt_name)
+ multi_inputs.append(inpt_name)
+ elif (
+ type(inpt.trait_type).__name__ == "List"
+ and type(inpt.trait_type.inner_traits()[0].handler).__name__ == "File"
+ ):
+ file_inputs.append(inpt_name)
+ multi_inputs.append(inpt_name)
+ file_outputs = []
+ output_helps = {}
+ if nipype_interface.output_spec:
+ for outpt_name, outpt in nipype_interface.output_spec().traits().items():
+ if outpt_name in ("trait_added", "trait_modified"):
+ continue
+ outpt_desc = outpt.desc.replace("\n", " ") if outpt.desc else ""
+ output_helps[
+ outpt_name
+ ] = f"type={type(outpt.trait_type).__name__.lower()}: {outpt_desc}"
+ if type(outpt.trait_type).__name__ == "File":
+ file_outputs.append(outpt_name)
+ doc_string = nipype_interface.__doc__ if nipype_interface.__doc__ else ""
+ doc_string = doc_string.replace("\n", "\n# ")
+    # Create a preamble at the top of the specification explaining what to do
+ preamble = (
+ f"""# This file is used to manually specify the semi-automatic conversion of
+ # '{nipype_interface.__module__.replace('/', '.')}.{nipype_interface.__name__}' from Nipype to Pydra.
+ #
+ # Please fill-in/edit the fields below where appropriate
+ #
+ # Docs
+ # ----
+ # {doc_string}\n"""
+ ).replace(" #", "#")
+ return (
+ preamble,
+ input_helps,
+ output_helps,
+ file_inputs,
+ file_outputs,
+ genfile_outputs,
+ multi_inputs,
+ )
+
+
+def extract_doctest_inputs(
+ doctest: str, interface: str
+) -> ty.Tuple[
+ ty.Optional[str], dict[str, ty.Any], ty.Optional[str], ty.List[ty.Dict[str, str]]
+]:
+ """Extract the inputs passed to tasks in the doctests of Nipype interfaces
+
+ Parameters
+ ----------
+ doctest : str
+ the doc string of the interface
+ interface : str
+ the name of the interface
+
+ Returns
+ -------
+ cmdline : str
+ the expected cmdline
+ inputs : dict[str, ty.Any]
+ the inputs passed to the task
+ directive : str
+ any doctest directives found after the cmdline, e.g. ELLIPSIS"""
+ match = re.search(
+ r"""^\s+>>> (?:.*)\.cmdline(\s*# doctest: .*)?\n\s*('|")(.*)(?:'|")?\s*.*(?!>>>)\2""",
+ doctest,
+ flags=re.MULTILINE | re.DOTALL,
+ )
+ if match:
+ cmdline = match.group(3)
+ cmdline = re.sub(r"\s+", " ", cmdline)
+ cmdline = cmdline.replace("'", '"') if '"' not in cmdline else cmdline
+ directive = match.group(2)
+ if directive == '"' or directive == "'":
+ directive = None
+ else:
+ cmdline = directive = None
+ doctest_inpts = {
+ n: v.replace("'", '"') if '"' not in v else v
+ for n, v in re.findall(
+ r"""\s+>>> (?:\w+)\.inputs\.(\w+) ?= ?(.*)\n""",
+ doctest,
+ )
+ }
+ match = re.search(
+ interface + r"""\(([^\)]+)\)(\n| ?#|\.cmdline)""",
+ doctest,
+ )
+ if match is not None:
+ arg_str = match.group(1) + ", "
+ doctest_inpts.update(
+ {
+ n: v.replace("'", '"') if '"' not in v else v
+ for n, v in re.findall(r"(\w+) *= *([^=]+), *", arg_str)
+ }
+ )
+ imports = []
+ for ln in doctest.splitlines():
+ if re.match(r".*>>>.*(?>> import (.*)$", ln)
+ if match:
+ for mod in match.group(1).split(","):
+ imports.append({"module": mod.strip()})
+ else:
+ match = re.match(r".*>>> from ([\w\.]+) import (.*)", ln)
+ if not match:
+ raise ValueError(f"Could not parse import statement: {ln}")
+ module = match.group(1)
+ if "nipype.interfaces" in module:
+ continue
+ for atr in match.group(2).split(","):
+ match = re.match(r"(\w+) as ((\w+))", atr)
+ if match:
+ name = match.group(1)
+ alias = match.group(2)
+ else:
+ name = atr
+ alias = None
+ imports.append(
+ {
+ "module": module,
+ "name": name,
+ "alias": alias,
+ }
+ )
+ if not doctest_inpts:
+ raise ValueError(f"Could not parse doctest:\n{doctest}")
+
+ if not directive or directive == "''" or directive == '""':
+ directive = None
+
+ return cmdline, doctest_inpts, directive, imports
+
if __name__ == "__main__":
import sys
diff --git a/scripts/pkg_gen/fastsurfer-only.yaml b/scripts/pkg_gen/fastsurfer-only.yaml
new file mode 100644
index 00000000..1e78fd89
--- /dev/null
+++ b/scripts/pkg_gen/fastsurfer-only.yaml
@@ -0,0 +1,5 @@
+packages:
+- fastsurfer
+interfaces:
+ fastsurfer:
+ - FastSurfer
\ No newline at end of file
diff --git a/scripts/pkg_gen/freesurfer-only.yaml b/scripts/pkg_gen/freesurfer-only.yaml
new file mode 100644
index 00000000..dec14f82
--- /dev/null
+++ b/scripts/pkg_gen/freesurfer-only.yaml
@@ -0,0 +1,88 @@
+packages:
+- freesurfer
+interfaces:
+ freesurfer:
+ - ParseDICOMDir
+ - UnpackSDICOMDir
+ - MRIConvert
+ - Resample
+ - ReconAll
+ - BBRegister
+ - ApplyVolTransform
+ - Smooth
+ - DICOMConvert
+ - RobustRegister
+ - FitMSParams
+ - SynthesizeFLASH
+ - MNIBiasCorrection
+ - WatershedSkullStrip
+ - Normalize
+ - CANormalize
+ - CARegister
+ - CALabel
+ - MRIsCALabel
+ - SegmentCC
+ - SegmentWM
+ - EditWMwithAseg
+ - ConcatenateLTA
+ - MRISPreproc
+ - MRISPreprocReconAll
+ - GLMFit
+ - OneSampleTTest
+ - Binarize
+ - Concatenate
+ - SegStats
+ - SegStatsReconAll
+ - Label2Vol
+ - MS_LDA
+ - Label2Label
+ - Label2Annot
+ - SphericalAverage
+ - SampleToSurface
+ - SurfaceSmooth
+ - SurfaceTransform
+ - Surface2VolTransform
+ - SurfaceSnapshots
+ - ApplyMask
+ - MRIsConvert
+ - MRITessellate
+ - MRIPretess
+ - MRIMarchingCubes
+ - SmoothTessellation
+ - MakeAverageSubject
+ - ExtractMainComponent
+ - Tkregister2
+ - AddXFormToHeader
+ - CheckTalairachAlignment
+ - TalairachAVI
+ - TalairachQC
+ - RemoveNeck
+ - MRIFill
+ - MRIsInflate
+ - Sphere
+ - FixTopology
+ - EulerNumber
+ - RemoveIntersection
+ - MakeSurfaces
+ - Curvature
+ - CurvatureStats
+ - Jacobian
+ - MRIsCalc
+ - VolumeMask
+ - ParcellationStats
+ - Contrast
+ - RelabelHypointensities
+ - Aparc2Aseg
+ - Apas2Aseg
+ - MRIsExpand
+ - MRIsCombine
+ - RobustTemplate
+ - FuseSegmentations
+ - MPRtoMNI305
+ - RegisterAVItoTalairach
+ - EMRegister
+ - Register
+ - Paint
+ - MRICoreg
+ - GTMSeg
+ - GTMPVC
diff --git a/scripts/pkg_gen/resources/README.rst b/scripts/pkg_gen/resources/README.rst
new file mode 100644
index 00000000..4e5f72bd
--- /dev/null
+++ b/scripts/pkg_gen/resources/README.rst
@@ -0,0 +1,154 @@
+===============================
+Pydra task package for CHANGEME
+===============================
+
+.. image:: https://github.com/nipype/pydra-CHANGEME/actions/workflows/pythonpackage.yml/badge.svg
+ :target: https://github.com/nipype/pydra-CHANGEME/actions/workflows/pythonpackage.yml
+.. .. image:: https://codecov.io/gh/nipype/pydra-CHANGEME/branch/main/graph/badge.svg?token=UIS0OGPST7
+.. :target: https://codecov.io/gh/nipype/pydra-CHANGEME
+.. image:: https://img.shields.io/pypi/pyversions/pydra-CHANGEME.svg
+ :target: https://pypi.python.org/pypi/pydra-CHANGEME/
+ :alt: Supported Python versions
+.. image:: https://img.shields.io/pypi/v/pydra-CHANGEME.svg
+ :target: https://pypi.python.org/pypi/pydra-CHANGEME/
+ :alt: Latest Version
+
+
+This package contains a collection of Pydra task interfaces for the CHANGEME toolkit.
+The basis of this collection has been formed by the semi-automatic conversion of
+existing `Nipype <https://nipype.readthedocs.io>`__ interfaces to Pydra using the
+`Nipype2Pydra <https://github.com/nipype/nipype2pydra>`__ tool
+
+
+Automatically-generated vs manually-curated tasks
+-------------------------------------------------
+
+Automatically generated tasks can be found in the `pydra.tasks.CHANGEME.auto` package.
+These packages should be treated with extreme caution as they likely do not pass testing.
+Generated tasks that have been edited and pass testing are imported into one or more of the
+`pydra.tasks.CHANGEME.v*` packages, corresponding to the version of the CHANGEME toolkit
+they are designed for.
+
+Tests
+-----
+
+This package comes with a battery of automatically generated test modules. To install
+the necessary dependencies to run the tests
+
+.. code-block::
+
+ $ pip install -e .[test]
+
+Then the tests, including `doctests <https://docs.python.org/3/library/doctest.html>`__, can be launched using
+
+.. code-block::
+
+ $ pytest --doctest-modules pydra/tasks/*
+
+By default, the tests are set to time-out after 10s, after which the underlying tool is
+assumed to have passed the validation/initialisation phase and we assume that it will
+run to completion. To disable this and run the test(s) through to completion run
+
+.. code-block::
+
+ $ pytest --doctest-modules --timeout-pass 0 pydra/tasks/*
+
+Continuous integration
+----------------------
+
+This template uses `GitHub Actions <https://docs.github.com/en/actions>`__ to run tests and
+deploy packages to PYPI. New packages are built and uploaded when releases are created on
+GitHub, or when new versions of Nipype or the Nipype2Pydra conversion tool are released.
+Releases triggered by updates to Nipype or Nipype2Pydra are signified by the `postN`
+suffix where `N = <nipype-version><nipype2pydra-version>` with the '.'s stripped, e.g.
+`v0.2.3post185010` corresponds to the v0.2.3 tag of this repository with auto-generated
+packages from Nipype 1.8.5 using Nipype2Pydra 0.1.0.
+
+
+Contributing to this package
+----------------------------
+
+Developer installation
+~~~~~~~~~~~~~~~~~~~~~~
+
+
+Install repo in developer mode from the source directory and install pre-commit to
+ensure consistent code-style and quality.
+
+.. code-block::
+
+ $ pip install -e .[test,dev]
+ $ pre-commit install
+
+Next install the requirements for running the auto-conversion script and generate the
+Pydra task interfaces from their Nipype counterparts
+
+.. code-block::
+
+ $ pip install -r nipype-auto-conv/requirements.txt
+
+Then run the conversion script to convert Nipype interfaces to Pydra
+
+.. code-block::
+
+ $ nipype-auto-conv/generate
+
+## Methodology
+
+The development of this package is expected to have two phases
+
+1. Where the corresponding Nipype interfaces are considered to be the ground truth, and
+ the Pydra tasks are generated from them
+2. When the Pydra tasks are considered be mature and they are edited by hand
+
+Different tasks will probably mature at different times so there will probably be an
+intermediate phase between 1 and 2.
+
+Auto-conversion phase
+~~~~~~~~~~~~~~~~~~~~~
+
+The auto-converted Pydra tasks are generated from their corresponding Nipype interface
+in combination with "conversion hints" contained in YAML specs
+located in `nipype-auto-conv/specs/`. The self-documented conversion specs are
+to be edited by hand in order to help the auto-converter produce valid Pydra tasks.
+After editing one or more conversion specs the `pydra.tasks.CHANGEME.auto` package should
+be regenerated by running
+
+.. code-block::
+
+ $ nipype-auto-conv/generate
+
+The tests should be run on the auto-generated tasks to see if they are valid
+
+.. code-block::
+
+ $ pytest --doctest-modules pydra/tasks/CHANGEME/auto/tests/test_<module-name>.py
+
+If the test passes you should then edit the `pydra/tasks/CHANGEME/v<version>/__init__.py` file
+to import the now valid task interface to signify that it has been validated and is ready
+for use, e.g.
+
+.. code-block:: python
+
+ from pydra.tasks.CHANGEME.auto import <validated-task-name>
+
+
+Typing and sample test data
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The automatically generated tests will attempt to provide the task instance to be tested
+with sensible default values based on the type of the field and any constraints it has
+on it. However, these will often need to be manually overridden after consulting the
+underlying tool's documentation.
+
+For file-based data, automatically generated file-system objects will be created for
+selected format types, e.g. Nifti, Dicom. Therefore, it is important to specify the
+format of the file using the "mime-like" string corresponding to a
+`fileformats `__ class
+in the ``inputs > types`` and ``outputs > types`` dicts of the YAML spec.
+
+If the required file-type is not implemented within fileformats, please see the `fileformats
+docs `__ for instructions on how to define
+new fileformat types, and see
+`fileformats-medimage-extras `__
+for an example on how to implement methods to generate sample data for them.
diff --git a/scripts/pkg_gen/resources/auto-release.yaml b/scripts/pkg_gen/resources/auto-release.yaml
deleted file mode 100644
index 17530f79..00000000
--- a/scripts/pkg_gen/resources/auto-release.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-name: Generate Release
-
-on:
- repository_dispatch:
- types: [create-release]
-
-jobs:
- create_release:
- runs-on: ubuntu-latest
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v2
-
- - name: Generate Release
- run: |
- # Extract necessary information from the event payload
- REPO_OWNER=${{ github.event.client_payload.repo_owner }}
- REPO_NAME=${{ github.event.client_payload.repo_name }}
- RELEASE_TAG=${{ github.event.client_payload.release_tag }}
- RELEASE_NAME=${{ github.event.client_payload.release_name }}
- RELEASE_BODY=${{ github.event.client_payload.release_body }}
-
- # Create a new release using the GitHub API
- curl -X POST \
- -H "Accept: application/vnd.github.v3+json" \
- -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
- -d '{
- "tag_name": "$RELEASE_TAG",
- "target_commitish": "master",
- "name": "$RELEASE_NAME",
- "body": "$RELEASE_BODY",
- "draft": false,
- "prerelease": false
- }' \
- "https://api.github.com/repos/$REPO_OWNER/$REPO_NAME/releases"
\ No newline at end of file
diff --git a/scripts/pkg_gen/resources/pythonpackage.yml b/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml
similarity index 58%
rename from scripts/pkg_gen/resources/pythonpackage.yml
rename to scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml
index 79868a58..aa1a7a7b 100644
--- a/scripts/pkg_gen/resources/pythonpackage.yml
+++ b/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml
@@ -6,23 +6,25 @@
name: Python package
-# Set once
-env:
- SUBPACKAGE: CHANGEME
-
on:
push:
- branches: [ main ]
+ branches: [ main, develop ]
tags: [ '*' ]
pull_request:
- branches: [ main ]
+ branches: [ main, develop ]
+ repository_dispatch:
+ types: [create-release]
jobs:
- auto-gen:
+ nipype-conv:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Revert version to most recent tag on upstream update
+ if: github.event_name == 'repository_dispatch'
+ run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}')
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
- name: Install build dependencies
@@ -31,18 +33,32 @@ jobs:
run: python -m pip install -r ./nipype-auto-conv/requirements.txt
- name: Run automatic Nipype > Pydra conversion
run: ./nipype-auto-conv/generate
+ - uses: actions/upload-artifact@v3
+ with:
+ name: converted-nipype
+ path: pydra/tasks/CHANGEME/auto
devcheck:
+ needs: [nipype-conv]
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: [3.7, '3.10'] # Check oldest and newest versions
+ python-version: ['3.8', '3.11'] # Check oldest and newest versions
pip-flags: ['', '--editable']
pydra:
- 'pydra'
- '--editable git+https://github.com/nipype/pydra.git#egg=pydra'
steps:
- - uses: actions/checkout@v3
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Revert version to most recent tag on upstream update
+ if: github.event_name == 'repository_dispatch'
+ run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}')
+ - name: Download tasks converted from Nipype
+ uses: actions/download-artifact@v3
+ with:
+ name: converted-nipype
+ path: pydra/tasks/CHANGEME/auto
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
@@ -57,17 +73,26 @@ jobs:
- name: Install task package
run: |
pip install ${{ matrix.pip-flags }} ".[dev]"
- python -c "import pydra.tasks.$SUBPACKAGE as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')"
+ python -c "import pydra.tasks.CHANGEME as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')"
python -c "import pydra as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')"
test:
+ needs: [nipype-conv]
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: [3.7, 3.8, 3.9, '3.10']
+ python-version: ['3.8', '3.11']
steps:
- uses: actions/checkout@v3
+ - name: Revert version to most recent tag on upstream update
+ if: github.event_name == 'repository_dispatch'
+ run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}')
+ - name: Download tasks converted from Nipype
+ uses: actions/download-artifact@v3
+ with:
+ name: converted-nipype
+ path: pydra/tasks/CHANGEME/auto
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
@@ -78,12 +103,12 @@ jobs:
- name: Install task package
run: |
pip install ".[test]"
- python -c "import pydra.tasks.$SUBPACKAGE as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')"
+ python -c "import pydra.tasks.CHANGEME as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')"
python -c "import pydra as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')"
- name: Test with pytest
run: |
- pytest -sv --doctest-modules pydra/tasks/$SUBPACKAGE \
- --cov pydra.tasks.$SUBPACKAGE --cov-report xml
+ pytest -sv --doctest-modules pydra/tasks/CHANGEME \
+ --cov pydra.tasks.CHANGEME --cov-report xml
- uses: codecov/codecov-action@v3
if: ${{ always() }}
@@ -92,12 +117,26 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: [3.9]
+ python-version: ['3.11']
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
fetch-depth: 0
+ - name: Download tasks converted from Nipype
+ uses: actions/download-artifact@v3
+ with:
+ name: converted-nipype
+ path: pydra/tasks/CHANGEME/auto
+ - name: Tag release with a post-release based on Nipype and Nipype2Pydra versions
+ if: github.event_name == 'repository_dispatch'
+ run: |
+ TAG=$(git tag -l | tail -n 1 | awk -F post '{print $1}')
+ POST=$(python -c "from pydra.tasks.CHANGEME.auto._version import *; print(post_release)")
+ git checkout $TAG
+ git add -f pydra/tasks/CHANGEME/auto/_version.py
+ git commit -am"added auto-generated version to make new tag for package version"
+ git tag ${TAG}post${POST}
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
@@ -118,7 +157,7 @@ jobs:
# [1] https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-output-parameter
- name: Check for PyPI token on tag
id: deployable
- if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags')
+ if: (github.event_name == 'push' && startsWith(github.ref, 'refs/tags')) || github.event_name == 'repository_dispatch'
env:
PYPI_API_TOKEN: "${{ secrets.PYPI_API_TOKEN }}"
run: if [ -n "$PYPI_API_TOKEN" ]; then echo "DEPLOY=true" >> $GITHUB_OUTPUT; fi
diff --git a/scripts/pkg_gen/resources/nipype-auto-convert-requirements.txt b/scripts/pkg_gen/resources/nipype-auto-convert-requirements.txt
new file mode 100644
index 00000000..fae44d4c
--- /dev/null
+++ b/scripts/pkg_gen/resources/nipype-auto-convert-requirements.txt
@@ -0,0 +1,10 @@
+black
+attrs>=22.1.0
+nipype
+pydra
+PyYAML>=6.0
+fileformats >=0.8
+fileformats-medimage >=0.4
+fileformats-datascience >= 0.1
+traits
+nipype2pydra
\ No newline at end of file
diff --git a/scripts/pkg_gen/resources/nipype-auto-convert.py b/scripts/pkg_gen/resources/nipype-auto-convert.py
index 22dec3c1..5c0a10f8 100644
--- a/scripts/pkg_gen/resources/nipype-auto-convert.py
+++ b/scripts/pkg_gen/resources/nipype-auto-convert.py
@@ -3,6 +3,7 @@
import os.path
from warnings import warn
from pathlib import Path
+import shutil
from importlib import import_module
import yaml
import nipype
@@ -24,38 +25,46 @@
f"using development version of nipype2pydra ({nipype2pydra.__version__}), "
f"development component will be dropped in {PKG_NAME} package version"
)
-n2p_version = nipype2pydra.__version__.split(".dev")[0]
-
-auto_version = f"{nipype.__version__}.{n2p_version}"
-
# Insert specs dir into path so we can load callables modules
sys.path.insert(0, str(SPECS_DIR))
auto_init = f"# Auto-generated by {__file__}, do not edit as it will be overwritten\n\n"
-for fspath in SPECS_DIR.glob("**/*.yaml"):
+auto_dir = PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto"
+if auto_dir.exists():
+ shutil.rmtree(auto_dir)
+
+for fspath in sorted(SPECS_DIR.glob("**/*.yaml")):
with open(fspath) as f:
spec = yaml.load(f, Loader=yaml.SafeLoader)
- rel_pkg_path = str(fspath.relative_to(SPECS_DIR)).replace(os.path.sep, ".")
+ rel_pkg_path = str(fspath.parent.relative_to(SPECS_DIR)).replace(os.path.sep, ".")
+ if rel_pkg_path == ".":
+ rel_pkg_path = fspath.stem
+ else:
+ rel_pkg_path += "." + fspath.stem
+
callables = import_module(rel_pkg_path + "_callables")
- module_name = fspath.name.lower()
converter = TaskConverter(
- output_module=f"pydra.tasks.{PKG_NAME}.auto.{module_name}",
+ output_module=f"pydra.tasks.{PKG_NAME}.auto.{spec['task_name']}",
callables_module=callables, # type: ignore
- **spec
+ **spec,
)
converter.generate(PKG_ROOT)
- auto_init += f"from .{module_name} import {spec['task_name']}\n"
+ auto_init += f"from .{spec['task_name']} import {converter.task_name}\n"
with open(PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto" / "_version.py", "w") as f:
- f.write(f"""# Auto-generated by {__file__}, do not edit as it will be overwritten
+ f.write(
+ f"""# Auto-generated by {__file__}, do not edit as it will be overwritten
-auto_version = {auto_version}
-""")
+nipype_version = "{nipype.__version__.split('.dev')[0]}"
+nipype2pydra_version = "{nipype2pydra.__version__.split('.dev')[0]}"
+post_release = (nipype_version + nipype2pydra_version).replace(".", "")
+"""
+ )
with open(PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto" / "__init__.py", "w") as f:
f.write(auto_init)
diff --git a/scripts/pkg_gen/resources/pkg_init.py b/scripts/pkg_gen/resources/pkg_init.py
index fa251d0d..75afa885 100644
--- a/scripts/pkg_gen/resources/pkg_init.py
+++ b/scripts/pkg_gen/resources/pkg_init.py
@@ -3,20 +3,35 @@
imported.
>>> import pydra.engine
->>> import pydra.tasks.freesurfer
+>>> import pydra.tasks.CHANGEME
"""
+from warnings import warn
+from pathlib import Path
+
+pkg_path = Path(__file__).parent.parent
+
try:
- from ._version import __version__ as main_version
+ from ._version import __version__
except ImportError:
- pass
-
-from .auto._version import auto_version # Get version of
+ raise RuntimeError(
+ "pydra-CHANGEME has not been properly installed, please run "
+ f"`pip install -e {str(pkg_path)}` to install a development version"
+ )
+if "nipype" not in __version__:
+ try:
+ from .auto._version import nipype_version, nipype2pydra_version
+ except ImportError:
+ warn(
+ "Nipype interfaces haven't been automatically converted from their specs in "
+ f"`nipype-auto-conv`. Please run `{str(pkg_path / 'nipype-auto-conv' / 'generate')}` "
+ "to generated the converted Nipype interfaces in pydra.tasks.CHANGEME.auto"
+ )
+ else:
+ n_ver = nipype_version.replace(".", "_")
+ n2p_ver = nipype2pydra_version.replace(".", "_")
+ __version__ += (
+ "_" if "+" in __version__ else "+"
+ ) + f"nipype{n_ver}_nipype2pydra{n2p_ver}"
-if ".dev" in main_version:
- main_version, dev_version = main_version.split(".dev")
-else:
- dev_version = None
-__version__ = main_version + "." + auto_version
-if dev_version:
- __version__ += ".dev" + dev_version
+__all__ = ["__version__"]
diff --git a/tests/test_task.py b/tests/test_task.py
index b066a2df..6dbc5756 100644
--- a/tests/test_task.py
+++ b/tests/test_task.py
@@ -2,6 +2,7 @@
import yaml
from conftest import show_cli_trace
import pytest
+import shutil
import logging
from nipype2pydra.cli import task as task_cli
from nipype2pydra.utils import add_to_sys_path
@@ -21,11 +22,13 @@
]
-def test_task_conversion(task_spec_file, cli_runner, work_dir):
+def test_task_conversion(task_spec_file, cli_runner, work_dir, gen_test_conftest):
with open(task_spec_file) as f:
task_spec = yaml.safe_load(f)
pkg_root = work_dir / "src"
+ pkg_root.mkdir()
+ # shutil.copyfile(gen_test_conftest, pkg_root / "conftest.py")
output_module_path = f"nipype2pydratest.{task_spec_file.stem.lower()}"