From 73d1e96b775ba56399c6e3f8cb3d70d311dbe3c6 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Tue, 25 Jul 2023 10:55:37 +1000 Subject: [PATCH 01/42] implement restructure of specs to add additional level --- .../task/ants_registration_Registration.yaml | 36 +- ...ts_segmentation_N4BiasFieldCorrection.yaml | 27 +- ...eesurfer_preprocess_ApplyVolTransform.yaml | 27 +- nipype2pydra/task.py | 376 ++++++++++++++---- pyproject.toml | 3 + scripts/pkg_gen/resources/conftest.py | 71 ++++ 6 files changed, 432 insertions(+), 108 deletions(-) create mode 100644 scripts/pkg_gen/resources/conftest.py diff --git a/example-specs/task/ants_registration_Registration.yaml b/example-specs/task/ants_registration_Registration.yaml index 8f848476..deb20aea 100644 --- a/example-specs/task/ants_registration_Registration.yaml +++ b/example-specs/task/ants_registration_Registration.yaml @@ -1,16 +1,32 @@ task_name: Registration nipype_module: nipype.interfaces.ants.registration -output_requirements: - output_warped_image: ["fixed_image", "moving_image", "output_transform_prefix"] -output_templates: - output_warped_image: "{output_transform_prefix}warped" +nipype_name: +inputs: + omit: + rename: + types: + fixed_image: medimage/nifti-gz + moving_image: medimage/nifti-gz + metadata: +outputs: + omit: + rename: + types: + callables: + requirements: + output_warped_image: ["fixed_image", "moving_image", "output_transform_prefix"] + templates: + output_warped_image: "{output_transform_prefix}warped" +test: doctest: - fixed_image: test.nii.gz - moving_image: test.nii.gz cmdline: >- antsRegistration --output [ output_, output_warped_image.nii.gz ] - --metric Mattes[ test.nii, test.nii, 1, 32, Random, 0.05 ] - tests_inputs: [] - tests_outputs: - - AttributeError + --metric Mattes[ /mock/medimage/nifti-gz.nii.gz, /mock/medimage/nifti-gz.nii.gz, + 1, 32, Random, 0.05 ] + inputs: + fixed_image: + moving_image: + + + \ No newline at end of file diff --git 
a/example-specs/task/ants_segmentation_N4BiasFieldCorrection.yaml b/example-specs/task/ants_segmentation_N4BiasFieldCorrection.yaml index 7d2b8fdf..eabb3de6 100644 --- a/example-specs/task/ants_segmentation_N4BiasFieldCorrection.yaml +++ b/example-specs/task/ants_segmentation_N4BiasFieldCorrection.yaml @@ -1,12 +1,21 @@ -nipype_module: nipype.interfaces.ants.segmentation task_name: N4BiasFieldCorrection -output_requirements: - output_image: [] - bias_image: ["save_bias"] -output_templates: - output_image: "" +nipype_module: nipype.interfaces.ants.segmentation +nipype_name: +inputs: + omit: + rename: + types: + metadata: +outputs: + omit: + rename: + types: + callables: + templates: + requirements: + bias_image: ["save_bias"] +test: doctest: - input_image: test.nii.gz cmdline: -tests_inputs: [] -tests_outputs: [] # - AttributeError + inputs: + input_image: test.nii.gz diff --git a/example-specs/task/freesurfer_preprocess_ApplyVolTransform.yaml b/example-specs/task/freesurfer_preprocess_ApplyVolTransform.yaml index 14fe49e4..441f2ac9 100644 --- a/example-specs/task/freesurfer_preprocess_ApplyVolTransform.yaml +++ b/example-specs/task/freesurfer_preprocess_ApplyVolTransform.yaml @@ -1,10 +1,21 @@ -nipype_module: nipype.interfaces.freesurfer.preprocess task_name: ApplyVolTransform -output_requirements: [] -output_templates: - transformed_file: "{source_file}_warped" +nipype_module: nipype.interfaces.freesurfer.preprocess +nipype_name: +inputs: + omit: + rename: + types: + source_file: medimage/nifti-gz + metadata: +outputs: + omit: + rename: + types: + callables: + templates: + transformed_file: "{source_file}_warped" + requirements: doctest: - source_file: test.nii.gz - cmdline: mri_vol2vol -tests_inputs: [] -tests_outputs: [] \ No newline at end of file + cmdline: mri_vol2vol /mock/medimage/nifti-gz.nii.gz + inputs: + source_file: diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py index 7cbeb531..de14d48d 100644 --- a/nipype2pydra/task.py +++ 
b/nipype2pydra/task.py @@ -1,40 +1,244 @@ import os from pathlib import Path import typing as ty -from types import ModuleType import re +from importlib import import_module +from types import ModuleType import inspect import black import traits import attrs +from attrs.converters import default_if_none import nipype.interfaces.base from nipype.interfaces.base import traits_extension from pydra.engine import specs from pydra.engine.helpers import ensure_list from .utils import import_module_from_path +from fileformats.core import DataType + + +def str_to_type(type_str: str) -> type: + """Resolve a string representation of a type into a valid type""" + if "/" in type_str: + tp = DataType.from_mime(type_str) + try: + # If datatype is a field, use its primitive instead + tp = tp.primitive # type: ignore + except AttributeError: + pass + elif "." in type_str: + parts = type_str.split(".") + module = import_module(".".join(parts[:-1])) + tp = getattr(module, parts[-1]) + if not inspect.isclass(tp): + raise TypeError(f"Designated type at {type_str} is not a class {tp}") + elif re.match(r"^\w+$", type_str): + tp = eval(type_str) + else: + raise ValueError(f"Cannot parse {type_str} to a type safely") + return tp + + +def types_converter(types: ty.Dict[str, ty.Union[str, type]]) -> ty.Dict[str, type]: + if types is None: + return {} + converted = {} + for name, tp_or_str in types.items(): + if isinstance(tp_or_str, str): + if tp_or_str.startswith("union:"): + union_tps = tuple( + str_to_type(p) for p in tp_or_str[len("union:") :].split(",") + ) + tp: ty.Type[ty.Union] = ty.Union.__getitem__(union_tps) # type: ignore + else: + tp = str_to_type(tp_or_str) + converted[name] = tp + return converted + + +@attrs.define +class SpecConverter: + omit: ty.List[str] = attrs.field( + factory=list, converter=default_if_none(factory=list) # type: ignore + ) + rename: ty.Dict[str, str] = attrs.field( + factory=dict, converter=default_if_none(factory=dict) # type: ignore + ) + types: 
ty.Dict[str, type] = attrs.field(converter=types_converter, factory=dict) + + +@attrs.define +class InputsConverter(SpecConverter): + """Specification of how to conver Nipype inputs into Pydra inputs + + Parameters + ---------- + omit : list[str], optional + input fields to omit from the Pydra interface + rename : dict[str, str], optional + input fields to rename in the Pydra interface + types : dict[str, type], optional + types to set explicitly (i.e. instead of determining from nipype interface), + particularly relevant for file-types, where specifying the format will determine + the type of file that is passed to the field in the automatically generated unittests + metadata: dict[str, dict[str, Any]], optional + additional metadata to set on any of the input fields (e.g. out_file: position: 1) + """ + + metadata: ty.Dict[str, ty.Dict[str, ty.Any]] = attrs.field( + factory=dict, converter=default_if_none(factory=dict) # type: ignore + ) + + +@attrs.define +class OutputsConverter(SpecConverter): + """Specification of how to conver Nipype outputs into Pydra outputs + + Parameters + ---------- + omit : list[str], optional + input fields to omit from the Pydra interface + rename : dict[str, str], optional + input fields to rename in the Pydra interface + types : dict[str, type], optional + types to set explicitly (i.e. 
instead of determining from nipype interface), + particularly relevant for file-types, where specifying the format will determine + the type of file that is passed to the field in the automatically generated unittests + callables : dict[str, str or callable], optional + callables that need to be set in order to extract the values of the outputs + templates : dict[str, str], optional + templates that need to be provided to the outputs + requirements : dict[str, list[str]] + input fields that are required to be provided for the output field to be present + """ + + callables: ty.Dict[str, str] = attrs.field( + factory=dict, converter=default_if_none(factory=dict) # type: ignore + ) + templates: ty.Dict[str, str] = attrs.field( + factory=dict, converter=default_if_none(factory=dict) # type: ignore + ) + requirements: ty.Dict[str, ty.List[str]] = attrs.field( + factory=dict, converter=default_if_none(factory=dict) # type: ignore + ) + + @callables.validator + def callables_validator(self, _, output_callables: dict): + overlapping = set(output_callables.keys()) & set(self.templates.keys()) + if overlapping: + raise ValueError( + f"callables and templates have overlapping same keys: {overlapping}" + ) + + +@attrs.define +class TestsGenerator: + """Specifications for the automatically generated test for the generated Nipype spec + + Parameters + ---------- + inputs : dict[str, str], optional + values to provide to specific inputs fields (if not provided, a sensible value + within the valid range will be provided) + outputs: dict[str, str], optional + expected values for selected outputs, noting that in tests will typically + be terminated before they complete for time-saving reasons and will therefore + be ignored + timeout: int, optional + the time to wait for in order to be satisfied that the tool has been initialised + and performs any internal validation before exiting + """ + + inputs: ty.Dict[str, str] = attrs.field( + factory=dict, 
converter=default_if_none(factory=dict) # type: ignore + ) + outputs: ty.Dict[str, str] = attrs.field( + factory=dict, converter=default_if_none(factory=dict) # type: ignore + ) + timeout: int = 10 + + +@attrs.define +class DocTestGenerator: + """Specifies how the doctest should be constructed + + Parameters + ---------- + cmdline: str + the expected cmdline output + inputs : dict[str, str or None] + name-value pairs for inputs to be provided to the doctest. If the value is None + then the ".mock()" method of the corresponding class is used instead (only + valid for file-format types). + """ + + cmdline: str + inputs: ty.Dict[str, str] = attrs.field(factory=dict) + + +T = ty.TypeVar("T") + + +def convert_from_dict(obj: object, klass: ty.Type[T]) -> T: + if obj is None: + obj = klass() + elif isinstance(obj, dict): + obj = klass(**obj) + elif not isinstance(obj, klass): + raise TypeError( + f"Input must be of type {klass} or dict, not {type(obj)}: {obj}" + ) + return obj + @attrs.define class TaskConverter: + """Specifies how the semi-automatic conversion from Nipype to Pydra should + be performed + + Parameters + ---------- + task_name: str + name of the Pydra task + nipype_module: str or ModuleType + the nipype module or module path containing the Nipype interface + nipype_name: str, optional + the name of the task in the nipype module, defaults to the output task_name + output_module: str + relative path to the package root to write the output module to ('.' delimited) + inputs: InputsConverter or dict + specficiations for the conversion of inputs + outputs: OutputsConverter or dict + specficiations for the conversion of inputs + test: TestsGenerator or dict, optional + specficiations for how to construct the test. A default test is generated if no + specs are provided + doctest: DocTestGenerator or dict, optional + specifications for how to construct the docttest. 
Doctest is omitted if not + provided + callables_module: ModuleType or str, optional + a module, or path to a module, containing any required callables + """ task_name: str nipype_module: ModuleType = attrs.field(converter=import_module_from_path) - output_requirements: dict = attrs.field(factory=dict) - inputs_metadata: dict = attrs.field(factory=dict) - inputs_drop: dict = attrs.field(factory=dict) - output_templates: dict = attrs.field(factory=dict) - output_callables: dict = attrs.field(factory=dict) - doctest: dict = attrs.field(factory=dict) - tests_inputs: list = attrs.field(factory=list) - tests_outputs: list = attrs.field(factory=list) output_module: str = attrs.field(default=None) + nipype_name: str = attrs.field(default=None) + inputs: InputsConverter = attrs.field( # type: ignore + factory=InputsConverter, + converter=lambda x: convert_from_dict(x, InputsConverter), + ) + outputs: OutputsConverter = attrs.field( # type: ignore + factory=OutputsConverter, + converter=lambda x: convert_from_dict(x, OutputsConverter), + ) callables_module: ModuleType = attrs.field( converter=import_module_from_path, default=None ) - - @output_callables.validator - def output_callables_validator(self, _, output_callables: dict): - if not output_callables.keys().isdisjoint(self.output_templates.keys()): - raise Exception("output_callables and output_templates have the same keys") + test: TestsGenerator = attrs.field( # type: ignore + factory=TestsGenerator, converter=lambda x: convert_from_dict(x, TestsGenerator) + ) + doctest: ty.Optional[DocTestGenerator] = attrs.field(default=None) def __attrs_post_init__(self): if self.output_module is None: @@ -42,7 +246,8 @@ def __attrs_post_init__(self): self.output_module = ( "pydra.tasks." + self.nipype_module.__name__[len("nipype.interfaces.") :] - + "." + self.task_name.lower() + + "." 
+ + self.task_name.lower() ) else: raise RuntimeError( @@ -51,10 +256,12 @@ def __attrs_post_init__(self): f"as {self.nipype_module.__name__}.{self.task_name} (i.e. not in " "nipype.interfaces)" ) + if self.nipype_name is None: + self.nipype_name = self.task_name @property def nipype_interface(self) -> nipype.interfaces.base.BaseInterface: - return getattr(self.nipype_module, self.task_name) + return getattr(self.nipype_module, self.nipype_name) @property def nipype_input_spec(self) -> nipype.interfaces.base.BaseInterfaceInputSpec: @@ -71,16 +278,26 @@ def generate(self, package_root: Path): input_fields, inp_templates = self.convert_input_fields() output_fields = self.convert_output_spec(fields_from_template=inp_templates) - output_file = Path(package_root).joinpath(*self.output_module.split(".")).with_suffix(".py") + nonstd_types = set( + f[1] + for f in input_fields + if f[1].__module__ not in ["builtins", "pathlib", "typing"] + ) + + output_file = ( + Path(package_root) + .joinpath(*self.output_module.split(".")) + .with_suffix(".py") + ) testdir = output_file.parent / "tests" testdir.mkdir(parents=True) self.write_task(output_file, input_fields, output_fields) - filename_test = testdir / f"test_spec_{self.task_name.lower()}.py" - filename_test_run = testdir / f"test_run_{self.task_name.lower()}.py" - self.write_test(filename_test=filename_test) - self.write_test(filename_test=filename_test_run, run=True) + filename_test = testdir / f"test_{self.task_name.lower()}.py" + # filename_test_run = testdir / f"test_run_{self.task_name.lower()}.py" + self.write_test(filename_test=filename_test, nonstd_types=nonstd_types) + # self.write_test(filename_test=filename_test_run, run=True) def convert_input_fields(self): """creating fields list for pydra input spec""" @@ -90,7 +307,7 @@ def convert_input_fields(self): for name, fld in self.nipype_input_spec.traits().items(): if name in self.TRAITS_IRREL: continue - if name in self.inputs_drop: + if name in self.inputs.omit: 
continue fld_pdr, pos = self.pydra_fld_input(fld, name) meta_pdr = fld_pdr[-1] @@ -106,8 +323,8 @@ def convert_input_fields(self): def pydra_fld_input(self, field, nm): """converting a single nipype field to one element of fields for pydra input_spec""" tp_pdr = self.pydra_type_converter(field, spec_type="input", name=nm) - if nm in self.inputs_metadata: - metadata_extra_spec = self.inputs_metadata[nm] + if nm in self.inputs.metadata: + metadata_extra_spec = self.inputs.metadata[nm] else: metadata_extra_spec = {} @@ -140,14 +357,19 @@ def pydra_fld_input(self, field, nm): if tp_pdr in [specs.File, specs.Directory]: tp_pdr = str elif getattr(field, "genfile"): - if nm in self.output_templates: - metadata_pdr["output_file_template"] = self.output_templates[nm] + if nm in self.outputs.templates: + try: + metadata_pdr["output_file_template"] = self.outputs.templates[nm] + except KeyError: + raise Exception( + f"{nm} is has genfile and therefore needs an 'output_file_template' value" + ) if tp_pdr in [ specs.File, specs.Directory, ]: # since this is a template, the file doesn't exist - tp_pdr = str - elif nm not in self.output_callables: + tp_pdr = Path + elif nm not in self.outputs.callables: raise Exception( f"the filed {nm} has genfile=True, but no output template or callables_module provided" ) @@ -165,7 +387,7 @@ def convert_output_spec(self, fields_from_template): """creating fields list for pydra input spec""" fields_pdr_l = [] for name, fld in self.nipype_output_spec.traits().items(): - if name in self.output_requirements and name not in fields_from_template: + if name in self.outputs.requirements and name not in fields_from_template: fld_pdr = self.pydra_fld_output(fld, name) fields_pdr_l.append((name,) + fld_pdr) return fields_pdr_l @@ -181,14 +403,14 @@ def pydra_fld_output(self, field, name): if val: metadata_pdr[key_nm_pdr] = val - if self.output_requirements[name]: - if all([isinstance(el, list) for el in self.output_requirements[name]]): - requires_l = 
self.output_requirements[name] + if self.outputs.requirements[name]: + if all([isinstance(el, list) for el in self.outputs.requirements[name]]): + requires_l = self.outputs.requirements[name] nested_flag = True elif all( - [isinstance(el, (str, dict)) for el in self.output_requirements[name]] + [isinstance(el, (str, dict)) for el in self.outputs.requirements[name]] ): - requires_l = [self.output_requirements[name]] + requires_l = [self.outputs.requirements[name]] nested_flag = False else: Exception("has to be either list of list or list of str/dict") @@ -205,16 +427,16 @@ def pydra_fld_output(self, field, name): if nested_flag is False: metadata_pdr["requires"] = metadata_pdr["requires"][0] - if name in self.output_templates: + if name in self.outputs.templates: metadata_pdr["output_file_template"] = self.interface_spec[ "output_templates" ][name] - elif name in self.output_callables: - metadata_pdr["callable"] = self.output_callables[name] + elif name in self.outputs.callables: + metadata_pdr["callable"] = self.outputs.callables[name] return (tp_pdr, metadata_pdr) def function_callables(self): - if not self.output_callables: + if not self.outputs.callables: return "" python_functions_spec = ( Path(os.path.dirname(__file__)) / "../specs/callables.py" @@ -224,7 +446,7 @@ def function_callables(self): "specs/callables.py file is needed if output_callables in the spec files" ) fun_str = "" - fun_names = list(set(self.output_callables.values())) + fun_names = list(set(self.outputs.callables.values())) fun_names.sort() for fun_nm in fun_names: fun = getattr(self.callables_module, fun_nm) @@ -237,6 +459,11 @@ def pydra_type_converter(self, field, spec_type, name): raise Exception( f"spec_type has to be input or output, but {spec_type} provided" ) + types_dict = self.inputs.types if spec_type == "inputs" else self.outputs.types + try: + return types_dict[name] + except KeyError: + pass tp = field.trait_type if isinstance(tp, traits.trait_types.Int): tp_pdr = int @@ -272,7 
+499,7 @@ def pydra_type_converter(self, field, spec_type, name): ): # TODO check the hash_file metadata in nipype tp_pdr = specs.File else: - tp_pdr = str + tp_pdr = Path else: tp_pdr = ty.Any return tp_pdr @@ -295,7 +522,7 @@ def string_formats(self, argstr, name): raise Exception(f"format from {argstr} is not supported TODO") return argstr_new - def write_task(self, filename, input_fields, output_fields): + def write(self, filename, input_fields, output_fields): """writing pydra task to the dile based on the input and output spec""" def types_to_names(spec_fields): @@ -342,54 +569,41 @@ def types_to_names(spec_fields): with open(filename, "w") as f: f.write(spec_str_black) - def write_test(self, filename_test, run=False): + def write_test(self, filename_test, nonstd_types, run=False): """writing tests for the specific interface based on the test spec (from interface_spec) if run is True the test contains task run, if run is False only the spec is check by the test """ - tests_inputs = self.tests_inputs - tests_outputs = self.tests_outputs - if len(tests_inputs) != len(tests_outputs): - raise Exception("tests and tests_outputs should have the same length") - - tests_inp_outp = [] - tests_inp_error = [] - for i, out in enumerate(tests_outputs): - if isinstance(out, list): - tests_inp_outp.append((tests_inputs[i], out)) - elif out is None: - tests_inp_outp.append((tests_inputs[i], [])) - # allowing for incomplete or incorrect inputs that should raise an exception - elif out not in ["AttributeError", "Exception"]: - tests_inp_outp.append((tests_inputs[i], [out])) - else: - tests_inp_error.append((tests_inputs[i], out)) + # if len(self.test.inputs) != len(self.test.outputs): + # raise Exception("tests and self.test.outputs should have the same length") + + # tests_inp_outp = [] + # tests_inp_error = [] + # for i, out in enumerate(self.test.outputs): + # if isinstance(out, list): + # tests_inp_outp.append((self.test.inputs[i], out)) + # elif out is None: + # 
tests_inp_outp.append((self.test.inputs[i], [])) + # # allowing for incomplete or incorrect inputs that should raise an exception + # elif out not in ["AttributeError", "Exception"]: + # tests_inp_outp.append((self.test.inputs[i], [out])) + # else: + # tests_inp_error.append((self.test.inputs[i], out)) spec_str = "import os, pytest \nfrom pathlib import Path\n" - spec_str += f"from {self.output_module} import {self.task_name} \n\n" - if run: - pass - spec_str += f"@pytest.mark.parametrize('inputs, outputs', {tests_inp_outp})\n" - spec_str += f"def test_{self.task_name.lower()}(test_data, inputs, outputs):\n" - spec_str += " in_file = Path(test_data) / 'test.nii.gz'\n" - spec_str += " if inputs is None: inputs = {{}}\n" - spec_str += " for key, val in inputs.items():\n" - spec_str += " try: inputs[key] = eval(val)\n" - spec_str += " except: pass\n" - spec_str += f" task = {self.task_name}(in_file=in_file, **inputs)\n" - spec_str += ( - " assert set(task.generated_output_names) == " - "set(['return_code', 'stdout', 'stderr'] + outputs)\n" - ) - - if run: + spec_str += f"from {self.output_module} import {self.task_name}\n" + for tp in nonstd_types: + spec_str += f"from {tp.__module__} import {tp.__name__}\n" + spec_str += "\n" + spec_str += f"@pytest.mark.timeout_pass(timeout={self.test.timeout})" + spec_str += f"def test_{self.task_name.lower()}():\n" + spec_str += f" task = {self.task_name}()\n" + for field in self.nipype_input_spec: + value = field.default + spec_str += f" task.inputs.{field.name} = {value}\n" spec_str += " res = task()\n" spec_str += " print('RESULT: ', res)\n" - spec_str += " for out_nm in outputs: assert getattr(res.output, out_nm).exists()\n" - - # if test_inp_error is not empty, than additional test function will be created - if tests_inp_error: - spec_str += self.write_test_error(input_error=tests_inp_error) + # spec_str += " for out_nm in outputs: assert getattr(res.output, out_nm).exists()\n" spec_str_black = black.format_file_contents( 
spec_str, fast=False, mode=black.FileMode() diff --git a/pyproject.toml b/pyproject.toml index b58a5c0a..ffd54527 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,6 +13,9 @@ dependencies = [ "nipype", "pydra", "PyYAML>=6.0", + "fileformats >=0.8", + "fileformats-medimage >=0.4", + "traits", ] license = {file = "LICENSE"} authors = [ diff --git a/scripts/pkg_gen/resources/conftest.py b/scripts/pkg_gen/resources/conftest.py new file mode 100644 index 00000000..89d3215f --- /dev/null +++ b/scripts/pkg_gen/resources/conftest.py @@ -0,0 +1,71 @@ +import os +import typing as ty +import time +import logging +from pathlib import Path +import tempfile +import pytest + + +try: + from pydra import set_input_validator + + set_input_validator(True) +except ImportError: + pass +from fileformats.core.utils import include_testing_package + +include_testing_package(True) + +# Set DEBUG logging for unittests + +log_level = logging.WARNING + +logger = logging.getLogger("fileformats") +logger.setLevel(log_level) + +sch = logging.StreamHandler() +sch.setLevel(log_level) +formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") +sch.setFormatter(formatter) +logger.addHandler(sch) + + +# For debugging in IDE's don't catch raised exceptions and let the IDE +# break at it +if os.getenv("_PYTEST_RAISE", "0") != "0": + + @pytest.hookimpl(tryfirst=True) + def pytest_exception_interact(call): + raise call.excinfo.value + + @pytest.hookimpl(tryfirst=True) + def pytest_internalerror(excinfo): + raise excinfo.value + + +@pytest.fixture +def work_dir(): + work_dir = tempfile.mkdtemp() + return Path(work_dir) + + +def pytest_configure(config): + config.addinivalue_line( + "markers", "timeout_pass: mark test as passing if it runs until timeout" + ) + + +def pytest_runtest_protocol(item, nextitem): + marker = item.get_closest_marker("timeout_pass") + if marker is not None: + timeout = marker.kwargs.get("timeout", None) + if timeout is not None: + start_time = 
time.time() + timeout_duration = timeout + + while time.time() - start_time < timeout_duration: + nextitem() + pytest.xpass(f"Test passed by running through the timeout of {timeout_duration} seconds.") + + return None From 2cbdfadfc913f5bad8652a73f060aaa5273dc4fb Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 26 Jul 2023 08:19:27 +1000 Subject: [PATCH 02/42] added hep strings to spec metadata and redesigned generated test --- nipype2pydra/task.py | 288 +++++++++++++++++++++++++++++-------------- 1 file changed, 195 insertions(+), 93 deletions(-) diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py index de14d48d..30aa65e9 100644 --- a/nipype2pydra/task.py +++ b/nipype2pydra/task.py @@ -6,7 +6,7 @@ from types import ModuleType import inspect import black -import traits +import traits.trait_types import attrs from attrs.converters import default_if_none import nipype.interfaces.base @@ -14,7 +14,7 @@ from pydra.engine import specs from pydra.engine.helpers import ensure_list from .utils import import_module_from_path -from fileformats.core import DataType +from fileformats.core import DataType, FileSet def str_to_type(type_str: str) -> type: @@ -59,12 +59,26 @@ def types_converter(types: ty.Dict[str, ty.Union[str, type]]) -> ty.Dict[str, ty @attrs.define class SpecConverter: omit: ty.List[str] = attrs.field( - factory=list, converter=default_if_none(factory=list) # type: ignore + factory=list, + converter=default_if_none(factory=list), # type: ignore + metadata={"help": "Fields to omit from the Pydra interface"}, ) rename: ty.Dict[str, str] = attrs.field( - factory=dict, converter=default_if_none(factory=dict) # type: ignore + factory=dict, + converter=default_if_none(factory=dict), # type: ignore + metadata={"help": "fields to rename in the Pydra interface"}, + ) + types: ty.Dict[str, type] = attrs.field( + converter=types_converter, + factory=dict, + metadata={ + "help": """Override inferred type (use mime-type string for file-format types). 
+ Most of the time the correct type will be inferred from the nipype interface, + but you may want to be more specific, typically for the case of file types + where specifying the format will change the type of file that will be + passed to the field in the automatically generated unittests.""" + }, ) - types: ty.Dict[str, type] = attrs.field(converter=types_converter, factory=dict) @attrs.define @@ -78,15 +92,21 @@ class InputsConverter(SpecConverter): rename : dict[str, str], optional input fields to rename in the Pydra interface types : dict[str, type], optional - types to set explicitly (i.e. instead of determining from nipype interface), - particularly relevant for file-types, where specifying the format will determine - the type of file that is passed to the field in the automatically generated unittests + Override inferred type (use mime-type string for file-format types). + Most of the time the correct type will be inferred from the nipype interface, + but you may want to be more specific, typically for the case of file types + where specifying the format will change the type of file that will be + passed to the field in the automatically generated unittests. metadata: dict[str, dict[str, Any]], optional additional metadata to set on any of the input fields (e.g. out_file: position: 1) """ metadata: ty.Dict[str, ty.Dict[str, ty.Any]] = attrs.field( - factory=dict, converter=default_if_none(factory=dict) # type: ignore + factory=dict, + converter=default_if_none(factory=dict), # type: ignore + metadata={ + "help": "additional metadata to set on any of the input fields (e.g. 
out_file: position: 1)" + }, ) @@ -105,21 +125,35 @@ class OutputsConverter(SpecConverter): particularly relevant for file-types, where specifying the format will determine the type of file that is passed to the field in the automatically generated unittests callables : dict[str, str or callable], optional - callables that need to be set in order to extract the values of the outputs + names of methods/callable classes defined in the adjacent `*_callables.py` + to set to the `callable` attribute of output fields templates : dict[str, str], optional - templates that need to be provided to the outputs + `output_file_template` values to be provided to output fields requirements : dict[str, list[str]] input fields that are required to be provided for the output field to be present """ callables: ty.Dict[str, str] = attrs.field( - factory=dict, converter=default_if_none(factory=dict) # type: ignore + factory=dict, + converter=default_if_none(factory=dict), # type: ignore + metadata={ + "help": """names of methods/callable classes defined in the adjacent `*_callables.py` + to set to the `callable` attribute of output fields""" + }, ) templates: ty.Dict[str, str] = attrs.field( - factory=dict, converter=default_if_none(factory=dict) # type: ignore + factory=dict, + converter=default_if_none(factory=dict), # type: ignore + metadata={ + "help": "`output_file_template` values to be provided to output fields" + }, ) requirements: ty.Dict[str, ty.List[str]] = attrs.field( - factory=dict, converter=default_if_none(factory=dict) # type: ignore + factory=dict, + converter=default_if_none(factory=dict), # type: ignore + metadata={ + "help": "input fields that are required to be provided for the output field to be present" + }, ) @callables.validator @@ -150,12 +184,30 @@ class TestsGenerator: """ inputs: ty.Dict[str, str] = attrs.field( - factory=dict, converter=default_if_none(factory=dict) # type: ignore + factory=dict, + converter=default_if_none(factory=dict), # type: ignore + 
metadata={ + "help": """values to provide to specific inputs fields (if not provided, + a sensible value within the valid range will be provided""" + }, ) outputs: ty.Dict[str, str] = attrs.field( - factory=dict, converter=default_if_none(factory=dict) # type: ignore + factory=dict, converter=default_if_none(factory=dict), # type: ignore + metadata={ + "help": """expected values for selected outputs, noting that in tests will typically + be terminated before they complete for time-saving reasons and will therefore + be ignored""" + }, + ) + timeout: int = attrs.field( + default=10, + metadata={ + "help": """The value to set for the timeout in the generated test, + "after which the test will be considered to have been initialised + "successulfully. Set to 0 to disable the timeout (warning, this could + lead to the unittests taking a very long time to complete)""" + } ) - timeout: int = 10 @attrs.define @@ -172,23 +224,53 @@ class DocTestGenerator: valid for file-format types). """ - cmdline: str - inputs: ty.Dict[str, str] = attrs.field(factory=dict) + cmdline: str = attrs.field(metadata={"help": "the expected cmdline output"}) + inputs: ty.Dict[str, str] = attrs.field( + factory=dict, + metadata={ + "help": """name-value pairs for inputs to be provided to the doctest. 
+ If the field is of file-format type and the value is None, then the + ".mock()" method of the corresponding class is used instead (only valid + for file-format types)."""} + ) T = ty.TypeVar("T") -def convert_from_dict(obj: object, klass: ty.Type[T]) -> T: +def from_dict_converter( + obj: ty.Union[T, dict], klass: ty.Type[T], allow_none=False +) -> T: if obj is None: - obj = klass() + if allow_none: + converted = None + else: + converted = klass() elif isinstance(obj, dict): - obj = klass(**obj) - elif not isinstance(obj, klass): + converted = klass(**obj) + elif isinstance(obj, klass): + converted = obj + else: raise TypeError( f"Input must be of type {klass} or dict, not {type(obj)}: {obj}" ) - return obj + return converted + + +def from_dict_to_inputs(obj: ty.Union[InputsConverter, dict]) -> InputsConverter: + return from_dict_converter(obj, InputsConverter) + + +def from_dict_to_outputs(obj: ty.Union[OutputsConverter, dict]) -> OutputsConverter: + return from_dict_converter(obj, OutputsConverter) + + +def from_dict_to_test(obj: ty.Union[TestsGenerator, dict]) -> TestsGenerator: + return from_dict_converter(obj, TestsGenerator) + + +def from_dict_to_doctest(obj: ty.Union[DocTestGenerator, dict]) -> DocTestGenerator: + return from_dict_converter(obj, DocTestGenerator, allow_none=True) @attrs.define @@ -224,21 +306,22 @@ class TaskConverter: nipype_module: ModuleType = attrs.field(converter=import_module_from_path) output_module: str = attrs.field(default=None) nipype_name: str = attrs.field(default=None) - inputs: InputsConverter = attrs.field( # type: ignore - factory=InputsConverter, - converter=lambda x: convert_from_dict(x, InputsConverter), + inputs: InputsConverter = attrs.field( + factory=InputsConverter, converter=from_dict_to_inputs ) outputs: OutputsConverter = attrs.field( # type: ignore factory=OutputsConverter, - converter=lambda x: convert_from_dict(x, OutputsConverter), + converter=from_dict_to_outputs, ) callables_module: ModuleType = 
attrs.field( converter=import_module_from_path, default=None ) test: TestsGenerator = attrs.field( # type: ignore - factory=TestsGenerator, converter=lambda x: convert_from_dict(x, TestsGenerator) + factory=TestsGenerator, converter=from_dict_to_test + ) + doctest: ty.Optional[DocTestGenerator] = attrs.field( + default=None, converter=from_dict_to_doctest ) - doctest: ty.Optional[DocTestGenerator] = attrs.field(default=None) def __attrs_post_init__(self): if self.output_module is None: @@ -292,11 +375,20 @@ def generate(self, package_root: Path): testdir = output_file.parent / "tests" testdir.mkdir(parents=True) - self.write_task(output_file, input_fields, output_fields) + self.write_task( + output_file, + input_fields=input_fields, + output_fields=output_fields, + nonstd_types=nonstd_types, + ) filename_test = testdir / f"test_{self.task_name.lower()}.py" # filename_test_run = testdir / f"test_run_{self.task_name.lower()}.py" - self.write_test(filename_test=filename_test, nonstd_types=nonstd_types) + self.write_test( + filename_test, + input_fields=input_fields, + nonstd_types=nonstd_types, + ) # self.write_test(filename_test=filename_test_run, run=True) def convert_input_fields(self): @@ -522,7 +614,7 @@ def string_formats(self, argstr, name): raise Exception(f"format from {argstr} is not supported TODO") return argstr_new - def write(self, filename, input_fields, output_fields): + def write_task(self, filename, input_fields, nonstd_types, output_fields): """writing pydra task to the dile based on the input and output spec""" def types_to_names(spec_fields): @@ -533,7 +625,7 @@ def types_to_names(spec_fields): el[1] = el[1].__name__ # add 'TYPE_' to the beginning of the name el[1] = "TYPE_" + el[1] - except (AttributeError): + except AttributeError: el[1] = el[1]._name # add 'TYPE_' to the beginning of the name el[1] = "TYPE_" + el[1] @@ -546,15 +638,17 @@ def types_to_names(spec_fields): spec_str = ( "from pydra.engine import specs \nfrom pydra import 
ShellCommandTask \n" ) - spec_str += "import typing as ty\n" + spec_str += self.import_types(nonstd_types) spec_str += functions_str spec_str += f"input_fields = {input_fields_str}\n" spec_str += f"{self.task_name}_input_spec = specs.SpecInfo(name='Input', fields=input_fields, bases=(specs.ShellSpec,))\n\n" spec_str += f"output_fields = {output_fields_str}\n" spec_str += f"{self.task_name}_output_spec = specs.SpecInfo(name='Output', fields=output_fields, bases=(specs.ShellOutSpec,))\n\n" spec_str += f"class {self.task_name}(ShellCommandTask):\n" - if self.doctest: - spec_str += self.create_doctest() + if self.doctest is not None: + spec_str += self.create_doctest( + input_fields=input_fields, nonstd_types=nonstd_types + ) spec_str += f" input_spec = {self.task_name}_input_spec\n" spec_str += f" output_spec = {self.task_name}_output_spec\n" spec_str += f" executable='{self.nipype_interface._cmd}'\n" @@ -569,41 +663,57 @@ def types_to_names(spec_fields): with open(filename, "w") as f: f.write(spec_str_black) - def write_test(self, filename_test, nonstd_types, run=False): - """writing tests for the specific interface based on the test spec (from interface_spec) - if run is True the test contains task run, - if run is False only the spec is check by the test - """ - # if len(self.test.inputs) != len(self.test.outputs): - # raise Exception("tests and self.test.outputs should have the same length") - - # tests_inp_outp = [] - # tests_inp_error = [] - # for i, out in enumerate(self.test.outputs): - # if isinstance(out, list): - # tests_inp_outp.append((self.test.inputs[i], out)) - # elif out is None: - # tests_inp_outp.append((self.test.inputs[i], [])) - # # allowing for incomplete or incorrect inputs that should raise an exception - # elif out not in ["AttributeError", "Exception"]: - # tests_inp_outp.append((self.test.inputs[i], [out])) - # else: - # tests_inp_error.append((self.test.inputs[i], out)) - - spec_str = "import os, pytest \nfrom pathlib import Path\n" - 
spec_str += f"from {self.output_module} import {self.task_name}\n" + @staticmethod + def import_types(nonstd_types: ty.List[type], prefix="") -> str: + imports = "import typing as ty\nfrom pathlib import Path\n" for tp in nonstd_types: - spec_str += f"from {tp.__module__} import {tp.__name__}\n" + imports += f"{prefix}from {tp.__module__} import {tp.__name__}\n" + return imports + + def write_test(self, filename_test, input_fields, nonstd_types, run=False): + spec_str = "import os, pytest \n" + spec_str += self.import_types(nonstd_types=nonstd_types) + spec_str += f"from {self.output_module} import {self.task_name}\n" spec_str += "\n" - spec_str += f"@pytest.mark.timeout_pass(timeout={self.test.timeout})" + spec_str += f"@pytest.mark.timeout_pass(timeout={self.test.timeout})\n" spec_str += f"def test_{self.task_name.lower()}():\n" spec_str += f" task = {self.task_name}()\n" - for field in self.nipype_input_spec: - value = field.default - spec_str += f" task.inputs.{field.name} = {value}\n" - spec_str += " res = task()\n" - spec_str += " print('RESULT: ', res)\n" - # spec_str += " for out_nm in outputs: assert getattr(res.output, out_nm).exists()\n" + for field in input_fields: + nm, tp = field[:2] + # Try to get a sensible value for the traits value + try: + value = self.test.inputs[nm] + except KeyError: + if len(field) == 4: # field has default + value = field[2] + else: + assert len(field) == 3 + if inspect.isclass(tp) and issubclass(tp, FileSet): + value = f"{tp.__name__}.sample()" + else: + trait = self.nipype_interface.input_spec.class_traits()[nm] + if isinstance(trait, traits.trait_types.Enum): + value = trait.values[0] + elif isinstance(trait, traits.trait_types.Range): + value = (trait.high - trait.low) / 2.0 + elif isinstance(trait, traits.trait_types.Bool): + value = True + elif isinstance(trait, traits.trait_types.Int): + value = 1 + elif isinstance(trait, traits.trait_types.Float): + value = 1.0 + elif isinstance(trait, traits.trait_types.List): + 
value = [1] * trait.minlen + elif isinstance(trait, traits.trait_types.Tuple): + value = tuple([1] * len(trait.types)) + else: + value = attrs.NOTHING + if value is not attrs.NOTHING: + spec_str += f" task.inputs.{nm} = {value}\n" + spec_str += " res = task()\n" + spec_str += " print('RESULT: ', res)\n" + for name, value in self.test.outputs.items(): + spec_str += f" assert res.output.{name} == {value}\n" spec_str_black = black.format_file_contents( spec_str, fast=False, mode=black.FileMode() @@ -612,35 +722,27 @@ def write_test(self, filename_test, nonstd_types, run=False): with open(filename_test, "w") as f: f.write(spec_str_black) - def write_test_error(self, input_error): - """creating a tests for incorrect or incomplete inputs - checking if the exceptions are raised - """ - spec_str = "\n\n" - spec_str += f"@pytest.mark.parametrize('inputs, error', {input_error})\n" - spec_str += f"def test_{self.task_name}_exception(test_data, inputs, error):\n" - spec_str += " in_file = Path(test_data) / 'test.nii.gz'\n" - spec_str += " if inputs is None: inputs = {{}}\n" - spec_str += " for key, val in inputs.items():\n" - spec_str += " try: inputs[key] = eval(val)\n" - spec_str += " except: pass\n" - spec_str += f" task = {self.task_name}(in_file=in_file, **inputs)\n" - spec_str += " with pytest.raises(eval(error)):\n" - spec_str += " task.generated_output_names\n" - return spec_str - - def create_doctest(self): + def create_doctest(self, input_fields, nonstd_types): """adding doctests to the interfaces""" - cmdline = self.doctest.pop("cmdline") doctest = ' """\n Example\n -------\n' + doctest += self.import_types(nonstd_types, prefix=" >>> ") doctest += f" >>> task = {self.task_name}()\n" - for key, val in self.doctest.items(): - if type(val) is str: - doctest += f' >>> task.inputs.{key} = "{val}"\n' + for field in input_fields: + nm, tp = field[:2] + try: + val = self.doctest.inputs[nm] + except KeyError: + if inspect.isclass(tp) and issubclass(tp, FileSet): + val = 
f"{tp.__name__}.mock()" + else: + val = attrs.NOTHING else: - doctest += f" >>> task.inputs.{key} = {val}\n" + if type(val) is str: + val = f'"{val}"' + if val is not attrs.NOTHING: + doctest += f" >>> task.inputs.{nm} = {val}\n" doctest += " >>> task.cmdline\n" - doctest += f" '{cmdline}'" + doctest += f" '{self.doctest.cmdline}'" doctest += '\n """\n' return doctest @@ -682,4 +784,4 @@ def create_doctest(self): ("'TYPE_MultiOutputObj'", "specs.MultiOutputObj"), ("'TYPE_MultiInputFile'", "specs.MultiInputFile"), ("'TYPE_MultiOutputFile'", "specs.MultiOutputFile"), - ] \ No newline at end of file + ] From e62f63655bdde9732c9920270c1a1937dcadebf3 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 26 Jul 2023 08:19:45 +1000 Subject: [PATCH 03/42] add conftest to generated test directory --- tests/test_task.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_task.py b/tests/test_task.py index b066a2df..94159ecd 100644 --- a/tests/test_task.py +++ b/tests/test_task.py @@ -2,6 +2,7 @@ import yaml from conftest import show_cli_trace import pytest +import shutil import logging from nipype2pydra.cli import task as task_cli from nipype2pydra.utils import add_to_sys_path @@ -21,11 +22,13 @@ ] -def test_task_conversion(task_spec_file, cli_runner, work_dir): +def test_task_conversion(task_spec_file, cli_runner, work_dir, gen_test_conftest): with open(task_spec_file) as f: task_spec = yaml.safe_load(f) pkg_root = work_dir / "src" + pkg_root.mkdir() + shutil.copyfile(gen_test_conftest, pkg_root / "conftest.py") output_module_path = f"nipype2pydratest.{task_spec_file.stem.lower()}" From 292f61dadc43e432c5f059b93e8af7bc31514594 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 26 Jul 2023 08:20:47 +1000 Subject: [PATCH 04/42] reworked extract ROI example spec --- example-specs/task/fsl_utils_ExtractROI.yaml | 42 ++++++++++++-------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/example-specs/task/fsl_utils_ExtractROI.yaml 
b/example-specs/task/fsl_utils_ExtractROI.yaml index 37c6f5df..49b67c7b 100644 --- a/example-specs/task/fsl_utils_ExtractROI.yaml +++ b/example-specs/task/fsl_utils_ExtractROI.yaml @@ -1,20 +1,30 @@ task_name: ExtractROI nipype_module: nipype.interfaces.fsl.utils -output_requirements: - roi_file: [in_file] -output_templates: - roi_file: "{in_file}_trim" -inputs_drop: - - crop_list +nipype_name: +inputs: + omit: + - crop_list + rename: + types: + in_file: medimage/nifti-gz + metadata: +outputs: + omit: + rename: + types: + callables: + requirements: + roi_file: [in_file] + templates: + roi_file: "{in_file}_trim" doctest: - in_file: test.nii.gz - t_min: 0 - t_size: 3 - roi_file: test_trim.nii.gz cmdline: fslroi test.nii.gz test_trim.nii.gz 0 3 -tests_inputs: [] - # - in_file: test.nii.gz - # t_min: 0 - # t_size: 1 -tests_outputs: [] - # - roi_file \ No newline at end of file + inputs: + t_min: 0 + t_size: 3 + roi_file: test_trim.nii.gz +test: + inputs: + t_min: 0 + t_size: 1 + outputs: \ No newline at end of file From 83b2f22dbbb855625cc6e2b0ee0ad238783cc575 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 26 Jul 2023 08:21:10 +1000 Subject: [PATCH 05/42] add conftest into generated package directories --- conftest.py | 13 +++++++++++++ scripts/pkg_gen/create_packages.py | 5 +++++ 2 files changed, 18 insertions(+) diff --git a/conftest.py b/conftest.py index cdf2e046..12c1180a 100644 --- a/conftest.py +++ b/conftest.py @@ -4,6 +4,7 @@ import tempfile import pytest from click.testing import CliRunner +from fileformats.generic import File PKG_DIR = Path(__file__).parent @@ -12,6 +13,18 @@ EXAMPLE_WORKFLOWS_DIR = EXAMPLE_SPECS_DIR / "workflow" +@File.generate_test_data.register +def file_generate_test_data(file: File, dest_dir: Path): + a_file = dest_dir / "a_file.x" + a_file.write_text("a sample file") + return [a_file] + + +@pytest.fixture +def gen_test_conftest(): + return PKG_DIR / "scripts" / "pkg_gen" / "resources" / "conftest.py" + + 
@pytest.fixture(params=[str(p.stem) for p in (EXAMPLE_TASKS_DIR).glob("*.yaml")]) def task_spec_file(request): return (EXAMPLE_TASKS_DIR / request.param).with_suffix(".yaml") diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index 2bd6b2c2..8a5df727 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -78,16 +78,21 @@ def copy_ignore(_, names): shutil.copytree(task_template, pkg_dir, ignore=copy_ignore) + # Setup script to auto-convert nipype interfaces auto_conv_dir = pkg_dir / "nipype-auto-conv" specs_dir = auto_conv_dir / "specs" specs_dir.mkdir(parents=True) shutil.copy(RESOURCES_DIR / "nipype-auto-convert.py", auto_conv_dir / "generate") os.chmod(auto_conv_dir / "generate", 0o755) # make executable + # Setup GitHub workflows gh_workflows_dir = pkg_dir / ".github" / "workflows" gh_workflows_dir.mkdir(parents=True) shutil.copy(RESOURCES_DIR / "pythonpackage.yaml", gh_workflows_dir / "pythonpackage.yaml") + # Add in conftest.py + shutil.copy(RESOURCES_DIR / "conftest.py", pkg_dir / "conftest.py") + # Add "pydra.tasks..auto to gitignore" with open(pkg_dir / ".gitignore", "a") as f: f.write("\npydra/tasks/{pkg}/auto") From adf8c4e3bc140a239e9dc72f28f0c5d75dd17ed1 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 26 Jul 2023 10:51:54 +1000 Subject: [PATCH 06/42] Restructured yaml stubs in created packages so that they include comments on what to put for each field --- nipype-interfaces-to-import.yaml | 5 -- nipype2pydra/task.py | 22 +++-- scripts/pkg_gen/create_packages.py | 88 +++++++++++++++---- .../{pythonpackage.yml => pythonpackage.yaml} | 0 4 files changed, 81 insertions(+), 34 deletions(-) rename scripts/pkg_gen/resources/{pythonpackage.yml => pythonpackage.yaml} (100%) diff --git a/nipype-interfaces-to-import.yaml b/nipype-interfaces-to-import.yaml index 0d335188..4cb65437 100644 --- a/nipype-interfaces-to-import.yaml +++ b/nipype-interfaces-to-import.yaml @@ -220,17 +220,12 @@ 
interfaces: camino2trackvis/convert: - Camino2Trackvis - Trackvis2Camino - cat12/base: - - Cell - - NestedCell cat12/preprocess: - CAT12Segment - CAT12SANLMDenoising - - Cell2Str cat12/surface: - ExtractAdditionalSurfaceParameters - ExtractROIBasedSurfaceMeasures - - Cell2Str cmtk/cmtk: - CreateMatrix - ROIGen diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py index 30aa65e9..f96e0b73 100644 --- a/nipype2pydra/task.py +++ b/nipype2pydra/task.py @@ -72,10 +72,10 @@ class SpecConverter: converter=types_converter, factory=dict, metadata={ - "help": """Override inferred type (use mime-type string for file-format types). - Most of the time the correct type will be inferred from the nipype interface, - but you may want to be more specific, typically for the case of file types - where specifying the format will change the type of file that will be + "help": """Override inferred type (use mime-type (like) string for file-format types, + e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + from the nipype interface, but you may want to be more specific, particularly + for file types, where specifying the format also specifies the file that will be passed to the field in the automatically generated unittests.""" }, ) @@ -138,7 +138,7 @@ class OutputsConverter(SpecConverter): converter=default_if_none(factory=dict), # type: ignore metadata={ "help": """names of methods/callable classes defined in the adjacent `*_callables.py` - to set to the `callable` attribute of output fields""" + to set to the `callable` attribute of output fields""" }, ) templates: ty.Dict[str, str] = attrs.field( @@ -203,8 +203,8 @@ class TestsGenerator: default=10, metadata={ "help": """The value to set for the timeout in the generated test, - "after which the test will be considered to have been initialised - "successulfully. Set to 0 to disable the timeout (warning, this could + after which the test will be considered to have been initialised + successulfully. 
Set to 0 to disable the timeout (warning, this could lead to the unittests taking a very long time to complete)""" } ) @@ -220,8 +220,7 @@ class DocTestGenerator: the expected cmdline output inputs : dict[str, str or None] name-value pairs for inputs to be provided to the doctest. If the value is None - then the ".mock()" method of the corresponding class is used instead (only - valid for file-format types). + then the ".mock()" method of the corresponding class is used instead. """ cmdline: str = attrs.field(metadata={"help": "the expected cmdline output"}) @@ -230,8 +229,7 @@ class DocTestGenerator: metadata={ "help": """name-value pairs for inputs to be provided to the doctest. If the field is of file-format type and the value is None, then the - ".mock()" method of the corresponding class is used instead (only valid - for file-format types)."""} + '.mock()' method of the corresponding class is used instead."""} ) @@ -454,7 +452,7 @@ def pydra_fld_input(self, field, nm): metadata_pdr["output_file_template"] = self.outputs.templates[nm] except KeyError: raise Exception( - f"{nm} is has genfile and therefore needs an 'output_file_template' value" + f"{nm} is has genfile=True and therefore needs an 'output_file_template' value" ) if tp_pdr in [ specs.File, diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index 8a5df727..c8695cc3 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -2,13 +2,18 @@ import typing as ty import tempfile import re +from importlib import import_module import subprocess as sp import shutil import tarfile from pathlib import Path +import attrs import requests import click import yaml +import nipype.interfaces.base.core +from nipype2pydra.task import InputsConverter, OutputsConverter, TestsGenerator, DocTestGenerator + RESOURCES_DIR = Path(__file__).parent / "resources" @@ -68,10 +73,11 @@ def generate_packages( shutil.rmtree(output_dir) output_dir.mkdir() + 
not_interfaces = [] + for pkg in to_import["packages"]: pkg_dir = output_dir / f"pydra-{pkg}" - pkg_dir.mkdir() def copy_ignore(_, names): return [n for n in names if n in (".git", "__pycache__", ".pytest_cache")] @@ -87,7 +93,7 @@ def copy_ignore(_, names): # Setup GitHub workflows gh_workflows_dir = pkg_dir / ".github" / "workflows" - gh_workflows_dir.mkdir(parents=True) + gh_workflows_dir.mkdir(parents=True, exist_ok=True) shutil.copy(RESOURCES_DIR / "pythonpackage.yaml", gh_workflows_dir / "pythonpackage.yaml") # Add in conftest.py @@ -117,25 +123,71 @@ def copy_ignore(_, names): module_spec_dir.mkdir(parents=True) for interface in interfaces: callables_fspath = module_spec_dir / f"{interface}_callables.py" + spec_stub = {} + + def fields_stub(type_): + """Used, in conjunction with some find/replaces after dumping, to + insert comments into the YAML file""" + dct = {} + for field in attrs.fields(type_): + tp = field.type + if tp.__module__ == "builtins": + tp_name = tp.__name__ + else: + tp_name = str(tp).lower() + dct[field.name] = f"# {tp_name} - " + field.metadata['help'].replace("\n ", "\n # ") + "#" + return dct + nipype_module_str = "nipype.interfaces." + ".".join(module.split("/")) + nipype_module = import_module(nipype_module_str) + nipype_interface = getattr(nipype_module, interface) + if not issubclass(nipype_interface, nipype.interfaces.base.core.Interface): + not_interfaces.append(f"{module}.{interface}") + continue spec_stub = { "task_name": interface, - "nipype_module": "nipype.interfaces." 
+ ".".join(module.split("/")), - "output_requirements": "# dict[output-field, list[input-field]] : the required input fields for output-field", - "inputs_metadata": "# dict[input-field, dict[str, Any]] : additional metadata to be inserted into input field", - "inputs_drop": "# list[input-field] : input fields to drop from the spec", - "output_templates": "# dict[input-field, str] : \"output_file_template\" to provide to input field", - "output_callables": f"# dict[output-field, str] : name of function defined in {callables_fspath.name} that retrieves value for output", - "doctest": "# dict[str, Any]: key-value pairs to provide as inputs to the doctest + the expected value of \"cmdline\" as special key-value pair", - "tests_inputs": "# List of inputs to pass to tests", - "tests_outputs": "# list of outputs expected from tests", + "nipype_module": nipype_module_str, + "nipype_name": None, + "inputs": fields_stub(InputsConverter), + "outputs": fields_stub(OutputsConverter), + "test": fields_stub(TestsGenerator), + "doctest": fields_stub(DocTestGenerator), } - yaml_str = yaml.dump(spec_stub, indent=2, sort_keys=False) - # strip inserted line-breaks in long strings (so they can be converted to in-line comments) - yaml_str = re.sub(r"\n ", " ", yaml_str) - # extract comments after they have been dumped as strings - yaml_str = re.sub(r"'#(.*)'", r" # \1", yaml_str) + yaml_str = yaml.dump(spec_stub, indent=4, sort_keys=False, width=4096) + yaml_str = re.sub(r"""("|')#""", "\n #", yaml_str) + yaml_str = re.sub(r"""#("|')""", "", yaml_str) + yaml_str = yaml_str.replace("typing.", "") + yaml_str = yaml_str.replace(r"\n", "\n") + yaml_str = yaml_str.replace(" null", "") + inputs_desc = "" + if nipype_interface.input_spec: + for inpt_name, inpt in nipype_interface.input_spec().traits().items(): + if inpt_name in ("trait_added", "trait_modified"): + continue + inpt_desc = inpt.desc.replace('\n', ' ') if inpt.desc else "" + inputs_desc += f"# {inpt_name} 
({type(inpt.trait_type).__name__.lower()}): {inpt_desc}\n" + outputs_desc = "" + if nipype_interface.output_spec: + for outpt_name, outpt in nipype_interface.output_spec().traits().items(): + if inpt_name in ("trait_added", "trait_modified"): + continue + outpt_desc = outpt.desc.replace('\n', ' ') if outpt.desc else "" + outputs_desc += f"# {outpt_name} ({type(outpt.trait_type).__name__.lower()}): {outpt_desc}\n" + # Create a preamble at the top of the specificaiton explaining what to do + preamble = ( + f"""# This file is used to manually specify the semi-automatic conversion + # of '{module}.{interface}' from Nipype to Pydra. Please fill in the empty fields + # below where appropriate + # + # Nipype Inputs Ref. + # ------------------ + {inputs_desc}# + # Nipype Outputs Ref. + # ------------------- + {outputs_desc}\n""" + ).replace(" #", "#") with open(module_spec_dir / (interface + ".yaml"), "w") as f: - f.write(yaml_str) + f.write(preamble + yaml_str) + print(preamble + yaml_str) with open(callables_fspath, "w") as f: f.write( f'"""Module to put any functions that are referred to in {interface}.yaml"""\n' @@ -146,6 +198,8 @@ def copy_ignore(_, names): sp.check_call('git commit -m"initial commit of generated stubs"', shell=True, cwd=pkg_dir) sp.check_call("git tag 0.1.0", shell=True, cwd=pkg_dir) + print("\n".join(not_interfaces)) + if __name__ == "__main__": import sys diff --git a/scripts/pkg_gen/resources/pythonpackage.yml b/scripts/pkg_gen/resources/pythonpackage.yaml similarity index 100% rename from scripts/pkg_gen/resources/pythonpackage.yml rename to scripts/pkg_gen/resources/pythonpackage.yaml From 07b33215523d7fa669ff2c6d704b7e07763ae4fa Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 26 Jul 2023 14:11:50 +1000 Subject: [PATCH 07/42] fine tuning generated conversion spec --- nipype2pydra/task.py | 35 +++-- scripts/pkg_gen/create_packages.py | 215 +++++++++++++++++++++++------ 2 files changed, 192 insertions(+), 58 deletions(-) diff --git 
a/nipype2pydra/task.py b/nipype2pydra/task.py index f96e0b73..55ed928b 100644 --- a/nipype2pydra/task.py +++ b/nipype2pydra/task.py @@ -61,7 +61,7 @@ class SpecConverter: omit: ty.List[str] = attrs.field( factory=list, converter=default_if_none(factory=list), # type: ignore - metadata={"help": "Fields to omit from the Pydra interface"}, + metadata={"help": "fields to omit from the Pydra interface"}, ) rename: ty.Dict[str, str] = attrs.field( factory=dict, @@ -72,7 +72,7 @@ class SpecConverter: converter=types_converter, factory=dict, metadata={ - "help": """Override inferred type (use mime-type (like) string for file-format types, + "help": """override inferred types (use \"mime-like\" string for file-format types, e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred from the nipype interface, but you may want to be more specific, particularly for file types, where specifying the format also specifies the file that will be @@ -187,27 +187,33 @@ class TestsGenerator: factory=dict, converter=default_if_none(factory=dict), # type: ignore metadata={ - "help": """values to provide to specific inputs fields (if not provided, - a sensible value within the valid range will be provided""" + "help": """values to provide to inputs fields in the task initialisation + (if not specified, will try to choose a sensible value)""" }, ) - outputs: ty.Dict[str, str] = attrs.field( + expected_outputs: ty.Dict[str, str] = attrs.field( factory=dict, converter=default_if_none(factory=dict), # type: ignore metadata={ - "help": """expected values for selected outputs, noting that in tests will typically - be terminated before they complete for time-saving reasons and will therefore - be ignored""" + "help": """expected values for selected outputs, noting that tests will typically + be terminated before they complete for time-saving reasons, and therefore + these values will be ignored, when running in CI""" }, ) timeout: int = attrs.field( default=10, metadata={ - 
"help": """The value to set for the timeout in the generated test, + "help": """the value to set for the timeout in the generated test, after which the test will be considered to have been initialised - successulfully. Set to 0 to disable the timeout (warning, this could + successfully. Set to 0 to disable the timeout (warning, this could lead to the unittests taking a very long time to complete)""" } ) + xfail: bool = attrs.field( + default=True, + metadata={ + "help": """whether the unittest is expected to fail or not. Set to false + when you are satisfied with the edits you have made to this file"""} + ) @attrs.define @@ -268,7 +274,10 @@ def from_dict_to_test(obj: ty.Union[TestsGenerator, dict]) -> TestsGenerator: def from_dict_to_doctest(obj: ty.Union[DocTestGenerator, dict]) -> DocTestGenerator: - return from_dict_converter(obj, DocTestGenerator, allow_none=True) + converted = from_dict_converter(obj, DocTestGenerator, allow_none=True) + if converted.inputs is None: + converted = None + return converted @attrs.define @@ -673,6 +682,8 @@ def write_test(self, filename_test, input_fields, nonstd_types, run=False): spec_str += self.import_types(nonstd_types=nonstd_types) spec_str += f"from {self.output_module} import {self.task_name}\n" spec_str += "\n" + if self.test.xfail: + spec_str += "@pytest.mark.xfail\n" spec_str += f"@pytest.mark.timeout_pass(timeout={self.test.timeout})\n" spec_str += f"def test_{self.task_name.lower()}():\n" spec_str += f" task = {self.task_name}()\n" @@ -710,7 +721,7 @@ def write_test(self, filename_test, input_fields, nonstd_types, run=False): spec_str += f" task.inputs.{nm} = {value}\n" spec_str += " res = task()\n" spec_str += " print('RESULT: ', res)\n" - for name, value in self.test.outputs.items(): + for name, value in self.test.expected_outputs.items(): spec_str += f" assert res.output.{name} == {value}\n" spec_str_black = black.format_file_contents( diff --git a/scripts/pkg_gen/create_packages.py 
b/scripts/pkg_gen/create_packages.py index c8695cc3..e5ba1c58 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -4,6 +4,7 @@ import re from importlib import import_module import subprocess as sp +from copy import copy import shutil import tarfile from pathlib import Path @@ -12,7 +13,12 @@ import click import yaml import nipype.interfaces.base.core -from nipype2pydra.task import InputsConverter, OutputsConverter, TestsGenerator, DocTestGenerator +from nipype2pydra.task import ( + InputsConverter, + OutputsConverter, + TestsGenerator, + DocTestGenerator, +) RESOURCES_DIR = Path(__file__).parent / "resources" @@ -28,9 +34,7 @@ def download_tasks_template(output_path: Path): response = requests.get(release_url, headers=headers) if response.status_code != 200: - raise RuntimeError( - f"Did not find release at '{release_url}'" - ) + raise RuntimeError(f"Did not find release at '{release_url}'") data = response.json() tarball_url = data["tarball_url"] @@ -53,7 +57,6 @@ def download_tasks_template(output_path: Path): def generate_packages( output_dir: Path, work_dir: ty.Optional[Path], task_template: ty.Optional[Path] ): - if work_dir is None: work_dir = Path(tempfile.mkdtemp()) @@ -61,11 +64,13 @@ def generate_packages( task_template_tar = work_dir / "task-template.tar.gz" download_tasks_template(task_template_tar) extract_dir = work_dir / "task_template" - with tarfile.open(task_template_tar, 'r:gz') as tar: + with tarfile.open(task_template_tar, "r:gz") as tar: tar.extractall(path=extract_dir) task_template = extract_dir / next(extract_dir.iterdir()) - with open(Path(__file__).parent.parent.parent / "nipype-interfaces-to-import.yaml") as f: + with open( + Path(__file__).parent.parent.parent / "nipype-interfaces-to-import.yaml" + ) as f: to_import = yaml.load(f, Loader=yaml.SafeLoader) # Wipe output dir @@ -76,7 +81,6 @@ def generate_packages( not_interfaces = [] for pkg in to_import["packages"]: - pkg_dir = output_dir / 
f"pydra-{pkg}" def copy_ignore(_, names): @@ -88,13 +92,18 @@ def copy_ignore(_, names): auto_conv_dir = pkg_dir / "nipype-auto-conv" specs_dir = auto_conv_dir / "specs" specs_dir.mkdir(parents=True) - shutil.copy(RESOURCES_DIR / "nipype-auto-convert.py", auto_conv_dir / "generate") + shutil.copy( + RESOURCES_DIR / "nipype-auto-convert.py", auto_conv_dir / "generate" + ) os.chmod(auto_conv_dir / "generate", 0o755) # make executable # Setup GitHub workflows gh_workflows_dir = pkg_dir / ".github" / "workflows" gh_workflows_dir.mkdir(parents=True, exist_ok=True) - shutil.copy(RESOURCES_DIR / "pythonpackage.yaml", gh_workflows_dir / "pythonpackage.yaml") + shutil.copy( + RESOURCES_DIR / "pythonpackage.yaml", + gh_workflows_dir / "pythonpackage.yaml", + ) # Add in conftest.py shutil.copy(RESOURCES_DIR / "conftest.py", pkg_dir / "conftest.py") @@ -104,7 +113,9 @@ def copy_ignore(_, names): f.write("\npydra/tasks/{pkg}/auto") # rename tasks directory - (pkg_dir / "pydra" / "tasks" / "CHANGEME").rename(pkg_dir / "pydra" / "tasks" / pkg) + (pkg_dir / "pydra" / "tasks" / "CHANGEME").rename( + pkg_dir / "pydra" / "tasks" / pkg + ) # Replace "CHANGEME" string with pkg name for fspath in pkg_dir.glob("**/*"): @@ -125,57 +136,49 @@ def copy_ignore(_, names): callables_fspath = module_spec_dir / f"{interface}_callables.py" spec_stub = {} - def fields_stub(type_): - """Used, in conjunction with some find/replaces after dumping, to - insert comments into the YAML file""" - dct = {} - for field in attrs.fields(type_): - tp = field.type - if tp.__module__ == "builtins": - tp_name = tp.__name__ - else: - tp_name = str(tp).lower() - dct[field.name] = f"# {tp_name} - " + field.metadata['help'].replace("\n ", "\n # ") + "#" - return dct nipype_module_str = "nipype.interfaces." 
+ ".".join(module.split("/")) nipype_module = import_module(nipype_module_str) nipype_interface = getattr(nipype_module, interface) - if not issubclass(nipype_interface, nipype.interfaces.base.core.Interface): + if not issubclass( + nipype_interface, nipype.interfaces.base.core.Interface + ): not_interfaces.append(f"{module}.{interface}") continue - spec_stub = { - "task_name": interface, - "nipype_module": nipype_module_str, - "nipype_name": None, - "inputs": fields_stub(InputsConverter), - "outputs": fields_stub(OutputsConverter), - "test": fields_stub(TestsGenerator), - "doctest": fields_stub(DocTestGenerator), - } - yaml_str = yaml.dump(spec_stub, indent=4, sort_keys=False, width=4096) - yaml_str = re.sub(r"""("|')#""", "\n #", yaml_str) - yaml_str = re.sub(r"""#("|')""", "", yaml_str) - yaml_str = yaml_str.replace("typing.", "") - yaml_str = yaml_str.replace(r"\n", "\n") - yaml_str = yaml_str.replace(" null", "") + # Generate preamble comments for file inputs_desc = "" + file_inputs = [] + genfile_outputs = [] if nipype_interface.input_spec: - for inpt_name, inpt in nipype_interface.input_spec().traits().items(): + for inpt_name, inpt in ( + nipype_interface.input_spec().traits().items() + ): if inpt_name in ("trait_added", "trait_modified"): continue - inpt_desc = inpt.desc.replace('\n', ' ') if inpt.desc else "" + inpt_desc = inpt.desc.replace("\n", " ") if inpt.desc else "" inputs_desc += f"# {inpt_name} ({type(inpt.trait_type).__name__.lower()}): {inpt_desc}\n" + if inpt.genfile: + genfile_outputs.append(inpt_name) + elif type(inpt.trait_type).__name__ in ( + "File", + "InputMultiPath", + ): + file_inputs.append(inpt_name) + file_outputs = [] outputs_desc = "" if nipype_interface.output_spec: - for outpt_name, outpt in nipype_interface.output_spec().traits().items(): - if inpt_name in ("trait_added", "trait_modified"): + for outpt_name, outpt in ( + nipype_interface.output_spec().traits().items() + ): + if outpt_name in ("trait_added", "trait_modified"): 
continue - outpt_desc = outpt.desc.replace('\n', ' ') if outpt.desc else "" + outpt_desc = outpt.desc.replace("\n", " ") if outpt.desc else "" outputs_desc += f"# {outpt_name} ({type(outpt.trait_type).__name__.lower()}): {outpt_desc}\n" + if type(outpt.trait_type).__name__ == "File": + file_outputs.append(outpt_name) # Create a preamble at the top of the specificaiton explaining what to do preamble = ( f"""# This file is used to manually specify the semi-automatic conversion - # of '{module}.{interface}' from Nipype to Pydra. Please fill in the empty fields + # of '{module.replace('/', '.')}.{interface}' from Nipype to Pydra. Please fill in the empty fields # below where appropriate # # Nipype Inputs Ref. @@ -185,6 +188,124 @@ def fields_stub(type_): # ------------------- {outputs_desc}\n""" ).replace(" #", "#") + + # Create "stubs" for each of the available fields + def fields_stub(name, category_class, values=None): + """Used, in conjunction with some find/replaces after dumping, to + insert comments into the YAML file""" + dct = {} + for field in attrs.fields(category_class): + field_name = f"{name}.{field.name}" + try: + val = values[field.name] + except (KeyError, TypeError): + val = ( + field.default + if ( + field.default != attrs.NOTHING + and not isinstance(field.default, attrs.Factory) + ) + else None + ) + else: + if isinstance(val, ty.Iterable) and not val: + val = None + dct[field_name] = val + return dct + + if ">>>" in nipype_interface.__doc__: + intf_name = f"{module.replace('/', '.')}.{interface}" + match = re.search( + r"""^\s+>>> (?:\w+)\.cmdline\n\s*(?:'|")?(.*)(?:'|")?\s*$""", + nipype_interface.__doc__, + flags=re.MULTILINE, + ) + if not match: + raise Exception( + f"Could not find cmdline in doctest of {intf_name}:\n{nipype_interface.__doc__}" + ) + cmdline = match.group(1) + cmdline = cmdline.replace("'", '"') + doctest_inpts = { + n: v.replace("'", '"') + for n, v in re.findall( + r"""\s+>>> (?:\w+)\.inputs\.(\w+) ?= ?(.*)\n""", + 
nipype_interface.__doc__, + ) + } + if not doctest_inpts: + raise Exception( + f"Could not find inpts in doctest of {intf_name}:\n{nipype_interface.__doc__}" + ) + test_inpts = { + n: re.sub(r'(")([^"]+)\1', r"\2", v) + for n, v in doctest_inpts + if n not in file_inputs + } + + doctest_stub = fields_stub( + "doctest", + DocTestGenerator, + {"cmdline": cmdline, "inputs": test_inpts}, + ) + else: + if hasattr(nipype_interface, "_cmd"): + doctest_stub = fields_stub( + "doctest", + DocTestGenerator, + {"cmdline": f"{nipype_interface._cmd} "}, + ) + else: + doctest_stub = None + test_inpts = {} + + spec_stub = { + "task_name": interface, + "nipype_module": nipype_module_str, + "nipype_name": None, + "inputs": fields_stub( + "inputs", + InputsConverter, + {"types": {i: "generic/file" for i in file_inputs}}, + ), + "outputs": fields_stub( + "outputs", + OutputsConverter, + { + "types": {o: "generic/file" for o in file_outputs}, + "templates": {o: "" for o in genfile_outputs}, + }, + ), + "test": fields_stub( + "test", TestsGenerator, {"inputs": copy(test_inpts)} + ), + "doctest": doctest_stub, + } + yaml_str = yaml.dump(spec_stub, indent=4, sort_keys=False, width=4096) + # Strip explicit nulls from dumped YAML + yaml_str = yaml_str.replace(" null", "") + # Inject comments into dumped YAML + for category_name, category_class in [ + ("inputs", InputsConverter), + ("outputs", OutputsConverter), + ("test", TestsGenerator), + ("doctest", DocTestGenerator), + ]: + for field in attrs.fields(category_class): + tp = field.type + if tp.__module__ == "builtins": + tp_name = tp.__name__ + else: + tp_name = str(tp).lower().replace("typing.", "") + comment = f" # {tp_name} - " + field.metadata[ + "help" + ].replace("\n ", "\n # ") + yaml_str = re.sub( + f" {category_name}.{field.name}:" + r"(.*)", + f" {field.name}:" + r"\1" + f"\n{comment}", + yaml_str, + ) + with open(module_spec_dir / (interface + ".yaml"), "w") as f: f.write(preamble + yaml_str) print(preamble + yaml_str) @@ 
-195,7 +316,9 @@ def fields_stub(type_): sp.check_call("git init", shell=True, cwd=pkg_dir) sp.check_call("git add --all", shell=True, cwd=pkg_dir) - sp.check_call('git commit -m"initial commit of generated stubs"', shell=True, cwd=pkg_dir) + sp.check_call( + 'git commit -m"initial commit of generated stubs"', shell=True, cwd=pkg_dir + ) sp.check_call("git tag 0.1.0", shell=True, cwd=pkg_dir) print("\n".join(not_interfaces)) From db5d29910a3b1732629716a57a702235692f1546 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 26 Jul 2023 14:25:27 +1000 Subject: [PATCH 08/42] renamed nipype_name to new_name --- nipype2pydra/task.py | 12 +++++++----- scripts/pkg_gen/create_packages.py | 16 +++++++++------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py index 55ed928b..2f15a956 100644 --- a/nipype2pydra/task.py +++ b/nipype2pydra/task.py @@ -309,10 +309,10 @@ class TaskConverter: a module, or path to a module, containing any required callables """ - task_name: str + name: str nipype_module: ModuleType = attrs.field(converter=import_module_from_path) output_module: str = attrs.field(default=None) - nipype_name: str = attrs.field(default=None) + new_name: str = attrs.field(default=None) inputs: InputsConverter = attrs.field( factory=InputsConverter, converter=from_dict_to_inputs ) @@ -346,12 +346,10 @@ def __attrs_post_init__(self): f"as {self.nipype_module.__name__}.{self.task_name} (i.e. 
not in " "nipype.interfaces)" ) - if self.nipype_name is None: - self.nipype_name = self.task_name @property def nipype_interface(self) -> nipype.interfaces.base.BaseInterface: - return getattr(self.nipype_module, self.nipype_name) + return getattr(self.nipype_module, self.new_name) @property def nipype_input_spec(self) -> nipype.interfaces.base.BaseInterfaceInputSpec: @@ -361,6 +359,10 @@ def nipype_input_spec(self) -> nipype.interfaces.base.BaseInterfaceInputSpec: def nipype_output_spec(self) -> nipype.interfaces.base.BaseTraitedSpec: return self.nipype_interface.output_spec() + @property + def task_name(self): + return self.new_name if self.new_name is not None else self.name + def generate(self, package_root: Path): """creating pydra input/output spec from nipype specs if write is True, a pydra Task class will be written to the file together with tests diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index e5ba1c58..7d9c7fcf 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -160,7 +160,7 @@ def copy_ignore(_, names): genfile_outputs.append(inpt_name) elif type(inpt.trait_type).__name__ in ( "File", - "InputMultiPath", + "InputMultiObject", ): file_inputs.append(inpt_name) file_outputs = [] @@ -238,15 +238,17 @@ def fields_stub(name, category_class, values=None): f"Could not find inpts in doctest of {intf_name}:\n{nipype_interface.__doc__}" ) test_inpts = { - n: re.sub(r'(")([^"]+)\1', r"\2", v) - for n, v in doctest_inpts + n: re.sub(r'^(")([^"]+)\1$', r"\2", v) + for n, v in doctest_inpts.items() if n not in file_inputs } - + doctest_inpts = { + n: (None if v in file_inputs else v) for n, v in doctest_inpts.items() + } doctest_stub = fields_stub( "doctest", DocTestGenerator, - {"cmdline": cmdline, "inputs": test_inpts}, + {"cmdline": cmdline, "inputs": doctest_inpts}, ) else: if hasattr(nipype_interface, "_cmd"): @@ -260,9 +262,9 @@ def fields_stub(name, category_class, values=None): 
test_inpts = {} spec_stub = { - "task_name": interface, + "name": interface, "nipype_module": nipype_module_str, - "nipype_name": None, + "new_name": None, "inputs": fields_stub( "inputs", InputsConverter, From aa127980c5ed7345c3343a8dbeb50195653cdbd9 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 26 Jul 2023 14:29:04 +1000 Subject: [PATCH 09/42] finished fine-tuning conversion specs --- scripts/pkg_gen/create_packages.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index 7d9c7fcf..71fa51a1 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -238,12 +238,12 @@ def fields_stub(name, category_class, values=None): f"Could not find inpts in doctest of {intf_name}:\n{nipype_interface.__doc__}" ) test_inpts = { - n: re.sub(r'^(")([^"]+)\1$', r"\2", v) + n: v for n, v in doctest_inpts.items() if n not in file_inputs } doctest_inpts = { - n: (None if v in file_inputs else v) for n, v in doctest_inpts.items() + n: (None if n in file_inputs else v) for n, v in doctest_inpts.items() } doctest_stub = fields_stub( "doctest", From 79c1b9cdd09a8690dbcb7a23ca50745b8779bb1f Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 26 Jul 2023 14:53:32 +1000 Subject: [PATCH 10/42] added doc string to generated yaml comments --- nipype-interfaces-to-import.yaml | 2 +- nipype2pydra/task.py | 4 ++ scripts/pkg_gen/create_packages.py | 68 ++++++++++++++++++------------ 3 files changed, 46 insertions(+), 28 deletions(-) diff --git a/nipype-interfaces-to-import.yaml b/nipype-interfaces-to-import.yaml index 4cb65437..d6b5bd78 100644 --- a/nipype-interfaces-to-import.yaml +++ b/nipype-interfaces-to-import.yaml @@ -349,7 +349,7 @@ interfaces: - DICOMConvert - Resample - ReconAll - - BBRegisterInputSpec6 + - BBRegister - ApplyVolTransform - Smooth diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py index 2f15a956..fccfa320 100644 --- 
a/nipype2pydra/task.py +++ b/nipype2pydra/task.py @@ -237,6 +237,10 @@ class DocTestGenerator: If the field is of file-format type and the value is None, then the '.mock()' method of the corresponding class is used instead."""} ) + directive: str = attrs.field( + default=None, metadata={ + "help": "any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS"} + ) T = ty.TypeVar("T") diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index 71fa51a1..86b6e7d0 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -9,6 +9,7 @@ import tarfile from pathlib import Path import attrs +from warnings import warn import requests import click import yaml @@ -175,18 +176,23 @@ def copy_ignore(_, names): outputs_desc += f"# {outpt_name} ({type(outpt.trait_type).__name__.lower()}): {outpt_desc}\n" if type(outpt.trait_type).__name__ == "File": file_outputs.append(outpt_name) + doc_string = nipype_interface.__doc__ if nipype_interface.__doc__ else "" + doc_string = doc_string.replace("\n", "\n# ") # Create a preamble at the top of the specificaiton explaining what to do preamble = ( f"""# This file is used to manually specify the semi-automatic conversion # of '{module.replace('/', '.')}.{interface}' from Nipype to Pydra. Please fill in the empty fields # below where appropriate # - # Nipype Inputs Ref. - # ------------------ + # Inputs + # ------ {inputs_desc}# - # Nipype Outputs Ref. 
- # ------------------- - {outputs_desc}\n""" + # Outputs + # ------- + {outputs_desc}# + # Docs + # ---- + # {doc_string}\n""" ).replace(" #", "#") # Create "stubs" for each of the available fields @@ -213,19 +219,18 @@ def fields_stub(name, category_class, values=None): dct[field_name] = val return dct - if ">>>" in nipype_interface.__doc__: - intf_name = f"{module.replace('/', '.')}.{interface}" + if nipype_interface.__doc__ and ">>>" in nipype_interface.__doc__: match = re.search( - r"""^\s+>>> (?:\w+)\.cmdline\n\s*(?:'|")?(.*)(?:'|")?\s*$""", + r"""^\s+>>> (?:\w+)\.cmdline(\s*# doctest: .*)?\n\s*(?:'|")?(.*)(?:'|")?\s*$""", nipype_interface.__doc__, flags=re.MULTILINE, ) - if not match: - raise Exception( - f"Could not find cmdline in doctest of {intf_name}:\n{nipype_interface.__doc__}" - ) - cmdline = match.group(1) - cmdline = cmdline.replace("'", '"') + if match: + cmdline = match.group(2) + cmdline = cmdline.replace("'", '"') + directive = match.group(1) + else: + cmdline = directive = None doctest_inpts = { n: v.replace("'", '"') for n, v in re.findall( @@ -234,21 +239,31 @@ def fields_stub(name, category_class, values=None): ) } if not doctest_inpts: - raise Exception( - f"Could not find inpts in doctest of {intf_name}:\n{nipype_interface.__doc__}" - ) - test_inpts = { - n: v - for n, v in doctest_inpts.items() - if n not in file_inputs - } - doctest_inpts = { - n: (None if n in file_inputs else v) for n, v in doctest_inpts.items() - } + doctest_inpts = { + n: v.replace("'", '"') + for n, v in re.findall( + r"""\.\.\.\s+(\w+)=(.*) *\n""", + nipype_interface.__doc__, + ) + } + if doctest_inpts: + test_inpts = { + n: v + for n, v in doctest_inpts.items() + if n not in file_inputs + } + doctest_inpts = { + n: (None if n in file_inputs else v) for n, v in doctest_inpts.items() + } + else: + intf_name = f"{module.replace('/', '.')}.{interface}" + warn(f"Could not parse doctest for {intf_name}:\n{nipype_interface.__doc__}") + test_inpts = {} + doctest_inpts = 
{} doctest_stub = fields_stub( "doctest", DocTestGenerator, - {"cmdline": cmdline, "inputs": doctest_inpts}, + {"cmdline": cmdline, "inputs": doctest_inpts, "directive": directive}, ) else: if hasattr(nipype_interface, "_cmd"): @@ -310,7 +325,6 @@ def fields_stub(name, category_class, values=None): with open(module_spec_dir / (interface + ".yaml"), "w") as f: f.write(preamble + yaml_str) - print(preamble + yaml_str) with open(callables_fspath, "w") as f: f.write( f'"""Module to put any functions that are referred to in {interface}.yaml"""\n' From ce1111fa75ec3bef8a07a58872fc1dbf5aee2d95 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 26 Jul 2023 14:55:36 +1000 Subject: [PATCH 11/42] removed stray input/output interfaces from to-import list --- nipype-interfaces-to-import.yaml | 42 -------------------------------- 1 file changed, 42 deletions(-) diff --git a/nipype-interfaces-to-import.yaml b/nipype-interfaces-to-import.yaml index d6b5bd78..535ae29b 100644 --- a/nipype-interfaces-to-import.yaml +++ b/nipype-interfaces-to-import.yaml @@ -349,7 +349,6 @@ interfaces: - DICOMConvert - Resample - ReconAll - - BBRegister - ApplyVolTransform - Smooth @@ -448,54 +447,30 @@ interfaces: - Classifier - Cleaner fsl/maths: - - MathsInput - - MathsOutput - MathsCommand - - ChangeDataTypeInput - ChangeDataType - Threshold - - StdImageInput - StdImage - - MeanImageInput - MeanImage - - MaxImageInput - MaxImage - - PercentileImageInput - PercentileImage - - MaxnImageInput - MaxnImage - - MinImageInput - MinImage - - MedianImageInput - MedianImage - - AR1ImageInput - AR1Image - - IsotropicSmoothInput - IsotropicSmooth - - ApplyMaskInput - ApplyMask - - KernelInput - - DilateInput - DilateImage - - ErodeInput - ErodeImage - - SpatialFilterInput - SpatialFilter - - UnaryMathsInput - UnaryMaths - - BinaryMathsInput - BinaryMaths - - MultiImageMathsInput - MultiImageMaths - - TemporalFilterInput - TemporalFilter fsl/model: - Level1Design - FEAT - FEATModel - - FILMGLSInputSpec505 - - 
FILMGLSInputSpec507 - - FILMGLSOutputSpec507 - FILMGLS - FEATRegister - FLAMEO @@ -629,35 +604,22 @@ interfaces: niftyseg/em: - EM niftyseg/label_fusion: - - LabelFusionInput - - LabelFusionOutput - LabelFusion - CalcTopNCC niftyseg/lesions: - FillLesions niftyseg/maths: - - MathsInput - - MathsOutput - MathsCommand - - UnaryMathsInput - UnaryMaths - - BinaryMathsInput - BinaryMaths - - BinaryMathsInputInteger - BinaryMathsInteger - - TupleMathsInput - TupleMaths - - MergeInput - Merge niftyseg/patchmatch: - PatchMatch niftyseg/stats: - - StatsInput - - StatsOutput - StatsCommand - - UnaryStatsInput - UnaryStats - - BinaryStatsInput - BinaryStats nilearn: - NilearnBaseInterface @@ -917,11 +879,7 @@ interfaces: - CalcCoregAffine - ApplyTransform - Reslice - - ApplyInverseDeformationInput - - ApplyInverseDeformationOutput - ApplyInverseDeformation - - ResliceToReferenceInput - - ResliceToReferenceOutput - ResliceToReference - DicomImport vista/vista: From c52d74e13787fbad038f3a0538dcdf3cb11b4c1d Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 26 Jul 2023 18:11:21 +1000 Subject: [PATCH 12/42] touching up create_packages --- scripts/pkg_gen/create_packages.py | 124 ++++++++++++++++++++++++----- 1 file changed, 104 insertions(+), 20 deletions(-) diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index 86b6e7d0..01419a47 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -13,6 +13,13 @@ import requests import click import yaml +import fileformats.core.utils +import fileformats.core.mixin +from fileformats.generic import File +from fileformats.medimage import Nifti1, NiftiGz, Bval, Bvec +from fileformats.text import Txt +from fileformats.numeric import MatlabMatrix, DataFile +from fileformats.serialization import Xml import nipype.interfaces.base.core from nipype2pydra.task import ( InputsConverter, @@ -24,6 +31,8 @@ RESOURCES_DIR = Path(__file__).parent / "resources" +EXPECTED_FORMATS = 
[Nifti1, NiftiGz, Txt, MatlabMatrix, DataFile, Xml] + def download_tasks_template(output_path: Path): """Downloads the latest pydra-tasks-template to the output path""" @@ -80,6 +89,8 @@ def generate_packages( output_dir.mkdir() not_interfaces = [] + unmatched_formats = [] + ambiguous_formats = [] for pkg in to_import["packages"]: pkg_dir = output_dir / f"pydra-{pkg}" @@ -156,7 +167,7 @@ def copy_ignore(_, names): if inpt_name in ("trait_added", "trait_modified"): continue inpt_desc = inpt.desc.replace("\n", " ") if inpt.desc else "" - inputs_desc += f"# {inpt_name} ({type(inpt.trait_type).__name__.lower()}): {inpt_desc}\n" + inputs_desc += f"# {inpt_name} : {type(inpt.trait_type).__name__.lower()}\n# {inpt_desc}\n" if inpt.genfile: genfile_outputs.append(inpt_name) elif type(inpt.trait_type).__name__ in ( @@ -173,16 +184,19 @@ def copy_ignore(_, names): if outpt_name in ("trait_added", "trait_modified"): continue outpt_desc = outpt.desc.replace("\n", " ") if outpt.desc else "" - outputs_desc += f"# {outpt_name} ({type(outpt.trait_type).__name__.lower()}): {outpt_desc}\n" + outputs_desc += f"# {outpt_name} : {type(outpt.trait_type).__name__.lower()}\n# {outpt_desc}\n" if type(outpt.trait_type).__name__ == "File": file_outputs.append(outpt_name) - doc_string = nipype_interface.__doc__ if nipype_interface.__doc__ else "" + doc_string = ( + nipype_interface.__doc__ if nipype_interface.__doc__ else "" + ) doc_string = doc_string.replace("\n", "\n# ") # Create a preamble at the top of the specificaiton explaining what to do preamble = ( - f"""# This file is used to manually specify the semi-automatic conversion - # of '{module.replace('/', '.')}.{interface}' from Nipype to Pydra. Please fill in the empty fields - # below where appropriate + f"""# This file is used to manually specify the semi-automatic conversion of + # '{module.replace('/', '.')}.{interface}' from Nipype to Pydra. 
+ # + # Please fill-in/edit the fields below where appropriate # # Inputs # ------ @@ -219,6 +233,11 @@ def fields_stub(name, category_class, values=None): dct[field_name] = val return dct + input_types = {i: "generic/file" for i in file_inputs} + output_types = {o: "generic/file" for o in file_outputs} + + # Attempt to parse doctest to pull out sensible defaults for input/output + # values if nipype_interface.__doc__ and ">>>" in nipype_interface.__doc__: match = re.search( r"""^\s+>>> (?:\w+)\.cmdline(\s*# doctest: .*)?\n\s*(?:'|")?(.*)(?:'|")?\s*$""", @@ -246,24 +265,86 @@ def fields_stub(name, category_class, values=None): nipype_interface.__doc__, ) } + if not doctest_inpts: + match = re.search( + interface + r"""\(((? 1: + non_adjacent = [f for f in possible_formats if not issubclass(f, fileformats.core.mixin.WithAdjacentFiles)] + if non_adjacent: + possible_formats = non_adjacent + if len(possible_formats) > 1: + possible_formats = sorted(possible_formats, key=lambda f: f.__name__) + ambiguous_formats.append(possible_formats) + return possible_formats[0] + + input_types = { + n: guess_format_from_doctest(n).mime_like + for n in input_types + } + output_types = { + n: guess_format_from_doctest(n).mime_like + for n in output_types + } + test_inpts = { n: v for n, v in doctest_inpts.items() if n not in file_inputs } doctest_inpts = { - n: (None if n in file_inputs else v) for n, v in doctest_inpts.items() + n: (None if n in file_inputs else v) + for n, v in doctest_inpts.items() } else: intf_name = f"{module.replace('/', '.')}.{interface}" - warn(f"Could not parse doctest for {intf_name}:\n{nipype_interface.__doc__}") + warn( + f"Could not parse doctest for {intf_name}:\n{nipype_interface.__doc__}" + ) test_inpts = {} doctest_inpts = {} doctest_stub = fields_stub( "doctest", DocTestGenerator, - {"cmdline": cmdline, "inputs": doctest_inpts, "directive": directive}, + { + "cmdline": cmdline, + "inputs": doctest_inpts, + "directive": directive, + }, ) else: if 
hasattr(nipype_interface, "_cmd"): @@ -283,14 +364,14 @@ def fields_stub(name, category_class, values=None): "inputs": fields_stub( "inputs", InputsConverter, - {"types": {i: "generic/file" for i in file_inputs}}, + {"types": input_types}, ), "outputs": fields_stub( "outputs", OutputsConverter, { - "types": {o: "generic/file" for o in file_outputs}, - "templates": {o: "" for o in genfile_outputs}, + "types": output_types, + "templates": {o: None for o in genfile_outputs}, }, ), "test": fields_stub( @@ -330,14 +411,17 @@ def fields_stub(name, category_class, values=None): f'"""Module to put any functions that are referred to in {interface}.yaml"""\n' ) - sp.check_call("git init", shell=True, cwd=pkg_dir) - sp.check_call("git add --all", shell=True, cwd=pkg_dir) - sp.check_call( - 'git commit -m"initial commit of generated stubs"', shell=True, cwd=pkg_dir - ) - sp.check_call("git tag 0.1.0", shell=True, cwd=pkg_dir) - - print("\n".join(not_interfaces)) + # sp.check_call("git init", shell=True, cwd=pkg_dir) + # sp.check_call("git add --all", shell=True, cwd=pkg_dir) + # sp.check_call( + # 'git commit -m"initial commit of generated stubs"', shell=True, cwd=pkg_dir + # ) + # sp.check_call("git tag 0.1.0", shell=True, cwd=pkg_dir) + + print("Unmatched formats") + print("\n".join(unmatched_formats)) + print("\nAmbiguous formats") + print("\n".join(str(p) for p in ambiguous_formats)) if __name__ == "__main__": From 23eae0c55d7389e58225545a6e751c62f292334f Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 27 Jul 2023 11:15:38 +1000 Subject: [PATCH 13/42] updated to use fileformats-datascience package --- scripts/pkg_gen/create_packages.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index 01419a47..54c20af0 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -18,7 +18,7 @@ from fileformats.generic import File from fileformats.medimage 
import Nifti1, NiftiGz, Bval, Bvec from fileformats.text import Txt -from fileformats.numeric import MatlabMatrix, DataFile +from fileformats.datascience import MatFile, DatFile from fileformats.serialization import Xml import nipype.interfaces.base.core from nipype2pydra.task import ( @@ -31,7 +31,7 @@ RESOURCES_DIR = Path(__file__).parent / "resources" -EXPECTED_FORMATS = [Nifti1, NiftiGz, Txt, MatlabMatrix, DataFile, Xml] +EXPECTED_FORMATS = [Nifti1, NiftiGz, Txt, MatFile, DatFile, Xml] def download_tasks_template(output_path: Path): From 30751fdbd3d70178d72313141f59dff171425e65 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 27 Jul 2023 11:39:14 +1000 Subject: [PATCH 14/42] updated generate_sample_data method name --- conftest.py | 4 ++-- scripts/pkg_gen/create_packages.py | 18 +++++++++++++++++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/conftest.py b/conftest.py index 12c1180a..38883a4e 100644 --- a/conftest.py +++ b/conftest.py @@ -13,8 +13,8 @@ EXAMPLE_WORKFLOWS_DIR = EXAMPLE_SPECS_DIR / "workflow" -@File.generate_test_data.register -def file_generate_test_data(file: File, dest_dir: Path): +@File.generate_sample_data.register +def file_generate_sample_data(file: File, dest_dir: Path): a_file = dest_dir / "a_file.x" a_file.write_text("a sample file") return [a_file] diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index 54c20af0..2031a550 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -357,6 +357,22 @@ def guess_format_from_doctest(field): doctest_stub = None test_inpts = {} + output_templates = {} + for outpt in genfile_outputs: + try: + template = test_inpts[outpt] + except KeyError: + try: + frmt = output_types[outpt] + except KeyError: + ext = "" + else: + ext = fileformats.core.utils.from_mime(frmt).ext + if not ext: + ext = "" + template = outpt + ext + output_templates[outpt] = template + spec_stub = { "name": interface, "nipype_module": 
nipype_module_str, @@ -371,7 +387,7 @@ def guess_format_from_doctest(field): OutputsConverter, { "types": output_types, - "templates": {o: None for o in genfile_outputs}, + "templates": output_templates, }, ), "test": fields_stub( From d2357ac7e393c0ead86d0b7722f5283ee064b805 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 27 Jul 2023 12:23:32 +1000 Subject: [PATCH 15/42] create_packages now reads every docstring --- scripts/pkg_gen/create_packages.py | 60 +++++++++++++++++++++++------- 1 file changed, 47 insertions(+), 13 deletions(-) diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index 2031a550..ca5b3d4a 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -17,6 +17,7 @@ import fileformats.core.mixin from fileformats.generic import File from fileformats.medimage import Nifti1, NiftiGz, Bval, Bvec +from fileformats.misc import Dicom from fileformats.text import Txt from fileformats.datascience import MatFile, DatFile from fileformats.serialization import Xml @@ -265,13 +266,23 @@ def fields_stub(name, category_class, values=None): nipype_interface.__doc__, ) } + if doctest_inpts: + match = re.search( + interface + r"""\((? 
1: - non_adjacent = [f for f in possible_formats if not issubclass(f, fileformats.core.mixin.WithAdjacentFiles)] + non_adjacent = [ + f + for f in possible_formats + if not issubclass( + f, fileformats.core.mixin.WithAdjacentFiles + ) + ] if non_adjacent: possible_formats = non_adjacent if len(possible_formats) > 1: - possible_formats = sorted(possible_formats, key=lambda f: f.__name__) + possible_formats = sorted( + possible_formats, key=lambda f: f.__name__ + ) ambiguous_formats.append(possible_formats) return possible_formats[0] @@ -427,15 +452,24 @@ def guess_format_from_doctest(field): f'"""Module to put any functions that are referred to in {interface}.yaml"""\n' ) - # sp.check_call("git init", shell=True, cwd=pkg_dir) - # sp.check_call("git add --all", shell=True, cwd=pkg_dir) - # sp.check_call( - # 'git commit -m"initial commit of generated stubs"', shell=True, cwd=pkg_dir - # ) - # sp.check_call("git tag 0.1.0", shell=True, cwd=pkg_dir) + sp.check_call("git init", shell=True, cwd=pkg_dir) + sp.check_call("git add --all", shell=True, cwd=pkg_dir) + sp.check_call( + 'git commit -m"initial commit of generated stubs"', shell=True, cwd=pkg_dir + ) + sp.check_call("git tag 0.1.0", shell=True, cwd=pkg_dir) + + unmatched_extensions = set( + File.decompose_fspath( + f.split(":")[1].strip(), mode=File.ExtensionDecomposition.single + )[2] + for f in unmatched_formats + ) - print("Unmatched formats") + print("Unmatched test input formats") print("\n".join(unmatched_formats)) + print("Unmatched format extensions") + print("\n".join(sorted(unmatched_extensions))) print("\nAmbiguous formats") print("\n".join(str(p) for p in ambiguous_formats)) From 67b5c97bb74531d2cdb6701afc7c9d5f99255ddf Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 27 Jul 2023 15:29:12 +1000 Subject: [PATCH 16/42] debugging created packages --- nipype2pydra/task.py | 36 +++++++++++-------- nipype2pydra/utils.py | 10 ++++++ scripts/pkg_gen/create_packages.py | 4 +-- 
.../pkg_gen/resources/nipype-auto-convert.py | 18 ++++++---- 4 files changed, 46 insertions(+), 22 deletions(-) diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py index fccfa320..7e457468 100644 --- a/nipype2pydra/task.py +++ b/nipype2pydra/task.py @@ -7,14 +7,15 @@ import inspect import black import traits.trait_types +import json import attrs from attrs.converters import default_if_none import nipype.interfaces.base from nipype.interfaces.base import traits_extension from pydra.engine import specs from pydra.engine.helpers import ensure_list -from .utils import import_module_from_path -from fileformats.core import DataType, FileSet +from .utils import import_module_from_path, is_fileset +from fileformats.core import DataType def str_to_type(type_str: str) -> type: @@ -192,7 +193,8 @@ class TestsGenerator: }, ) expected_outputs: ty.Dict[str, str] = attrs.field( - factory=dict, converter=default_if_none(factory=dict), # type: ignore + factory=dict, + converter=default_if_none(factory=dict), # type: ignore metadata={ "help": """expected values for selected outputs, noting that tests will typically be terminated before they complete for time-saving reasons, and therefore @@ -206,13 +208,14 @@ class TestsGenerator: after which the test will be considered to have been initialised successfully. Set to 0 to disable the timeout (warning, this could lead to the unittests taking a very long time to complete)""" - } + }, ) xfail: bool = attrs.field( default=True, metadata={ "help": """whether the unittest is expected to fail or not. Set to false - when you are satisfied with the edits you have made to this file"""} + when you are satisfied with the edits you have made to this file""" + }, ) @@ -235,11 +238,14 @@ class DocTestGenerator: metadata={ "help": """name-value pairs for inputs to be provided to the doctest. 
If the field is of file-format type and the value is None, then the - '.mock()' method of the corresponding class is used instead."""} + '.mock()' method of the corresponding class is used instead.""" + }, ) directive: str = attrs.field( - default=None, metadata={ - "help": "any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS"} + default=None, + metadata={ + "help": "any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS" + }, ) @@ -314,7 +320,9 @@ class TaskConverter: """ name: str - nipype_module: ModuleType = attrs.field(converter=import_module_from_path) + nipype_module: ModuleType = attrs.field( + converter=lambda m: import_module(m) if not isinstance(m, ModuleType) else m + ) output_module: str = attrs.field(default=None) new_name: str = attrs.field(default=None) inputs: InputsConverter = attrs.field( @@ -353,7 +361,7 @@ def __attrs_post_init__(self): @property def nipype_interface(self) -> nipype.interfaces.base.BaseInterface: - return getattr(self.nipype_module, self.new_name) + return getattr(self.nipype_module, self.name) @property def nipype_input_spec(self) -> nipype.interfaces.base.BaseInterfaceInputSpec: @@ -386,7 +394,7 @@ def generate(self, package_root: Path): .with_suffix(".py") ) testdir = output_file.parent / "tests" - testdir.mkdir(parents=True) + testdir.mkdir(parents=True, exist_ok=True) self.write_task( output_file, @@ -700,10 +708,10 @@ def write_test(self, filename_test, input_fields, nonstd_types, run=False): value = self.test.inputs[nm] except KeyError: if len(field) == 4: # field has default - value = field[2] + value = json.dumps(field[2]) else: assert len(field) == 3 - if inspect.isclass(tp) and issubclass(tp, FileSet): + if is_fileset(tp): value = f"{tp.__name__}.sample()" else: trait = self.nipype_interface.input_spec.class_traits()[nm] @@ -747,7 +755,7 @@ def create_doctest(self, input_fields, nonstd_types): try: val = self.doctest.inputs[nm] except KeyError: - if inspect.isclass(tp) 
and issubclass(tp, FileSet): + if is_fileset(tp): val = f"{tp.__name__}.mock()" else: val = attrs.NOTHING diff --git a/nipype2pydra/utils.py b/nipype2pydra/utils.py index 35e048aa..dd2ed7b7 100644 --- a/nipype2pydra/utils.py +++ b/nipype2pydra/utils.py @@ -3,8 +3,10 @@ from types import ModuleType import sys import os +import inspect from contextlib import contextmanager from pathlib import Path +from fileformats.core import FileSet from importlib import import_module @@ -63,3 +65,11 @@ def add_to_sys_path(path: Path): yield sys.path finally: sys.path.pop(0) + + +def is_fileset(tp: type): + return ( + inspect.isclass(tp) + and type(tp) is not ty.GenericAlias + and issubclass(tp, FileSet) + ) diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index ca5b3d4a..b5e43dc4 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -143,8 +143,8 @@ def copy_ignore(_, names): for module, interfaces in to_import["interfaces"].items(): if module.split("/")[0] != pkg: continue - module_spec_dir = specs_dir.joinpath(*module.split("/")) - module_spec_dir.mkdir(parents=True) + module_spec_dir = specs_dir.joinpath(*module.split("/")[1:]) + module_spec_dir.mkdir(parents=True, exist_ok=True) for interface in interfaces: callables_fspath = module_spec_dir / f"{interface}_callables.py" spec_stub = {} diff --git a/scripts/pkg_gen/resources/nipype-auto-convert.py b/scripts/pkg_gen/resources/nipype-auto-convert.py index 22dec3c1..227c9ba1 100644 --- a/scripts/pkg_gen/resources/nipype-auto-convert.py +++ b/scripts/pkg_gen/resources/nipype-auto-convert.py @@ -38,24 +38,30 @@ with open(fspath) as f: spec = yaml.load(f, Loader=yaml.SafeLoader) - rel_pkg_path = str(fspath.relative_to(SPECS_DIR)).replace(os.path.sep, ".") + rel_pkg_path = ( + str(fspath.parent.relative_to(SPECS_DIR)).replace(os.path.sep, ".") + + "." 
+ + fspath.stem + ) callables = import_module(rel_pkg_path + "_callables") - module_name = fspath.name.lower() + module_name = fspath.stem.lower() converter = TaskConverter( output_module=f"pydra.tasks.{PKG_NAME}.auto.{module_name}", callables_module=callables, # type: ignore - **spec + **spec, ) converter.generate(PKG_ROOT) - auto_init += f"from .{module_name} import {spec['task_name']}\n" + auto_init += f"from .{module_name} import {converter.task_name}\n" with open(PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto" / "_version.py", "w") as f: - f.write(f"""# Auto-generated by {__file__}, do not edit as it will be overwritten + f.write( + f"""# Auto-generated by {__file__}, do not edit as it will be overwritten auto_version = {auto_version} -""") +""" + ) with open(PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto" / "__init__.py", "w") as f: f.write(auto_init) From bb3e3692f4308c92bb19c2560ee511375d8684b2 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Sat, 29 Jul 2023 14:14:03 +1000 Subject: [PATCH 17/42] changed parsing of union file types --- nipype2pydra/task.py | 68 ++++++++++++++++++++++++-------------------- 1 file changed, 37 insertions(+), 31 deletions(-) diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py index 7e457468..8713142c 100644 --- a/nipype2pydra/task.py +++ b/nipype2pydra/task.py @@ -21,12 +21,16 @@ def str_to_type(type_str: str) -> type: """Resolve a string representation of a type into a valid type""" if "/" in type_str: - tp = DataType.from_mime(type_str) - try: - # If datatype is a field, use its primitive instead - tp = tp.primitive # type: ignore - except AttributeError: - pass + if "," in type_str: + union_tps = tuple(str_to_type(p) for p in type_str.split(",")) + tp: ty.Type[ty.Union] = ty.Union.__getitem__(union_tps) # type: ignore + else: + tp = DataType.from_mime(type_str) + try: + # If datatype is a field, use its primitive instead + tp = tp.primitive # type: ignore + except AttributeError: + pass elif "." 
in type_str: parts = type_str.split(".") module = import_module(".".join(parts[:-1])) @@ -46,13 +50,7 @@ def types_converter(types: ty.Dict[str, ty.Union[str, type]]) -> ty.Dict[str, ty converted = {} for name, tp_or_str in types.items(): if isinstance(tp_or_str, str): - if tp_or_str.startswith("union:"): - union_tps = tuple( - str_to_type(p) for p in tp_or_str[len("union:") :].split(",") - ) - tp: ty.Type[ty.Union] = ty.Union.__getitem__(union_tps) # type: ignore - else: - tp = str_to_type(tp_or_str) + tp = str_to_type(tp_or_str) converted[name] = tp return converted @@ -167,7 +165,7 @@ def callables_validator(self, _, output_callables: dict): @attrs.define -class TestsGenerator: +class TestGenerator: """Specifications for the automatically generated test for the generated Nipype spec Parameters @@ -279,15 +277,16 @@ def from_dict_to_outputs(obj: ty.Union[OutputsConverter, dict]) -> OutputsConver return from_dict_converter(obj, OutputsConverter) -def from_dict_to_test(obj: ty.Union[TestsGenerator, dict]) -> TestsGenerator: - return from_dict_converter(obj, TestsGenerator) +def from_list_to_tests(obj: ty.Union[ty.List[TestGenerator], list]) -> ty.List[TestGenerator]: + if obj is None: + return [] + return [from_dict_converter(t, TestGenerator) for t in obj] -def from_dict_to_doctest(obj: ty.Union[DocTestGenerator, dict]) -> DocTestGenerator: - converted = from_dict_converter(obj, DocTestGenerator, allow_none=True) - if converted.inputs is None: - converted = None - return converted +def from_list_to_doctests(obj: ty.Union[ty.List[DocTestGenerator], list]) -> ty.List[DocTestGenerator]: + if obj is None: + return [] + return [from_dict_converter(t, DocTestGenerator) for t in obj] @attrs.define @@ -309,10 +308,10 @@ class TaskConverter: specficiations for the conversion of inputs outputs: OutputsConverter or dict specficiations for the conversion of inputs - test: TestsGenerator or dict, optional + tests: ty.List[TestGenerator] or list, optional specficiations for how 
to construct the test. A default test is generated if no specs are provided - doctest: DocTestGenerator or dict, optional + doctests: ty.List[DocTestGenerator] or list, optional specifications for how to construct the docttest. Doctest is omitted if not provided callables_module: ModuleType or str, optional @@ -335,11 +334,11 @@ class TaskConverter: callables_module: ModuleType = attrs.field( converter=import_module_from_path, default=None ) - test: TestsGenerator = attrs.field( # type: ignore - factory=TestsGenerator, converter=from_dict_to_test + tests: ty.List[TestGenerator] = attrs.field( # type: ignore + factory=list, converter=from_list_to_tests ) - doctest: ty.Optional[DocTestGenerator] = attrs.field( - default=None, converter=from_dict_to_doctest + doctests: ty.List[DocTestGenerator] = attrs.field( + factory=list, converter=from_list_to_doctests ) def __attrs_post_init__(self): @@ -676,6 +675,7 @@ def types_to_names(spec_fields): for tp_repl in self.TYPE_REPLACE: spec_str = spec_str.replace(*tp_repl) + spec_str = re.sub(r'"TYPE_(\w+)"', r"\1", spec_str) spec_str_black = black.format_file_contents( spec_str, fast=False, mode=black.FileMode() @@ -686,13 +686,13 @@ def types_to_names(spec_fields): @staticmethod def import_types(nonstd_types: ty.List[type], prefix="") -> str: - imports = "import typing as ty\nfrom pathlib import Path\n" + imports = f"{prefix}import typing as ty\n{prefix}from pathlib import Path\n" for tp in nonstd_types: imports += f"{prefix}from {tp.__module__} import {tp.__name__}\n" return imports def write_test(self, filename_test, input_fields, nonstd_types, run=False): - spec_str = "import os, pytest \n" + spec_str = "import os\nimport pytest\n" spec_str += self.import_types(nonstd_types=nonstd_types) spec_str += f"from {self.output_module} import {self.task_name}\n" spec_str += "\n" @@ -708,7 +708,10 @@ def write_test(self, filename_test, input_fields, nonstd_types, run=False): value = self.test.inputs[nm] except KeyError: if len(field) 
== 4: # field has default - value = json.dumps(field[2]) + if isinstance(field[2], bool): + value = str(field[2]) + else: + value = json.dumps(field[2]) else: assert len(field) == 3 if is_fileset(tp): @@ -733,6 +736,8 @@ def write_test(self, filename_test, input_fields, nonstd_types, run=False): value = attrs.NOTHING if value is not attrs.NOTHING: spec_str += f" task.inputs.{nm} = {value}\n" + if hasattr(self.nipype_interface, "_cmd"): + spec_str += r' print(f"CMDLINE: {task.cmdline}\n\n")' + "\n" spec_str += " res = task()\n" spec_str += " print('RESULT: ', res)\n" for name, value in self.test.expected_outputs.items(): @@ -749,6 +754,7 @@ def create_doctest(self, input_fields, nonstd_types): """adding doctests to the interfaces""" doctest = ' """\n Example\n -------\n' doctest += self.import_types(nonstd_types, prefix=" >>> ") + doctest += f" >>> from {self.output_module} import {self.task_name}\n" doctest += f" >>> task = {self.task_name}()\n" for field in input_fields: nm, tp = field[:2] @@ -795,7 +801,7 @@ def create_doctest(self, input_fields, nonstd_types): ] TYPE_REPLACE = [ - ("'TYPE_File'", "specs.File"), + ("'TYPE_File'", "File"), ("'TYPE_bool'", "bool"), ("'TYPE_str'", "str"), ("'TYPE_Any'", "ty.Any"), From a299ec1b563725f7350f8b7a111c2b67c7a0f95b Mon Sep 17 00:00:00 2001 From: Tom Close Date: Sat, 29 Jul 2023 14:14:21 +1000 Subject: [PATCH 18/42] added timeout_pass decorator --- scripts/pkg_gen/resources/conftest.py | 51 +++++++++++++++++++-------- 1 file changed, 36 insertions(+), 15 deletions(-) diff --git a/scripts/pkg_gen/resources/conftest.py b/scripts/pkg_gen/resources/conftest.py index 89d3215f..9ce8ebb1 100644 --- a/scripts/pkg_gen/resources/conftest.py +++ b/scripts/pkg_gen/resources/conftest.py @@ -4,6 +4,7 @@ import logging from pathlib import Path import tempfile +import threading import pytest @@ -50,22 +51,42 @@ def work_dir(): return Path(work_dir) -def pytest_configure(config): - config.addinivalue_line( - "markers", "timeout_pass: mark 
test as passing if it runs until timeout" - ) +def timeout_pass(timeout): + """Cancel the test after a certain period, after which it is assumed that the arguments + passed to the underying command have passed its internal validation (so we don't have + to wait until the tool completes) + Parameters + ---------- + timeout : int + the number of seconds to wait until cancelling the test + """ + def decorator(test_func): + def wrapper(*args, **kwargs): + result = [None] + exception = [False] + timeout_event = threading.Event() -def pytest_runtest_protocol(item, nextitem): - marker = item.get_closest_marker("timeout_pass") - if marker is not None: - timeout = marker.kwargs.get("timeout", None) - if timeout is not None: - start_time = time.time() - timeout_duration = timeout + def test_runner(): + try: + result[0] = test_func(*args, **kwargs) + except Exception: + exception[0] = True - while time.time() - start_time < timeout_duration: - nextitem() - pytest.xpass(f"Test passed by running through the timeout of {timeout_duration} seconds.") + thread = threading.Thread(target=test_runner) + thread.start() + timeout_event.wait(timeout) - return None + if thread.is_alive(): + timeout_event.set() + thread.join() + return result[0] + + if exception[0]: + raise Exception("Test raised an exception during execution.") + + return result[0] + + return wrapper + + return decorator From 5deffbca9609dac63f121a7dc8d7924a3a49b7a2 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Sat, 29 Jul 2023 14:14:45 +1000 Subject: [PATCH 19/42] moved github workflows into separate directory --- scripts/pkg_gen/resources/{ => gh_workflows}/auto-release.yaml | 2 +- scripts/pkg_gen/resources/{ => gh_workflows}/pythonpackage.yaml | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename scripts/pkg_gen/resources/{ => gh_workflows}/auto-release.yaml (94%) rename scripts/pkg_gen/resources/{ => gh_workflows}/pythonpackage.yaml (100%) diff --git a/scripts/pkg_gen/resources/auto-release.yaml 
b/scripts/pkg_gen/resources/gh_workflows/auto-release.yaml similarity index 94% rename from scripts/pkg_gen/resources/auto-release.yaml rename to scripts/pkg_gen/resources/gh_workflows/auto-release.yaml index 17530f79..6ae1d61b 100644 --- a/scripts/pkg_gen/resources/auto-release.yaml +++ b/scripts/pkg_gen/resources/gh_workflows/auto-release.yaml @@ -33,4 +33,4 @@ jobs: "draft": false, "prerelease": false }' \ - "https://api.github.com/repos/$REPO_OWNER/$REPO_NAME/releases" \ No newline at end of file + "https://api.github.com/repos/nipype/pydra-#PACKAGE#/releases" \ No newline at end of file diff --git a/scripts/pkg_gen/resources/pythonpackage.yaml b/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml similarity index 100% rename from scripts/pkg_gen/resources/pythonpackage.yaml rename to scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml From 832225eea90a1d87a41521b8064f5a3b6ae10148 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Sat, 29 Jul 2023 14:15:04 +1000 Subject: [PATCH 20/42] added auto-convert requirements --- .../resources/nipype-auto-convert-requirements.txt | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 scripts/pkg_gen/resources/nipype-auto-convert-requirements.txt diff --git a/scripts/pkg_gen/resources/nipype-auto-convert-requirements.txt b/scripts/pkg_gen/resources/nipype-auto-convert-requirements.txt new file mode 100644 index 00000000..7067fce1 --- /dev/null +++ b/scripts/pkg_gen/resources/nipype-auto-convert-requirements.txt @@ -0,0 +1,9 @@ +black +attrs>=22.1.0 +nipype +pydra +PyYAML>=6.0 +fileformats >=0.8 +fileformats-medimage >=0.4 +traits +nipype2pydra \ No newline at end of file From 0418bd265410187e654033ca0567e4af9d77e115 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Sat, 29 Jul 2023 14:15:17 +1000 Subject: [PATCH 21/42] changed create_packages to read multiple doctests --- scripts/pkg_gen/create_packages.py | 513 +++++++++++++++++------------ 1 file changed, 295 insertions(+), 218 deletions(-) diff --git 
a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index b5e43dc4..36107f08 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -25,7 +25,7 @@ from nipype2pydra.task import ( InputsConverter, OutputsConverter, - TestsGenerator, + TestGenerator, DocTestGenerator, ) @@ -94,61 +94,24 @@ def generate_packages( ambiguous_formats = [] for pkg in to_import["packages"]: - pkg_dir = output_dir / f"pydra-{pkg}" - - def copy_ignore(_, names): - return [n for n in names if n in (".git", "__pycache__", ".pytest_cache")] - - shutil.copytree(task_template, pkg_dir, ignore=copy_ignore) - - # Setup script to auto-convert nipype interfaces - auto_conv_dir = pkg_dir / "nipype-auto-conv" - specs_dir = auto_conv_dir / "specs" - specs_dir.mkdir(parents=True) - shutil.copy( - RESOURCES_DIR / "nipype-auto-convert.py", auto_conv_dir / "generate" - ) - os.chmod(auto_conv_dir / "generate", 0o755) # make executable - - # Setup GitHub workflows - gh_workflows_dir = pkg_dir / ".github" / "workflows" - gh_workflows_dir.mkdir(parents=True, exist_ok=True) - shutil.copy( - RESOURCES_DIR / "pythonpackage.yaml", - gh_workflows_dir / "pythonpackage.yaml", - ) - - # Add in conftest.py - shutil.copy(RESOURCES_DIR / "conftest.py", pkg_dir / "conftest.py") - - # Add "pydra.tasks..auto to gitignore" - with open(pkg_dir / ".gitignore", "a") as f: - f.write("\npydra/tasks/{pkg}/auto") - - # rename tasks directory - (pkg_dir / "pydra" / "tasks" / "CHANGEME").rename( - pkg_dir / "pydra" / "tasks" / pkg - ) - - # Replace "CHANGEME" string with pkg name - for fspath in pkg_dir.glob("**/*"): - if fspath.is_dir(): - continue - with open(fspath) as f: - contents = f.read() - contents = re.sub(r"(?>>" in nipype_interface.__doc__: - match = re.search( - r"""^\s+>>> (?:\w+)\.cmdline(\s*# doctest: .*)?\n\s*(?:'|")?(.*)(?:'|")?\s*$""", - nipype_interface.__doc__, - flags=re.MULTILINE, - ) - if match: - cmdline = match.group(2) - cmdline = 
cmdline.replace("'", '"') - directive = match.group(1) - else: - cmdline = directive = None - doctest_inpts = { - n: v.replace("'", '"') - for n, v in re.findall( - r"""\s+>>> (?:\w+)\.inputs\.(\w+) ?= ?(.*)\n""", - nipype_interface.__doc__, - ) - } - if not doctest_inpts: - doctest_inpts = { - n: v.replace("'", '"') - for n, v in re.findall( - r"""\.\.\.\s+(\w+)=(.*) *\n""", - nipype_interface.__doc__, + doc_str = nipype_interface.__doc__ if nipype_interface.__doc__ else "" + doc_str = re.sub(r"\n\s+\.\.\.\s+", "", doc_str) + doctests: ty.List[DocTestGenerator] = [] + tests: ty.List[TestGenerator] = [] + + for doctest_str in doc_str.split("\n\n"): + if ">>>" in doctest_str: + try: + cmdline, inpts, directive = extract_doctest_inputs( + doctest_str, interface ) - } - if doctest_inpts: - match = re.search( - interface + r"""\((?"}, + for name, val in inpts.items(): + if name in file_inputs: + guessed_type = guess_type(val) + input_types[name] = combine_types( + guessed_type, input_types[name] + ) + else: + test_inpts[name] = val + if name in file_outputs: + guessed_type = guess_type(val) + output_types[name] = combine_types( + guessed_type, output_types[name] + ) + if name in genfile_outputs: + output_templates[name] = val + doctest_inpts[name] = None if name in file_inputs else val + + tests.append( + fields_stub( + "test", TestGenerator, {"inputs": copy(test_inpts)} + ) + ) + doctests.append( + fields_stub( + "doctest", + DocTestGenerator, + { + "cmdline": cmdline, + "inputs": doctest_inpts, + "directive": directive, + }, + ) ) - else: - doctest_stub = None - test_inpts = {} - output_templates = {} + # Add default template names for fields not explicitly listed in doctests for outpt in genfile_outputs: - try: - template = test_inpts[outpt] - except KeyError: + if outpt not in output_templates: try: frmt = output_types[outpt] except KeyError: ext = "" else: - ext = fileformats.core.utils.from_mime(frmt).ext - if not ext: - ext = "" - template = outpt + ext - 
output_templates[outpt] = template + if getattr(frmt, "_name", None) == "Union": + ext = ty.get_args(frmt)[0].strext + else: + ext = frmt.strext + output_templates[outpt] = outpt + ext + + def to_mime_like(type_): + if ty.get_origin(type_) is ty.Union: + mime_like = ",".join(a.mime_like for a in ty.get_args(type_)) + else: + mime_like = type_.mime_like + return mime_like spec_stub = { "name": interface, @@ -405,29 +298,29 @@ def guess_format_from_doctest(field): "inputs": fields_stub( "inputs", InputsConverter, - {"types": input_types}, + {"types": {n: to_mime_like(t) for n, t in input_types.items()}}, ), "outputs": fields_stub( "outputs", OutputsConverter, { - "types": output_types, + "types": { + n: to_mime_like(t) for n, t in output_types.items() + }, "templates": output_templates, }, ), - "test": fields_stub( - "test", TestsGenerator, {"inputs": copy(test_inpts)} - ), - "doctest": doctest_stub, + "tests": tests, + "doctests": doctests, } - yaml_str = yaml.dump(spec_stub, indent=4, sort_keys=False, width=4096) + yaml_str = yaml.dump(spec_stub, indent=2, sort_keys=False, width=4096) # Strip explicit nulls from dumped YAML yaml_str = yaml_str.replace(" null", "") # Inject comments into dumped YAML for category_name, category_class in [ ("inputs", InputsConverter), ("outputs", OutputsConverter), - ("test", TestsGenerator), + ("test", TestGenerator), ("doctest", DocTestGenerator), ]: for field in attrs.fields(category_class): @@ -452,12 +345,12 @@ def guess_format_from_doctest(field): f'"""Module to put any functions that are referred to in {interface}.yaml"""\n' ) - sp.check_call("git init", shell=True, cwd=pkg_dir) - sp.check_call("git add --all", shell=True, cwd=pkg_dir) - sp.check_call( - 'git commit -m"initial commit of generated stubs"', shell=True, cwd=pkg_dir - ) - sp.check_call("git tag 0.1.0", shell=True, cwd=pkg_dir) + # sp.check_call("git init", shell=True, cwd=pkg_dir) + # sp.check_call("git add --all", shell=True, cwd=pkg_dir) + # sp.check_call( + # 
'git commit -m"initial commit of generated stubs"', shell=True, cwd=pkg_dir + # ) + # sp.check_call("git tag 0.1.0", shell=True, cwd=pkg_dir) unmatched_extensions = set( File.decompose_fspath( @@ -474,6 +367,190 @@ def guess_format_from_doctest(field): print("\n".join(str(p) for p in ambiguous_formats)) +def initialise_task_repo(output_dir, task_template: Path, pkg: str) -> Path: + """Copy the task template to the output directory and customise it for the given + package name and return the created package directory""" + + pkg_dir = output_dir / f"pydra-{pkg}" + + def copy_ignore(_, names): + return [n for n in names if n in (".git", "__pycache__", ".pytest_cache")] + + shutil.copytree(task_template, pkg_dir, ignore=copy_ignore) + + # Setup script to auto-convert nipype interfaces + auto_conv_dir = pkg_dir / "nipype-auto-conv" + specs_dir = auto_conv_dir / "specs" + specs_dir.mkdir(parents=True) + shutil.copy(RESOURCES_DIR / "nipype-auto-convert.py", auto_conv_dir / "generate") + os.chmod(auto_conv_dir / "generate", 0o755) # make executable + shutil.copy( + RESOURCES_DIR / "nipype-auto-convert-requirements.txt", + auto_conv_dir / "requirements.txt", + ) + + # Setup GitHub workflows + gh_workflows_dir = pkg_dir / ".github" / "workflows" + gh_workflows_dir.mkdir(parents=True, exist_ok=True) + shutil.copy( + RESOURCES_DIR / "gh_workflows" / "pythonpackage.yaml", + gh_workflows_dir / "pythonpackage.yaml", + ) + shutil.copy( + RESOURCES_DIR / "gh_workflows" / "auto-release.yaml", + gh_workflows_dir / "auto-release.yaml", + ) + + # Add in conftest.py + shutil.copy(RESOURCES_DIR / "conftest.py", pkg_dir / "conftest.py") + + # Add "pydra.tasks..auto to gitignore" + with open(pkg_dir / ".gitignore", "a") as f: + f.write(f"\npydra/tasks/{pkg}/auto") + + # rename tasks directory + (pkg_dir / "pydra" / "tasks" / "CHANGEME").rename(pkg_dir / "pydra" / "tasks" / pkg) + + # Replace "CHANGEME" string with pkg name + for fspath in pkg_dir.glob("**/*"): + if fspath.is_dir(): + 
continue + with open(fspath) as f: + contents = f.read() + contents = re.sub(r"(? ty.Tuple[str, ty.List[str], ty.List[str], ty.List[str]]: + """Generate preamble comments at start of file with args and doc strings""" + inputs_desc = "" + file_inputs = [] + genfile_outputs = [] + if nipype_interface.input_spec: + for inpt_name, inpt in nipype_interface.input_spec().traits().items(): + if inpt_name in ("trait_added", "trait_modified"): + continue + inpt_desc = inpt.desc.replace("\n", " ") if inpt.desc else "" + inputs_desc += f"# {inpt_name} : {type(inpt.trait_type).__name__.lower()}\n# {inpt_desc}\n" + if inpt.genfile: + genfile_outputs.append(inpt_name) + elif type(inpt.trait_type).__name__ in ( + "File", + "InputMultiObject", + ): + file_inputs.append(inpt_name) + file_outputs = [] + outputs_desc = "" + if nipype_interface.output_spec: + for outpt_name, outpt in nipype_interface.output_spec().traits().items(): + if outpt_name in ("trait_added", "trait_modified"): + continue + outpt_desc = outpt.desc.replace("\n", " ") if outpt.desc else "" + outputs_desc += f"# {outpt_name} : {type(outpt.trait_type).__name__.lower()}\n# {outpt_desc}\n" + if type(outpt.trait_type).__name__ == "File": + file_outputs.append(outpt_name) + doc_string = nipype_interface.__doc__ if nipype_interface.__doc__ else "" + doc_string = doc_string.replace("\n", "\n# ") + # Create a preamble at the top of the specificaiton explaining what to do + preamble = ( + f"""# This file is used to manually specify the semi-automatic conversion of + # '{nipype_interface.__module__.replace('/', '.')}.{nipype_interface.__name__}' from Nipype to Pydra. 
+ # + # Please fill-in/edit the fields below where appropriate + # + # Inputs + # ------ + {inputs_desc}# + # Outputs + # ------- + {outputs_desc}# + # Docs + # ---- + # {doc_string}\n""" + ).replace(" #", "#") + return preamble, file_inputs, file_outputs, genfile_outputs + + +def extract_doctest_inputs( + doctest: str, interface: str +) -> ty.Tuple[ty.Optional[str], dict[str, ty.Any], ty.Optional[str]]: + """Extract the inputs passed to tasks in the doctests of Nipype interfaces + + Parameters + ---------- + doctest : str + the doc string of the interface + interface : str + the name of the interface + + Returns + ------- + cmdline : str + the expected cmdline + inputs : dict[str, ty.Any] + the inputs passed to the task + directive : str + any doctest directives found after the cmdline, e.g. ELLIPSIS""" + match = re.search( + r"""^\s+>>> (?:\w+)\.cmdline(\s*# doctest: .*)?\n\s*('|")(.*)(?:'|")?\s*.*(?!>>>)\2""", + doctest, + flags=re.MULTILINE | re.DOTALL, + ) + if match: + cmdline = match.group(3) + cmdline = re.sub(r"\s+", " ", cmdline) + cmdline = cmdline.replace("'", '"') + directive = match.group(2) + else: + cmdline = directive = None + doctest_inpts = { + n: v.replace("'", '"') + for n, v in re.findall( + r"""\s+>>> (?:\w+)\.inputs\.(\w+) ?= ?(.*)\n""", + doctest, + ) + } + # if not doctest_inpts: + # doctest_inpts = { + # n: v.replace("'", '"') + # for n, v in re.findall( + # r"""\.\.\.\s+(\w+)=(.*) *\n""", + # doctest, + # ) + # } + # if doctest_inpts: + # match = re.search( + # interface + r"""\((? 
Date: Mon, 31 Jul 2023 12:36:20 +1000 Subject: [PATCH 22/42] handle imports in doctests and unittests --- nipype2pydra/task.py | 313 ++++++++++++++++++----------- scripts/pkg_gen/create_packages.py | 121 +++++++---- 2 files changed, 276 insertions(+), 158 deletions(-) diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py index 8713142c..41b3ea87 100644 --- a/nipype2pydra/task.py +++ b/nipype2pydra/task.py @@ -15,22 +15,40 @@ from pydra.engine import specs from pydra.engine.helpers import ensure_list from .utils import import_module_from_path, is_fileset -from fileformats.core import DataType +from fileformats.core import from_mime + + +T = ty.TypeVar("T") + + +def from_dict_converter( + obj: ty.Union[T, dict], klass: ty.Type[T], allow_none=False +) -> T: + if obj is None: + if allow_none: + converted = None + else: + converted = klass() + elif isinstance(obj, dict): + converted = klass(**obj) + elif isinstance(obj, klass): + converted = obj + else: + raise TypeError( + f"Input must be of type {klass} or dict, not {type(obj)}: {obj}" + ) + return converted def str_to_type(type_str: str) -> type: """Resolve a string representation of a type into a valid type""" if "/" in type_str: - if "," in type_str: - union_tps = tuple(str_to_type(p) for p in type_str.split(",")) - tp: ty.Type[ty.Union] = ty.Union.__getitem__(union_tps) # type: ignore - else: - tp = DataType.from_mime(type_str) - try: - # If datatype is a field, use its primitive instead - tp = tp.primitive # type: ignore - except AttributeError: - pass + tp = from_mime(type_str) + try: + # If datatype is a field, use its primitive instead + tp = tp.primitive # type: ignore + except AttributeError: + pass elif "." 
in type_str: parts = type_str.split(".") module = import_module(".".join(parts[:-1])) @@ -55,6 +73,20 @@ def types_converter(types: ty.Dict[str, ty.Union[str, type]]) -> ty.Dict[str, ty return converted +@attrs.define +class ImportStatement: + + module: str + name: ty.Optional[str] = None + alias: ty.Optional[str] = None + + +def from_list_to_imports(obj: ty.Union[ty.List[ImportStatement], list]) -> ty.List[ImportStatement]: + if obj is None: + return [] + return [from_dict_converter(t, ImportStatement) for t in obj] + + @attrs.define class SpecConverter: omit: ty.List[str] = attrs.field( @@ -173,7 +205,10 @@ class TestGenerator: inputs : dict[str, str], optional values to provide to specific inputs fields (if not provided, a sensible value within the valid range will be provided) - outputs: dict[str, str], optional + imports : list[ImportStatement or dict] + list import statements required by the test, with each list item + consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: dict[str, str], optional expected values for selected outputs, noting that in tests will typically be terminated before they complete for time-saving reasons and will therefore be ignored @@ -190,6 +225,14 @@ class TestGenerator: (if not specified, will try to choose a sensible value)""" }, ) + imports: ty.List[ImportStatement] = attrs.field( + factory=list, + converter=from_list_to_imports, + metadata={ + "help": """list import statements required by the test, with each list item + consisting of 'module', 'name', and optionally 'alias' keys""" + } + ) expected_outputs: ty.Dict[str, str] = attrs.field( factory=dict, converter=default_if_none(factory=dict), # type: ignore @@ -228,6 +271,11 @@ class DocTestGenerator: inputs : dict[str, str or None] name-value pairs for inputs to be provided to the doctest. If the value is None then the ".mock()" method of the corresponding class is used instead. 
+ imports : list[ImportStatement or dict] + list import statements required by the test, with each list item + consisting of 'module', 'name', and optionally 'alias' keys + directive : str + any doctest directive to be applied to the cmdline line """ cmdline: str = attrs.field(metadata={"help": "the expected cmdline output"}) @@ -239,6 +287,14 @@ class DocTestGenerator: '.mock()' method of the corresponding class is used instead.""" }, ) + imports: ty.List[ImportStatement] = attrs.field( + factory=list, + converter=from_list_to_imports, + metadata={ + "help": """list import statements required by the test, with each list item + consisting of 'module', 'name', and optionally 'alias' keys""" + } + ) directive: str = attrs.field( default=None, metadata={ @@ -247,28 +303,6 @@ class DocTestGenerator: ) -T = ty.TypeVar("T") - - -def from_dict_converter( - obj: ty.Union[T, dict], klass: ty.Type[T], allow_none=False -) -> T: - if obj is None: - if allow_none: - converted = None - else: - converted = klass() - elif isinstance(obj, dict): - converted = klass(**obj) - elif isinstance(obj, klass): - converted = obj - else: - raise TypeError( - f"Input must be of type {klass} or dict, not {type(obj)}: {obj}" - ) - return converted - - def from_dict_to_inputs(obj: ty.Union[InputsConverter, dict]) -> InputsConverter: return from_dict_converter(obj, InputsConverter) @@ -404,12 +438,12 @@ def generate(self, package_root: Path): filename_test = testdir / f"test_{self.task_name.lower()}.py" # filename_test_run = testdir / f"test_run_{self.task_name.lower()}.py" - self.write_test( + self.write_tests( filename_test, input_fields=input_fields, nonstd_types=nonstd_types, ) - # self.write_test(filename_test=filename_test_run, run=True) + # self.write_tests(filename_test=filename_test_run, run=True) def convert_input_fields(self): """creating fields list for pydra input spec""" @@ -658,17 +692,17 @@ def types_to_names(spec_fields): spec_str = ( "from pydra.engine import specs \nfrom pydra 
import ShellCommandTask \n" ) - spec_str += self.import_types(nonstd_types) spec_str += functions_str spec_str += f"input_fields = {input_fields_str}\n" spec_str += f"{self.task_name}_input_spec = specs.SpecInfo(name='Input', fields=input_fields, bases=(specs.ShellSpec,))\n\n" spec_str += f"output_fields = {output_fields_str}\n" spec_str += f"{self.task_name}_output_spec = specs.SpecInfo(name='Output', fields=output_fields, bases=(specs.ShellOutSpec,))\n\n" spec_str += f"class {self.task_name}(ShellCommandTask):\n" - if self.doctest is not None: - spec_str += self.create_doctest( - input_fields=input_fields, nonstd_types=nonstd_types - ) + spec_str += ' """\n' + spec_str += self.create_doctests( + input_fields=input_fields, nonstd_types=nonstd_types + ) + spec_str += ' """\n' spec_str += f" input_spec = {self.task_name}_input_spec\n" spec_str += f" output_spec = {self.task_name}_output_spec\n" spec_str += f" executable='{self.nipype_interface._cmd}'\n" @@ -677,6 +711,9 @@ def types_to_names(spec_fields): spec_str = spec_str.replace(*tp_repl) spec_str = re.sub(r'"TYPE_(\w+)"', r"\1", spec_str) + imports = self.construct_imports(nonstd_types, spec_str, include_task=False) + spec_str = "\n".join(imports) + "\n\n" + spec_str + spec_str_black = black.format_file_contents( spec_str, fast=False, mode=black.FileMode() ) @@ -684,64 +721,104 @@ def types_to_names(spec_fields): with open(filename, "w") as f: f.write(spec_str_black) - @staticmethod - def import_types(nonstd_types: ty.List[type], prefix="") -> str: - imports = f"{prefix}import typing as ty\n{prefix}from pathlib import Path\n" - for tp in nonstd_types: - imports += f"{prefix}from {tp.__module__} import {tp.__name__}\n" - return imports - - def write_test(self, filename_test, input_fields, nonstd_types, run=False): - spec_str = "import os\nimport pytest\n" - spec_str += self.import_types(nonstd_types=nonstd_types) - spec_str += f"from {self.output_module} import {self.task_name}\n" - spec_str += "\n" - if 
self.test.xfail: - spec_str += "@pytest.mark.xfail\n" - spec_str += f"@pytest.mark.timeout_pass(timeout={self.test.timeout})\n" - spec_str += f"def test_{self.task_name.lower()}():\n" - spec_str += f" task = {self.task_name}()\n" - for field in input_fields: - nm, tp = field[:2] - # Try to get a sensible value for the traits value + def construct_imports(self, nonstd_types: ty.List[type], spec_str="", base=(), include_task=True) -> ty.List[str]: + """Constructs a list of imports to include at start of file""" + stmts: ty.Dict[str, str] = {} + + def add_import(stmt): + match = re.match(r".* as (\w+)\s*", stmt) + if not match: + match = re.match(r".*import (\w+)\s*$", stmt) + if not match: + raise ValueError(f"Unrecognised import statment {stmt}") + token = match.group(1) try: - value = self.test.inputs[nm] + prev_stmt = stmts[token] except KeyError: - if len(field) == 4: # field has default - if isinstance(field[2], bool): - value = str(field[2]) - else: - value = json.dumps(field[2]) + pass + else: + if prev_stmt != stmt: + raise ValueError( + f"Cannot add import statement {stmt} as it clashes with " + f"previous import {prev_stmt}" + ) + stmts[token] = stmt + + for b in base: + add_import(b) + + if re.match(r".*(?>> ") - doctest += f" >>> from {self.output_module} import {self.task_name}\n" - doctest += f" >>> task = {self.task_name}()\n" - for field in input_fields: - nm, tp = field[:2] - try: - val = self.doctest.inputs[nm] - except KeyError: - if is_fileset(tp): - val = f"{tp.__name__}.mock()" + doctest_str = "" + for doctest in self.doctests: + doctest_str += f" >>> task = {self.task_name}()\n" + for field in input_fields: + nm, tp = field[:2] + try: + val = doctest.inputs[nm] + except KeyError: + if is_fileset(tp): + val = f"{tp.__name__}.mock()" + else: + val = attrs.NOTHING else: - val = attrs.NOTHING - else: - if type(val) is str: - val = f'"{val}"' - if val is not attrs.NOTHING: - doctest += f" >>> task.inputs.{nm} = {val}\n" - doctest += " >>> 
task.cmdline\n" - doctest += f" '{self.doctest.cmdline}'" - doctest += '\n """\n' - return doctest + if type(val) is str: + val = f'"{val}"' + if val is not attrs.NOTHING: + doctest_str += f" >>> task.inputs.{nm} = {val}\n" + doctest_str += " >>> task.cmdline\n" + doctest_str += f" '{doctest.cmdline}'" + doctest_str += '\n' + + imports = self.construct_imports(nonstd_types, doctest_str) + if imports: + doctest_str = " >>> " + "\n >>> ".join(imports) + doctest_str + + return ' Examples\n -------\n' + doctest_str INPUT_KEYS = [ "allowed_values", diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index 36107f08..f1433338 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -3,7 +3,6 @@ import tempfile import re from importlib import import_module -import subprocess as sp from copy import copy import shutil import tarfile @@ -18,7 +17,7 @@ from fileformats.generic import File from fileformats.medimage import Nifti1, NiftiGz, Bval, Bvec from fileformats.misc import Dicom -from fileformats.text import Txt +from fileformats.text import TextFile from fileformats.datascience import MatFile, DatFile from fileformats.serialization import Xml import nipype.interfaces.base.core @@ -32,7 +31,7 @@ RESOURCES_DIR = Path(__file__).parent / "resources" -EXPECTED_FORMATS = [Nifti1, NiftiGz, Txt, MatFile, DatFile, Xml] +EXPECTED_FORMATS = [Nifti1, NiftiGz, TextFile, MatFile, DatFile, Xml] def download_tasks_template(output_path: Path): @@ -92,6 +91,7 @@ def generate_packages( not_interfaces = [] unmatched_formats = [] ambiguous_formats = [] + has_doctests = set() for pkg in to_import["packages"]: pkg_dir = initialise_task_repo(output_dir, task_template, pkg) @@ -126,6 +126,7 @@ def generate_packages( file_inputs, file_outputs, genfile_outputs, + multi_inputs, ) = generate_spec_preamble(nipype_interface) # Create "stubs" for each of the available fields @@ -160,13 +161,22 @@ def fields_stub(name, category_class, 
values=None): # values doc_str = nipype_interface.__doc__ if nipype_interface.__doc__ else "" doc_str = re.sub(r"\n\s+\.\.\.\s+", "", doc_str) + prev_block = "" + doctest_blocks = [] + for para in doc_str.split("\n\n"): + if "cmdline" in para: + doctest_blocks.append(prev_block + para) + prev_block = "" + else: + prev_block += para + doctests: ty.List[DocTestGenerator] = [] tests: ty.List[TestGenerator] = [] - for doctest_str in doc_str.split("\n\n"): + for doctest_str in doctest_blocks: if ">>>" in doctest_str: try: - cmdline, inpts, directive = extract_doctest_inputs( + cmdline, inpts, directive, imports = extract_doctest_inputs( doctest_str, interface ) except ValueError: @@ -255,7 +265,9 @@ def combine_types(type_, prev_type): tests.append( fields_stub( - "test", TestGenerator, {"inputs": copy(test_inpts)} + "test", + TestGenerator, + {"inputs": copy(test_inpts), "imports": imports}, ) ) doctests.append( @@ -265,10 +277,12 @@ def combine_types(type_, prev_type): { "cmdline": cmdline, "inputs": doctest_inpts, + "imports": imports, "directive": directive, }, ) ) + has_doctests.add(f"{module.replace('/', '.')}.{interface}") # Add default template names for fields not explicitly listed in doctests for outpt in genfile_outputs: @@ -284,12 +298,11 @@ def combine_types(type_, prev_type): ext = frmt.strext output_templates[outpt] = outpt + ext - def to_mime_like(type_): - if ty.get_origin(type_) is ty.Union: - mime_like = ",".join(a.mime_like for a in ty.get_args(type_)) - else: - mime_like = type_.mime_like - return mime_like + # convert to multi-input types to lists + input_types = { + n: ty.List[t] if n in multi_inputs else t + for n, t in input_types.items() + } spec_stub = { "name": interface, @@ -298,14 +311,20 @@ def to_mime_like(type_): "inputs": fields_stub( "inputs", InputsConverter, - {"types": {n: to_mime_like(t) for n, t in input_types.items()}}, + { + "types": { + n: fileformats.core.utils.to_mime(t) + for n, t in input_types.items() + } + }, ), 
"outputs": fields_stub( "outputs", OutputsConverter, { "types": { - n: to_mime_like(t) for n, t in output_types.items() + n: fileformats.core.utils.to_mime(t) + for n, t in output_types.items() }, "templates": output_templates, }, @@ -365,6 +384,8 @@ def to_mime_like(type_): print("\n".join(sorted(unmatched_extensions))) print("\nAmbiguous formats") print("\n".join(str(p) for p in ambiguous_formats)) + print("\nWith doctests") + print("\n".join(sorted(has_doctests))) def initialise_task_repo(output_dir, task_template: Path, pkg: str) -> Path: @@ -426,11 +447,12 @@ def copy_ignore(_, names): def generate_spec_preamble( nipype_interface, -) -> ty.Tuple[str, ty.List[str], ty.List[str], ty.List[str]]: +) -> ty.Tuple[str, ty.List[str], ty.List[str], ty.List[str], ty.List[str]]: """Generate preamble comments at start of file with args and doc strings""" inputs_desc = "" file_inputs = [] genfile_outputs = [] + multi_inputs = [] if nipype_interface.input_spec: for inpt_name, inpt in nipype_interface.input_spec().traits().items(): if inpt_name in ("trait_added", "trait_modified"): @@ -439,11 +461,11 @@ def generate_spec_preamble( inputs_desc += f"# {inpt_name} : {type(inpt.trait_type).__name__.lower()}\n# {inpt_desc}\n" if inpt.genfile: genfile_outputs.append(inpt_name) - elif type(inpt.trait_type).__name__ in ( - "File", - "InputMultiObject", - ): + elif type(inpt.trait_type).__name__ == "File": + file_inputs.append(inpt_name) + elif type(inpt.trait_type).__name__ == "InputMultiObject": file_inputs.append(inpt_name) + multi_inputs.append(inpt_name) file_outputs = [] outputs_desc = "" if nipype_interface.output_spec: @@ -473,12 +495,14 @@ def generate_spec_preamble( # ---- # {doc_string}\n""" ).replace(" #", "#") - return preamble, file_inputs, file_outputs, genfile_outputs + return preamble, file_inputs, file_outputs, genfile_outputs, multi_inputs def extract_doctest_inputs( doctest: str, interface: str -) -> ty.Tuple[ty.Optional[str], dict[str, ty.Any], ty.Optional[str]]: 
+) -> ty.Tuple[ + ty.Optional[str], dict[str, ty.Any], ty.Optional[str], ty.List[ty.Dict[str, str]] +]: """Extract the inputs passed to tasks in the doctests of Nipype interfaces Parameters @@ -497,7 +521,7 @@ def extract_doctest_inputs( directive : str any doctest directives found after the cmdline, e.g. ELLIPSIS""" match = re.search( - r"""^\s+>>> (?:\w+)\.cmdline(\s*# doctest: .*)?\n\s*('|")(.*)(?:'|")?\s*.*(?!>>>)\2""", + r"""^\s+>>> (?:.*)\.cmdline(\s*# doctest: .*)?\n\s*('|")(.*)(?:'|")?\s*.*(?!>>>)\2""", doctest, flags=re.MULTILINE | re.DOTALL, ) @@ -506,6 +530,8 @@ def extract_doctest_inputs( cmdline = re.sub(r"\s+", " ", cmdline) cmdline = cmdline.replace("'", '"') directive = match.group(2) + if directive == '"': + directive = None else: cmdline = directive = None doctest_inpts = { @@ -515,26 +541,8 @@ def extract_doctest_inputs( doctest, ) } - # if not doctest_inpts: - # doctest_inpts = { - # n: v.replace("'", '"') - # for n, v in re.findall( - # r"""\.\.\.\s+(\w+)=(.*) *\n""", - # doctest, - # ) - # } - # if doctest_inpts: - # match = re.search( - # interface + r"""\((?>>.*(?>> import (.*)$", ln) + if match: + for mod in match.group(1).split(","): + imports.append({"module": mod.strip()}) + else: + match = re.match(r".*>>> from ([\w\.]+) import (.*)", ln) + if not match: + raise ValueError(f"Could not parse import statement: {ln}") + module = match.group(1) + if "nipype.interfaces" in module: + continue + for atr in match.group(2).split(","): + match = re.match(r"(\w+) as ((\w+))", atr) + if match: + name = match.group(1) + alias = match.group(2) + else: + name = atr + alias = None + imports.append( + { + "module": module, + "name": name, + "alias": alias, + } + ) if not doctest_inpts: raise ValueError(f"Could not parse doctest:\n{doctest}") - return cmdline, doctest_inpts, directive + return cmdline, doctest_inpts, directive, imports if __name__ == "__main__": From be84daedcb7fc431f9b38503317e44c982969cdf Mon Sep 17 00:00:00 2001 From: Tom Close Date: 
Mon, 31 Jul 2023 15:23:29 +1000 Subject: [PATCH 23/42] touching up package initialisation in create_packages --- nipype2pydra/task.py | 55 ++++++++++------ scripts/pkg_gen/create_packages.py | 3 + scripts/pkg_gen/resources/conftest.py | 64 +++++++++++++------ .../pkg_gen/resources/nipype-auto-convert.py | 7 +- scripts/pkg_gen/resources/pkg_init.py | 38 +++++++---- 5 files changed, 115 insertions(+), 52 deletions(-) diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py index 41b3ea87..a889b796 100644 --- a/nipype2pydra/task.py +++ b/nipype2pydra/task.py @@ -75,13 +75,14 @@ def types_converter(types: ty.Dict[str, ty.Union[str, type]]) -> ty.Dict[str, ty @attrs.define class ImportStatement: - module: str name: ty.Optional[str] = None alias: ty.Optional[str] = None -def from_list_to_imports(obj: ty.Union[ty.List[ImportStatement], list]) -> ty.List[ImportStatement]: +def from_list_to_imports( + obj: ty.Union[ty.List[ImportStatement], list] +) -> ty.List[ImportStatement]: if obj is None: return [] return [from_dict_converter(t, ImportStatement) for t in obj] @@ -231,7 +232,7 @@ class TestGenerator: metadata={ "help": """list import statements required by the test, with each list item consisting of 'module', 'name', and optionally 'alias' keys""" - } + }, ) expected_outputs: ty.Dict[str, str] = attrs.field( factory=dict, @@ -293,7 +294,7 @@ class DocTestGenerator: metadata={ "help": """list import statements required by the test, with each list item consisting of 'module', 'name', and optionally 'alias' keys""" - } + }, ) directive: str = attrs.field( default=None, @@ -311,13 +312,17 @@ def from_dict_to_outputs(obj: ty.Union[OutputsConverter, dict]) -> OutputsConver return from_dict_converter(obj, OutputsConverter) -def from_list_to_tests(obj: ty.Union[ty.List[TestGenerator], list]) -> ty.List[TestGenerator]: +def from_list_to_tests( + obj: ty.Union[ty.List[TestGenerator], list] +) -> ty.List[TestGenerator]: if obj is None: return [] return [from_dict_converter(t, 
TestGenerator) for t in obj] -def from_list_to_doctests(obj: ty.Union[ty.List[DocTestGenerator], list]) -> ty.List[DocTestGenerator]: +def from_list_to_doctests( + obj: ty.Union[ty.List[DocTestGenerator], list] +) -> ty.List[DocTestGenerator]: if obj is None: return [] return [from_dict_converter(t, DocTestGenerator) for t in obj] @@ -689,10 +694,7 @@ def types_to_names(spec_fields): input_fields_str = types_to_names(spec_fields=input_fields) output_fields_str = types_to_names(spec_fields=output_fields) functions_str = self.function_callables() - spec_str = ( - "from pydra.engine import specs \nfrom pydra import ShellCommandTask \n" - ) - spec_str += functions_str + spec_str = functions_str spec_str += f"input_fields = {input_fields_str}\n" spec_str += f"{self.task_name}_input_spec = specs.SpecInfo(name='Input', fields=input_fields, bases=(specs.ShellSpec,))\n\n" spec_str += f"output_fields = {output_fields_str}\n" @@ -711,7 +713,15 @@ def types_to_names(spec_fields): spec_str = spec_str.replace(*tp_repl) spec_str = re.sub(r'"TYPE_(\w+)"', r"\1", spec_str) - imports = self.construct_imports(nonstd_types, spec_str, include_task=False) + imports = self.construct_imports( + nonstd_types, + spec_str, + include_task=False, + base=( + "from pydra.engine import specs", + "from pydra.engine import ShellCommandTask", + ), + ) spec_str = "\n".join(imports) + "\n\n" + spec_str spec_str_black = black.format_file_contents( @@ -721,7 +731,9 @@ def types_to_names(spec_fields): with open(filename, "w") as f: f.write(spec_str_black) - def construct_imports(self, nonstd_types: ty.List[type], spec_str="", base=(), include_task=True) -> ty.List[str]: + def construct_imports( + self, nonstd_types: ty.List[type], spec_str="", base=(), include_task=True + ) -> ty.List[str]: """Constructs a list of imports to include at start of file""" stmts: ty.Dict[str, str] = {} @@ -756,7 +768,11 @@ def add_import(stmt): if stmt.name is None: add_import(f"import {stmt.module}") else: - nm = stmt.name 
if stmt.alias is None else f"{stmt.name} as {stmt.alias}" + nm = ( + stmt.name + if stmt.alias is None + else f"{stmt.name} as {stmt.alias}" + ) add_import(f"from {stmt.module} import {nm}") for tp in nonstd_types: add_import(f"from {tp.__module__} import {tp.__name__}") @@ -766,12 +782,11 @@ def add_import(stmt): return list(stmts.values()) def write_tests(self, filename_test, input_fields, nonstd_types, run=False): - spec_str = "" for i, test in enumerate(self.tests, start=1): if test.xfail: spec_str += "@pytest.mark.xfail\n" - spec_str += f"@pytest.mark.timeout_pass(timeout={test.timeout})\n" + spec_str += f"@pass_after_timeout(seconds={test.timeout})\n" spec_str += f"def test_{self.task_name.lower()}_{i}():\n" spec_str += f" task = {self.task_name}()\n" for field in input_fields: @@ -817,7 +832,11 @@ def write_tests(self, filename_test, input_fields, nonstd_types, run=False): spec_str += f" assert res.output.{name} == {value}\n" spec_str += "\n\n\n" - imports = self.construct_imports(nonstd_types, spec_str, base={"import os", "import pytest"}) + imports = self.construct_imports( + nonstd_types, + spec_str, + base={"import pytest", "from conftest import pass_after_timeout"}, + ) spec_str = "\n".join(imports) + "\n\n" + spec_str spec_str_black = black.format_file_contents( @@ -848,13 +867,13 @@ def create_doctests(self, input_fields, nonstd_types): doctest_str += f" >>> task.inputs.{nm} = {val}\n" doctest_str += " >>> task.cmdline\n" doctest_str += f" '{doctest.cmdline}'" - doctest_str += '\n' + doctest_str += "\n" imports = self.construct_imports(nonstd_types, doctest_str) if imports: doctest_str = " >>> " + "\n >>> ".join(imports) + doctest_str - return ' Examples\n -------\n' + doctest_str + return " Examples\n -------\n" + doctest_str INPUT_KEYS = [ "allowed_values", diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index f1433338..3a6307d3 100644 --- a/scripts/pkg_gen/create_packages.py +++ 
b/scripts/pkg_gen/create_packages.py @@ -432,6 +432,9 @@ def copy_ignore(_, names): # rename tasks directory (pkg_dir / "pydra" / "tasks" / "CHANGEME").rename(pkg_dir / "pydra" / "tasks" / pkg) + # Add in modified __init__.py + shutil.copy(RESOURCES_DIR / "pkg_init.py", pkg_dir / "pydra" / "tasks" / pkg / "__init__.py") + # Replace "CHANGEME" string with pkg name for fspath in pkg_dir.glob("**/*"): if fspath.is_dir(): diff --git a/scripts/pkg_gen/resources/conftest.py b/scripts/pkg_gen/resources/conftest.py index 9ce8ebb1..599a8ab4 100644 --- a/scripts/pkg_gen/resources/conftest.py +++ b/scripts/pkg_gen/resources/conftest.py @@ -1,11 +1,13 @@ import os -import typing as ty import time import logging from pathlib import Path +from traceback import format_exc import tempfile import threading +from dataclasses import dataclass import pytest +from _pytest.runner import TestReport try: @@ -51,41 +53,67 @@ def work_dir(): return Path(work_dir) -def timeout_pass(timeout): +def pass_after_timout(seconds, poll_interval=0.1): """Cancel the test after a certain period, after which it is assumed that the arguments passed to the underying command have passed its internal validation (so we don't have to wait until the tool completes) Parameters ---------- - timeout : int - the number of seconds to wait until cancelling the test + seconds : int + the number of seconds to wait until cancelling the test (and marking it as passed) """ + def decorator(test_func): def wrapper(*args, **kwargs): - result = [None] - exception = [False] - timeout_event = threading.Event() + @dataclass + class TestState: + """A way of passing a reference to the result that can be updated by + the test thread""" + + result = None + exception = None + + state = TestState() def test_runner(): try: - result[0] = test_func(*args, **kwargs) - except Exception: - exception[0] = True + state.result = test_func(*args, **kwargs) + except Exception as e: + state.exception = e + # raise + # state.trace_back = 
format_exc() + # raise thread = threading.Thread(target=test_runner) thread.start() - timeout_event.wait(timeout) - if thread.is_alive(): - timeout_event.set() - thread.join() - return result[0] + # Calculate the end time for the timeout + end_time = time.time() + seconds - if exception[0]: - raise Exception("Test raised an exception during execution.") + while thread.is_alive() and time.time() < end_time: + time.sleep(poll_interval) - return result[0] + if thread.is_alive(): + thread.join() + return state.result + + if state.trace_back: + raise state.exception + + outcome = "passed after timeout" + rep = TestReport.from_item_and_call( + item=args[0], + when="call", + excinfo=None, + outcome=outcome, + sections=None, + duration=0, + keywords=None, + ) + args[0].ihook.pytest_runtest_logreport(report=rep) + + return state.result return wrapper diff --git a/scripts/pkg_gen/resources/nipype-auto-convert.py b/scripts/pkg_gen/resources/nipype-auto-convert.py index 227c9ba1..cacc9d15 100644 --- a/scripts/pkg_gen/resources/nipype-auto-convert.py +++ b/scripts/pkg_gen/resources/nipype-auto-convert.py @@ -24,10 +24,6 @@ f"using development version of nipype2pydra ({nipype2pydra.__version__}), " f"development component will be dropped in {PKG_NAME} package version" ) -n2p_version = nipype2pydra.__version__.split(".dev")[0] - -auto_version = f"{nipype.__version__}.{n2p_version}" - # Insert specs dir into path so we can load callables modules sys.path.insert(0, str(SPECS_DIR)) @@ -59,7 +55,8 @@ f.write( f"""# Auto-generated by {__file__}, do not edit as it will be overwritten -auto_version = {auto_version} +nipype_version = "{nipype.__version__}" +nipype2pydra_version = "{nipype2pydra.__version__.split('.dev')[0]}" """ ) diff --git a/scripts/pkg_gen/resources/pkg_init.py b/scripts/pkg_gen/resources/pkg_init.py index fa251d0d..a5644cd8 100644 --- a/scripts/pkg_gen/resources/pkg_init.py +++ b/scripts/pkg_gen/resources/pkg_init.py @@ -3,20 +3,36 @@ imported. 
>>> import pydra.engine ->>> import pydra.tasks.freesurfer +>>> import pydra.tasks.CHANGEME """ +from warnings import warn +from pathlib import Path +pkg_path = Path(__file__).parent.parent + try: from ._version import __version__ as main_version except ImportError: - pass - -from .auto._version import auto_version # Get version of - -if ".dev" in main_version: - main_version, dev_version = main_version.split(".dev") + raise RuntimeError( + "pydra-CHANGEME has not been properly installed, please run " + f"`pip install -e {str(pkg_path)}` to install a development version" + ) +try: + from .auto._version import nipype_version, nipype2pydra_version +except ImportError: + warn( + "Nipype interfaces haven't been automatically converted from their specs in " + f"`nipype-auto-conv`. Please run `{str(pkg_path / 'nipype-auto-conv' / 'generate')}` " + "to generated the converted Nipype interfaces in pydra.tasks.CHANGEME.auto" + ) + __version__ = main_version else: - dev_version = None + n_ver = nipype_version.replace(".", "_") + n2p_ver = nipype2pydra_version.replace(".", "_") + __version__ = ( + main_version + + ("_" if "+" in main_version else "+") + + f"nipype{n_ver}_nipype2pydra{n2p_ver}" + ) + -__version__ = main_version + "." 
+ auto_version -if dev_version: - __version__ += ".dev" + dev_version +__all__ = ["__version__"] From cb5deb2fe4d01175bbdd62418aae7f4901dfb0f8 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Mon, 31 Jul 2023 16:11:22 +1000 Subject: [PATCH 24/42] debugged input typing --- nipype2pydra/task.py | 49 ++++++++++++++++-------------- scripts/pkg_gen/create_packages.py | 9 +++--- 2 files changed, 31 insertions(+), 27 deletions(-) diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py index a889b796..24e3f0b5 100644 --- a/nipype2pydra/task.py +++ b/nipype2pydra/task.py @@ -610,7 +610,7 @@ def pydra_type_converter(self, field, spec_type, name): raise Exception( f"spec_type has to be input or output, but {spec_type} provided" ) - types_dict = self.inputs.types if spec_type == "inputs" else self.outputs.types + types_dict = self.inputs.types if spec_type == "input" else self.outputs.types try: return types_dict[name] except KeyError: @@ -711,7 +711,7 @@ def types_to_names(spec_fields): for tp_repl in self.TYPE_REPLACE: spec_str = spec_str.replace(*tp_repl) - spec_str = re.sub(r'"TYPE_(\w+)"', r"\1", spec_str) + spec_str = re.sub(r"'TYPE_(\w+)'", r"\1", spec_str) imports = self.construct_imports( nonstd_types, @@ -724,12 +724,12 @@ def types_to_names(spec_fields): ) spec_str = "\n".join(imports) + "\n\n" + spec_str - spec_str_black = black.format_file_contents( + spec_str = black.format_file_contents( spec_str, fast=False, mode=black.FileMode() ) with open(filename, "w") as f: - f.write(spec_str_black) + f.write(spec_str) def construct_imports( self, nonstd_types: ty.List[type], spec_str="", base=(), include_task=True @@ -802,26 +802,31 @@ def write_tests(self, filename_test, input_fields, nonstd_types, run=False): value = json.dumps(field[2]) else: assert len(field) == 3 + # Attempt to pick a sensible value for field + trait = self.nipype_interface.input_spec.class_traits()[nm] + if isinstance(trait, traits.trait_types.Enum): + value = trait.values[0] + elif isinstance(trait, 
traits.trait_types.Range): + value = (trait.high - trait.low) / 2.0 + elif isinstance(trait, traits.trait_types.Bool): + value = True + elif isinstance(trait, traits.trait_types.Int): + value = 1 + elif isinstance(trait, traits.trait_types.Float): + value = 1.0 + elif isinstance(trait, traits.trait_types.List): + value = [1] * trait.minlen + elif isinstance(trait, traits.trait_types.Tuple): + value = tuple([1] * len(trait.types)) + else: + value = attrs.NOTHING + else: + if value is None: if is_fileset(tp): value = f"{tp.__name__}.sample()" - else: - trait = self.nipype_interface.input_spec.class_traits()[nm] - if isinstance(trait, traits.trait_types.Enum): - value = trait.values[0] - elif isinstance(trait, traits.trait_types.Range): - value = (trait.high - trait.low) / 2.0 - elif isinstance(trait, traits.trait_types.Bool): - value = True - elif isinstance(trait, traits.trait_types.Int): - value = 1 - elif isinstance(trait, traits.trait_types.Float): - value = 1.0 - elif isinstance(trait, traits.trait_types.List): - value = [1] * trait.minlen - elif isinstance(trait, traits.trait_types.Tuple): - value = tuple([1] * len(trait.types)) - else: - value = attrs.NOTHING + elif ty.get_origin(tp) in (list, ty.Union) and is_fileset(ty.get_args(tp)[0]): + arg_tp = ty.get_args(tp)[0] + value = f"{arg_tp.__name__}.sample()" if value is not attrs.NOTHING: spec_str += f" task.inputs.{nm} = {value}\n" if hasattr(self.nipype_interface, "_cmd"): diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index 3a6307d3..b9f2deff 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -244,14 +244,14 @@ def combine_types(type_, prev_type): ) return type_ - test_inpts = {} - doctest_inpts = {} + test_inpts: ty.Dict[str, ty.Optional[ty.Type]] = {} for name, val in inpts.items(): if name in file_inputs: guessed_type = guess_type(val) input_types[name] = combine_types( guessed_type, input_types[name] ) + test_inpts[name] = 
None else: test_inpts[name] = val if name in file_outputs: @@ -261,13 +261,12 @@ def combine_types(type_, prev_type): ) if name in genfile_outputs: output_templates[name] = val - doctest_inpts[name] = None if name in file_inputs else val tests.append( fields_stub( "test", TestGenerator, - {"inputs": copy(test_inpts), "imports": imports}, + {"inputs": test_inpts, "imports": imports}, ) ) doctests.append( @@ -276,7 +275,7 @@ def combine_types(type_, prev_type): DocTestGenerator, { "cmdline": cmdline, - "inputs": doctest_inpts, + "inputs": copy(test_inpts), "imports": imports, "directive": directive, }, From 4202323fd9aded05305a570dd460186c9b92d9e9 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Tue, 1 Aug 2023 14:45:00 +1000 Subject: [PATCH 25/42] debugged auto creation of packages --- nipype2pydra/task.py | 137 +++++++++++++----- scripts/pkg_gen/create_packages.py | 32 ++-- scripts/pkg_gen/resources/README.md | 47 ++++++ .../pkg_gen/resources/nipype-auto-convert.py | 11 +- scripts/pkg_gen/resources/pkg_init.py | 35 +++-- 5 files changed, 189 insertions(+), 73 deletions(-) create mode 100644 scripts/pkg_gen/resources/README.md diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py index 24e3f0b5..8bae92e1 100644 --- a/nipype2pydra/task.py +++ b/nipype2pydra/task.py @@ -16,6 +16,7 @@ from pydra.engine.helpers import ensure_list from .utils import import_module_from_path, is_fileset from fileformats.core import from_mime +from fileformats.generic import File T = ty.TypeVar("T") @@ -413,18 +414,24 @@ def nipype_output_spec(self) -> nipype.interfaces.base.BaseTraitedSpec: def task_name(self): return self.new_name if self.new_name is not None else self.name - def generate(self, package_root: Path): + def generate(self, package_root: Path, with_conftest: bool = True): """creating pydra input/output spec from nipype specs if write is True, a pydra Task class will be written to the file together with tests """ input_fields, inp_templates = self.convert_input_fields() 
output_fields = self.convert_output_spec(fields_from_template=inp_templates) - nonstd_types = set( - f[1] - for f in input_fields - if f[1].__module__ not in ["builtins", "pathlib", "typing"] - ) + nonstd_types = set() + + def add_nonstd_types(tp): + if ty.get_origin(tp) in (list, ty.Union): + for tp_arg in ty.get_args(tp): + add_nonstd_types(tp_arg) + elif tp.__module__ not in ["builtins", "pathlib", "typing"]: + nonstd_types.add(tp) + + for f in input_fields: + add_nonstd_types(f[1]) output_file = ( Path(package_root) @@ -448,7 +455,8 @@ def generate(self, package_root: Path): input_fields=input_fields, nonstd_types=nonstd_types, ) - # self.write_tests(filename_test=filename_test_run, run=True) + # with open(testdir / "conftest.py", "w") as f: + # f.write(self.CONFTEST) def convert_input_fields(self): """creating fields list for pydra input spec""" @@ -628,7 +636,7 @@ def pydra_type_converter(self, field, spec_type, name): tp_pdr = dict elif isinstance(tp, traits_extension.InputMultiObject): if isinstance(field.inner_traits[0].trait_type, traits_extension.File): - tp_pdr = specs.MultiInputFile + tp_pdr = ty.List[File] else: tp_pdr = specs.MultiInputObj elif isinstance(tp, traits_extension.OutputMultiObject): @@ -639,7 +647,7 @@ def pydra_type_converter(self, field, spec_type, name): elif isinstance(tp, traits.trait_types.List): if isinstance(field.inner_traits[0].trait_type, traits_extension.File): if spec_type == "input": - tp_pdr = specs.MultiInputFile + tp_pdr = ty.List[File] else: tp_pdr = specs.MultiOutputFile else: @@ -680,14 +688,14 @@ def types_to_names(spec_fields): spec_fields_str = [] for el in spec_fields: el = list(el) - try: - el[1] = el[1].__name__ - # add 'TYPE_' to the beginning of the name - el[1] = "TYPE_" + el[1] - except AttributeError: - el[1] = el[1]._name - # add 'TYPE_' to the beginning of the name - el[1] = "TYPE_" + el[1] + tp_str = str(el[1]) + if tp_str.startswith(">> task.inputs.{nm} = {val}\n" doctest_str += " >>> task.cmdline\n" 
doctest_str += f" '{doctest.cmdline}'" - doctest_str += "\n" + doctest_str += "\n\n\n" imports = self.construct_imports(nonstd_types, doctest_str) if imports: - doctest_str = " >>> " + "\n >>> ".join(imports) + doctest_str + doctest_str = " >>> " + "\n >>> ".join(imports) + "\n\n" + doctest_str - return " Examples\n -------\n" + doctest_str + return " Examples\n -------\n\n" + doctest_str INPUT_KEYS = [ "allowed_values", @@ -905,17 +911,74 @@ def create_doctests(self, input_fields, nonstd_types): "trait_modified", ] - TYPE_REPLACE = [ - ("'TYPE_File'", "File"), - ("'TYPE_bool'", "bool"), - ("'TYPE_str'", "str"), - ("'TYPE_Any'", "ty.Any"), - ("'TYPE_int'", "int"), - ("'TYPE_float'", "float"), - ("'TYPE_list'", "list"), - ("'TYPE_dict'", "dict"), - ("'TYPE_MultiInputObj'", "specs.MultiInputObj"), - ("'TYPE_MultiOutputObj'", "specs.MultiOutputObj"), - ("'TYPE_MultiInputFile'", "specs.MultiInputFile"), - ("'TYPE_MultiOutputFile'", "specs.MultiOutputFile"), - ] + CONFTEST = """import time +from traceback import format_exc +import threading +from dataclasses import dataclass +from _pytest.runner import TestReport + + +def pass_after_timout(seconds, poll_interval=0.1): + \"\"\"Cancel the test after a certain period, after which it is assumed that the arguments + passed to the underying command have passed its internal validation (so we don't have + to wait until the tool completes) + + Parameters + ---------- + seconds : int + the number of seconds to wait until cancelling the test (and marking it as passed) + \"\"\" + + def decorator(test_func): + def wrapper(*args, **kwargs): + @dataclass + class TestState: + \"\"\"A way of passing a reference to the result that can be updated by + the test thread\"\"\" + + result = None + trace_back = None + + state = TestState() + + def test_runner(): + try: + state.result = test_func(*args, **kwargs) + except Exception as e: + state.trace_back = format_exc() + raise + + thread = threading.Thread(target=test_runner) + thread.start() 
+ + # Calculate the end time for the timeout + end_time = time.time() + seconds + + while thread.is_alive() and time.time() < end_time: + time.sleep(poll_interval) + + if thread.is_alive(): + thread.join() + return state.result + + if state.trace_back: + raise state.trace_back + + outcome = "passed after timeout" + rep = TestReport.from_item_and_call( + item=args[0], + when="call", + excinfo=None, + outcome=outcome, + sections=None, + duration=0, + keywords=None, + ) + args[0].ihook.pytest_runtest_logreport(report=rep) + + return state.result + + return wrapper + + return decorator +""" diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index b9f2deff..bd954b6c 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -4,6 +4,7 @@ import re from importlib import import_module from copy import copy +import subprocess as sp import shutil import tarfile from pathlib import Path @@ -96,19 +97,18 @@ def generate_packages( for pkg in to_import["packages"]: pkg_dir = initialise_task_repo(output_dir, task_template, pkg) + spec_dir = pkg_dir / "nipype-auto-conv" / "specs" + spec_dir.mkdir(parents=True, exist_ok=True) + # Loop through all nipype modules and create specs for their auto-conversion for module, interfaces in to_import["interfaces"].items(): if module.split("/")[0] != pkg: continue - module_spec_dir = (pkg_dir / "nipype-auto-conv" / "specs").joinpath( - *module.split("/")[1:] - ) - module_spec_dir.mkdir(parents=True, exist_ok=True) - # Loop through all interfaces in module for interface in interfaces: - callables_fspath = module_spec_dir / f"{interface}_callables.py" + spec_name = interface.lower() + callables_fspath = spec_dir / f"{spec_name}_callables.py" spec_stub = {} # Import interface from module @@ -356,19 +356,19 @@ def combine_types(type_, prev_type): yaml_str, ) - with open(module_spec_dir / (interface + ".yaml"), "w") as f: + with open(spec_dir / (spec_name + ".yaml"), "w") as f: 
f.write(preamble + yaml_str) with open(callables_fspath, "w") as f: f.write( f'"""Module to put any functions that are referred to in {interface}.yaml"""\n' ) - # sp.check_call("git init", shell=True, cwd=pkg_dir) - # sp.check_call("git add --all", shell=True, cwd=pkg_dir) - # sp.check_call( - # 'git commit -m"initial commit of generated stubs"', shell=True, cwd=pkg_dir - # ) - # sp.check_call("git tag 0.1.0", shell=True, cwd=pkg_dir) + sp.check_call("git init", shell=True, cwd=pkg_dir) + sp.check_call("git add --all", shell=True, cwd=pkg_dir) + sp.check_call( + 'git commit -m"initial commit of generated stubs"', shell=True, cwd=pkg_dir + ) + sp.check_call("git tag 0.1.0", shell=True, cwd=pkg_dir) unmatched_extensions = set( File.decompose_fspath( @@ -421,6 +421,9 @@ def copy_ignore(_, names): gh_workflows_dir / "auto-release.yaml", ) + # Add modified README + shutil.copy(RESOURCES_DIR / "README.md", pkg_dir / "README.md") + # Add in conftest.py shutil.copy(RESOURCES_DIR / "conftest.py", pkg_dir / "conftest.py") @@ -468,6 +471,9 @@ def generate_spec_preamble( elif type(inpt.trait_type).__name__ == "InputMultiObject": file_inputs.append(inpt_name) multi_inputs.append(inpt_name) + elif type(inpt.trait_type).__name__ == "List" and type(inpt.trait_type.inner_traits()[0].handler).__name__ == "File": + file_inputs.append(inpt_name) + multi_inputs.append(inpt_name) file_outputs = [] outputs_desc = "" if nipype_interface.output_spec: diff --git a/scripts/pkg_gen/resources/README.md b/scripts/pkg_gen/resources/README.md new file mode 100644 index 00000000..0ffe8c69 --- /dev/null +++ b/scripts/pkg_gen/resources/README.md @@ -0,0 +1,47 @@ +# Pydra task package for CHANGEME + +This package contains a collection of Pydra task interfaces for CHANGEME. The basis for +which have been semi-automatically + +## Tests + +This package comes with a default set of test modules, and we encourage users to use pytest. 
+Tests can be discovered and run using: + +``` +pytest --doctest-modules pydra/tasks/* +``` + +## Continuous integration + +This template uses [GitHub Actions](https://docs.github.com/en/actions/) to run tests. To simulate +several plausible development or installation environments, we test over all Python versions +supported by Pydra, and install Pydra and the current package in both standard and +[editable](https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs) modes. + +The combination of standard/editable is in particular designed to ensure that namespace packaging +does not break. We strongly recommend keeping these tests in place for this reason, as one +non-compliant package can potentially affect Pydra or other task packages. + +In addition to verifying installations do not break or conflict, pytest is run on the package, +including all tests found in `test/` directories and [doctests]. + +Finally, packages are built and uploaded as artifacts for inspection. When a tag is pushed, +the packages are uploaded to PyPI if a valid [API token](https://pypi.org/help/#apitoken) is placed +in the [repository secrets](https://docs.github.com/en/actions/reference/encrypted-secrets). + +[doctests]: https://docs.python.org/3/library/doctest.html + +# Contributing to this package + +## For developers + +Install repo in developer mode from the source directory. 
It is also useful to +install pre-commit to take care of styling via [black](https://black.readthedocs.io/): + +``` +pip install -e .[dev] +pre-commit install +``` + + diff --git a/scripts/pkg_gen/resources/nipype-auto-convert.py b/scripts/pkg_gen/resources/nipype-auto-convert.py index cacc9d15..fc5c3eb2 100644 --- a/scripts/pkg_gen/resources/nipype-auto-convert.py +++ b/scripts/pkg_gen/resources/nipype-auto-convert.py @@ -34,11 +34,12 @@ with open(fspath) as f: spec = yaml.load(f, Loader=yaml.SafeLoader) - rel_pkg_path = ( - str(fspath.parent.relative_to(SPECS_DIR)).replace(os.path.sep, ".") - + "." - + fspath.stem - ) + rel_pkg_path = str(fspath.parent.relative_to(SPECS_DIR)).replace(os.path.sep, ".") + if rel_pkg_path == ".": + rel_pkg_path = fspath.stem + else: + rel_pkg_path += "." + fspath.stem + callables = import_module(rel_pkg_path + "_callables") module_name = fspath.stem.lower() diff --git a/scripts/pkg_gen/resources/pkg_init.py b/scripts/pkg_gen/resources/pkg_init.py index a5644cd8..75afa885 100644 --- a/scripts/pkg_gen/resources/pkg_init.py +++ b/scripts/pkg_gen/resources/pkg_init.py @@ -7,32 +7,31 @@ """ from warnings import warn from pathlib import Path + pkg_path = Path(__file__).parent.parent try: - from ._version import __version__ as main_version + from ._version import __version__ except ImportError: raise RuntimeError( "pydra-CHANGEME has not been properly installed, please run " f"`pip install -e {str(pkg_path)}` to install a development version" ) -try: - from .auto._version import nipype_version, nipype2pydra_version -except ImportError: - warn( - "Nipype interfaces haven't been automatically converted from their specs in " - f"`nipype-auto-conv`. 
Please run `{str(pkg_path / 'nipype-auto-conv' / 'generate')}` " - "to generated the converted Nipype interfaces in pydra.tasks.CHANGEME.auto" - ) - __version__ = main_version -else: - n_ver = nipype_version.replace(".", "_") - n2p_ver = nipype2pydra_version.replace(".", "_") - __version__ = ( - main_version - + ("_" if "+" in main_version else "+") - + f"nipype{n_ver}_nipype2pydra{n2p_ver}" - ) +if "nipype" not in __version__: + try: + from .auto._version import nipype_version, nipype2pydra_version + except ImportError: + warn( + "Nipype interfaces haven't been automatically converted from their specs in " + f"`nipype-auto-conv`. Please run `{str(pkg_path / 'nipype-auto-conv' / 'generate')}` " + "to generated the converted Nipype interfaces in pydra.tasks.CHANGEME.auto" + ) + else: + n_ver = nipype_version.replace(".", "_") + n2p_ver = nipype2pydra_version.replace(".", "_") + __version__ += ( + "_" if "+" in __version__ else "+" + ) + f"nipype{n_ver}_nipype2pydra{n2p_ver}" __all__ = ["__version__"] From 913e3a2a514367667037713b31d36c19b5236f27 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Tue, 1 Aug 2023 15:58:47 +1000 Subject: [PATCH 26/42] added new field for task name --- nipype2pydra/task.py | 10 +++------- scripts/pkg_gen/create_packages.py | 27 ++++++++++++++++++++++++--- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py index 8bae92e1..f9d8a820 100644 --- a/nipype2pydra/task.py +++ b/nipype2pydra/task.py @@ -358,12 +358,12 @@ class TaskConverter: a module, or path to a module, containing any required callables """ - name: str + task_name: str + nipype_name: str nipype_module: ModuleType = attrs.field( converter=lambda m: import_module(m) if not isinstance(m, ModuleType) else m ) output_module: str = attrs.field(default=None) - new_name: str = attrs.field(default=None) inputs: InputsConverter = attrs.field( factory=InputsConverter, converter=from_dict_to_inputs ) @@ -400,7 +400,7 @@ def 
__attrs_post_init__(self): @property def nipype_interface(self) -> nipype.interfaces.base.BaseInterface: - return getattr(self.nipype_module, self.name) + return getattr(self.nipype_module, self.nipype_name) @property def nipype_input_spec(self) -> nipype.interfaces.base.BaseInterfaceInputSpec: @@ -410,10 +410,6 @@ def nipype_input_spec(self) -> nipype.interfaces.base.BaseInterfaceInputSpec: def nipype_output_spec(self) -> nipype.interfaces.base.BaseTraitedSpec: return self.nipype_interface.output_spec() - @property - def task_name(self): - return self.new_name if self.new_name is not None else self.name - def generate(self, package_root: Path, with_conftest: bool = True): """creating pydra input/output spec from nipype specs if write is True, a pydra Task class will be written to the file together with tests diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index bd954b6c..b1e436a3 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -304,7 +304,8 @@ def combine_types(type_, prev_type): } spec_stub = { - "name": interface, + "task_name": to_snake_case(interface), + "nipype_name": interface, "nipype_module": nipype_module_str, "new_name": None, "inputs": fields_stub( @@ -347,9 +348,9 @@ def combine_types(type_, prev_type): tp_name = tp.__name__ else: tp_name = str(tp).lower().replace("typing.", "") - comment = f" # {tp_name} - " + field.metadata[ + comment = f" # {tp_name} - " + field.metadata[ "help" - ].replace("\n ", "\n # ") + ].replace("\n ", "\n # ") yaml_str = re.sub( f" {category_name}.{field.name}:" + r"(.*)", f" {field.name}:" + r"\1" + f"\n{comment}", @@ -596,6 +597,26 @@ def extract_doctest_inputs( return cmdline, doctest_inpts, directive, imports +def to_snake_case(name: str) -> str: + """ + Converts a PascalCase string to a snake_case one + """ + snake_str = '' + + # Loop through each character in the input string + for i, char in enumerate(name): + # If the current character is 
uppercase and it's not the first character, + # add an underscore before it and convert it to lowercase + if char.isupper() and i > 0: + snake_str += '_' + snake_str += char.lower() + else: + # Otherwise, just add the character as it is + snake_str += char.lower() + + return snake_str + + if __name__ == "__main__": import sys From 7044f65742b69b963e3cfc09459bbef3dcef1b07 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Tue, 1 Aug 2023 16:03:17 +1000 Subject: [PATCH 27/42] removed 'new_name' field from stubs --- scripts/pkg_gen/create_packages.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index b1e436a3..14ab3abd 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -307,7 +307,6 @@ def combine_types(type_, prev_type): "task_name": to_snake_case(interface), "nipype_name": interface, "nipype_module": nipype_module_str, - "new_name": None, "inputs": fields_stub( "inputs", InputsConverter, From 5d91c8ebb93f02585eb38f774d1fc9afbabe409e Mon Sep 17 00:00:00 2001 From: Tom Close Date: Tue, 1 Aug 2023 21:15:28 +1000 Subject: [PATCH 28/42] various debugging --- nipype2pydra/task.py | 57 ++++++++++--------- scripts/pkg_gen/create_packages.py | 52 +++++++++-------- scripts/pkg_gen/resources/README.md | 50 +++++++++------- .../resources/gh_workflows/auto-release.yaml | 36 ------------ .../resources/gh_workflows/pythonpackage.yaml | 55 +++++++++++++++--- .../nipype-auto-convert-requirements.txt | 1 + .../pkg_gen/resources/nipype-auto-convert.py | 8 ++- .../{conftest.py => timeout_pass.py} | 56 +----------------- 8 files changed, 145 insertions(+), 170 deletions(-) delete mode 100644 scripts/pkg_gen/resources/gh_workflows/auto-release.yaml rename scripts/pkg_gen/resources/{conftest.py => timeout_pass.py} (61%) diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py index f9d8a820..47bce2da 100644 --- a/nipype2pydra/task.py +++ b/nipype2pydra/task.py @@ -410,7 +410,7 
@@ def nipype_input_spec(self) -> nipype.interfaces.base.BaseInterfaceInputSpec: def nipype_output_spec(self) -> nipype.interfaces.base.BaseTraitedSpec: return self.nipype_interface.output_spec() - def generate(self, package_root: Path, with_conftest: bool = True): + def generate(self, package_root: Path): """creating pydra input/output spec from nipype specs if write is True, a pydra Task class will be written to the file together with tests """ @@ -451,8 +451,6 @@ def add_nonstd_types(tp): input_fields=input_fields, nonstd_types=nonstd_types, ) - # with open(testdir / "conftest.py", "w") as f: - # f.write(self.CONFTEST) def convert_input_fields(self): """creating fields list for pydra input spec""" @@ -505,10 +503,13 @@ def pydra_fld_input(self, field, nm): if getattr(field, "name_template"): template = getattr(field, "name_template") name_source = ensure_list(getattr(field, "name_source")) - - metadata_pdr["output_file_template"] = self.string_formats( - argstr=template, name=name_source[0] - ) + if name_source: + tmpl = self.string_formats( + argstr=template, name=name_source[0] + ) + else: + tmpl = template + metadata_pdr["output_file_template"] = tmpl if tp_pdr in [specs.File, specs.Directory]: tp_pdr = str elif getattr(field, "genfile"): @@ -660,22 +661,15 @@ def pydra_type_converter(self, field, spec_type, name): return tp_pdr def string_formats(self, argstr, name): - import re - - if "%s" in argstr: - argstr_new = argstr.replace("%s", f"{{{name}}}") - elif "%d" in argstr: - argstr_new = argstr.replace("%d", f"{{{name}}}") - elif "%f" in argstr: - argstr_new = argstr.replace("%f", f"{{{name}}}") - elif "%g" in argstr: - argstr_new = argstr.replace("%g", f"{{{name}}}") - elif len(re.findall("%[0-9.]+f", argstr)) == 1: - old_format = re.findall("%[0-9.]+f", argstr)[0] - argstr_new = argstr.replace(old_format, f"{{{name}:{old_format[1:]}}}") - else: - raise Exception(f"format from {argstr} is not supported TODO") - return argstr_new + keys = 
re.findall(r"(%[0-9\.]*(?:s|d|i|g|f))", argstr) + new_argstr = argstr + for i, key in enumerate(keys): + repl = f"{name}" if len(keys) == 1 else f"{name}[{i}]" + match = re.match(r"%([0-9\.]+)f", key) + if match: + repl += ":" + match.group(1) + new_argstr = new_argstr.replace(key, r"{" + repl + r"}", 1) + return new_argstr def write_task(self, filename, input_fields, nonstd_types, output_fields): """writing pydra task to the dile based on the input and output spec""" @@ -740,9 +734,9 @@ def construct_imports( stmts: ty.Dict[str, str] = {} def add_import(stmt): - match = re.match(r".* as (\w+)\s*", stmt) + match = re.match(r".*\s+as\s+(\w+)\s*", stmt) if not match: - match = re.match(r".*import (\w+)\s*$", stmt) + match = re.match(r".*import\s+(\w+)\s*$", stmt) if not match: raise ValueError(f"Unrecognised import statment {stmt}") token = match.group(1) @@ -767,6 +761,8 @@ def add_import(stmt): add_import("from pathlib import Path") for test in self.tests: for stmt in test.imports: + if stmt.module.startswith("nipype.testing"): + continue if stmt.name is None: add_import(f"import {stmt.module}") else: @@ -853,6 +849,11 @@ def write_tests(self, filename_test, input_fields, nonstd_types, run=False): with open(filename_test, "w") as f: f.write(spec_str_black) + conftest_fspath = filename_test.parent / "conftest.py" + if not conftest_fspath.exists(): + with open(conftest_fspath, "w") as f: + f.write(self.TIMEOUT_PASS) + def create_doctests(self, input_fields, nonstd_types): """adding doctests to the interfaces""" doctest_str = "" @@ -907,14 +908,14 @@ def create_doctests(self, input_fields, nonstd_types): "trait_modified", ] - CONFTEST = """import time + TIMEOUT_PASS = """import time from traceback import format_exc import threading from dataclasses import dataclass from _pytest.runner import TestReport -def pass_after_timout(seconds, poll_interval=0.1): +def pass_after_timeout(seconds, poll_interval=0.1): \"\"\"Cancel the test after a certain period, after which it is 
assumed that the arguments passed to the underying command have passed its internal validation (so we don't have to wait until the tool completes) @@ -940,7 +941,7 @@ class TestState: def test_runner(): try: state.result = test_func(*args, **kwargs) - except Exception as e: + except Exception: state.trace_back = format_exc() raise diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index 14ab3abd..c4237150 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -19,7 +19,7 @@ from fileformats.medimage import Nifti1, NiftiGz, Bval, Bvec from fileformats.misc import Dicom from fileformats.text import TextFile -from fileformats.datascience import MatFile, DatFile +from fileformats.datascience import TextMatrix, DatFile from fileformats.serialization import Xml import nipype.interfaces.base.core from nipype2pydra.task import ( @@ -32,7 +32,7 @@ RESOURCES_DIR = Path(__file__).parent / "resources" -EXPECTED_FORMATS = [Nifti1, NiftiGz, TextFile, MatFile, DatFile, Xml] +EXPECTED_FORMATS = [Nifti1, NiftiGz, TextFile, TextMatrix, DatFile, Xml] def download_tasks_template(output_path: Path): @@ -347,9 +347,9 @@ def combine_types(type_, prev_type): tp_name = tp.__name__ else: tp_name = str(tp).lower().replace("typing.", "") - comment = f" # {tp_name} - " + field.metadata[ - "help" - ].replace("\n ", "\n # ") + comment = f" # {tp_name} - " + field.metadata["help"].replace( + "\n ", "\n # " + ) yaml_str = re.sub( f" {category_name}.{field.name}:" + r"(.*)", f" {field.name}:" + r"\1" + f"\n{comment}", @@ -416,26 +416,21 @@ def copy_ignore(_, names): RESOURCES_DIR / "gh_workflows" / "pythonpackage.yaml", gh_workflows_dir / "pythonpackage.yaml", ) - shutil.copy( - RESOURCES_DIR / "gh_workflows" / "auto-release.yaml", - gh_workflows_dir / "auto-release.yaml", - ) # Add modified README shutil.copy(RESOURCES_DIR / "README.md", pkg_dir / "README.md") - # Add in conftest.py - shutil.copy(RESOURCES_DIR / "conftest.py", 
pkg_dir / "conftest.py") - # Add "pydra.tasks..auto to gitignore" with open(pkg_dir / ".gitignore", "a") as f: - f.write(f"\npydra/tasks/{pkg}/auto") + f.write(f"\n/pydra/tasks/{pkg}/auto" f"\n/pydra/tasks/_version.py\n") # rename tasks directory (pkg_dir / "pydra" / "tasks" / "CHANGEME").rename(pkg_dir / "pydra" / "tasks" / pkg) # Add in modified __init__.py - shutil.copy(RESOURCES_DIR / "pkg_init.py", pkg_dir / "pydra" / "tasks" / pkg / "__init__.py") + shutil.copy( + RESOURCES_DIR / "pkg_init.py", pkg_dir / "pydra" / "tasks" / pkg / "__init__.py" + ) # Replace "CHANGEME" string with pkg name for fspath in pkg_dir.glob("**/*"): @@ -471,7 +466,10 @@ def generate_spec_preamble( elif type(inpt.trait_type).__name__ == "InputMultiObject": file_inputs.append(inpt_name) multi_inputs.append(inpt_name) - elif type(inpt.trait_type).__name__ == "List" and type(inpt.trait_type.inner_traits()[0].handler).__name__ == "File": + elif ( + type(inpt.trait_type).__name__ == "List" + and type(inpt.trait_type.inner_traits()[0].handler).__name__ == "File" + ): file_inputs.append(inpt_name) multi_inputs.append(inpt_name) file_outputs = [] @@ -536,14 +534,14 @@ def extract_doctest_inputs( if match: cmdline = match.group(3) cmdline = re.sub(r"\s+", " ", cmdline) - cmdline = cmdline.replace("'", '"') + cmdline = cmdline.replace("'", '"') if '"' not in cmdline else cmdline directive = match.group(2) if directive == '"': directive = None else: cmdline = directive = None doctest_inpts = { - n: v.replace("'", '"') + n: v.replace("'", '"') if '"' not in v else v for n, v in re.findall( r"""\s+>>> (?:\w+)\.inputs\.(\w+) ?= ?(.*)\n""", doctest, @@ -557,7 +555,7 @@ def extract_doctest_inputs( arg_str = match.group(1) + ", " doctest_inpts.update( { - n: v.replace("'", '"') + n: v.replace("'", '"') if '"' not in v else v for n, v in re.findall(r"(\w+) *= *([^=]+), *", arg_str) } ) @@ -600,14 +598,22 @@ def to_snake_case(name: str) -> str: """ Converts a PascalCase string to a snake_case one """ - 
snake_str = '' + snake_str = "" # Loop through each character in the input string for i, char in enumerate(name): - # If the current character is uppercase and it's not the first character, - # add an underscore before it and convert it to lowercase - if char.isupper() and i > 0: - snake_str += '_' + # If the current character is uppercase and it's not the first character or + # followed by another uppercase character, add an underscore before it and + # convert it to lowercase + if ( + i > 0 + and (char.isupper() or char.isdigit()) + and ( + not (name[i - 1].isupper() or name[i - 1].isdigit()) + or ((i + 1) < len(name) and (name[i + 1].islower() or name[i + 1].islower())) + ) + ): + snake_str += "_" snake_str += char.lower() else: # Otherwise, just add the character as it is diff --git a/scripts/pkg_gen/resources/README.md b/scripts/pkg_gen/resources/README.md index 0ffe8c69..7a79db9f 100644 --- a/scripts/pkg_gen/resources/README.md +++ b/scripts/pkg_gen/resources/README.md @@ -1,36 +1,48 @@ # Pydra task package for CHANGEME -This package contains a collection of Pydra task interfaces for CHANGEME. The basis for -which have been semi-automatically +This package contains a collection of Pydra task interfaces for CHANGEME. The basis of +which have been semi-automatically converted from the corresponding [Nipype](https://github.com/nipy/nipype) +interfaces. + +## Automatically-generated vs manually-curated tasks + +Automatically generated tasks can be found in the `pydra.tasks.CHANGEME.auto` package. +These packages should be treated with extreme caution as they likely do not pass testing. +Generated tasks that have been edited and pass testing are imported into one or more of the +`pydra.tasks.CHANGEME.v*` packages, corresponding to the version of the CHANGEME toolkit +they are designed for. ## Tests -This package comes with a default set of test modules, and we encourage users to use pytest. 
-Tests can be discovered and run using: +This package comes with a battery of automatically generated test modules. To install +the necessary dependencies to run the tests ``` -pytest --doctest-modules pydra/tasks/* +pip install -e .[test] ``` -## Continuous integration +Then the tests, including [doctests](https://docs.python.org/3/library/doctest.html), can be launched using -This template uses [GitHub Actions](https://docs.github.com/en/actions/) to run tests. To simulate -several plausible development or installation environments, we test over all Python versions -supported by Pydra, and install Pydra and the current package in both standard and -[editable](https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs) modes. +``` +pytest --doctest-modules pydra/tasks/* +``` -The combination of standard/editable is in particular designed to ensure that namespace packaging -does not break. We strongly recommend keeping these tests in place for this reason, as one -non-compliant package can potentially affect Pydra or other task packages. +By default, the tests are set to time-out after 10s, after which the underlying tool is +assumed to have passed the validation/initialisation phase and we assume that it will +run to completion. To disable this and run the test(s) through to completion run -In addition to verifying installations do not break or conflict, pytest is run on the package, -including all tests found in `test/` directories and [doctests]. +``` +pytest --doctest-modules --timeout-pass 0 pydra/tasks/* +``` -Finally, packages are built and uploaded as artifacts for inspection. When a tag is pushed, -the packages are uploaded to PyPI if a valid [API token](https://pypi.org/help/#apitoken) is placed -in the [repository secrets](https://docs.github.com/en/actions/reference/encrypted-secrets). 
+## Continuous integration -[doctests]: https://docs.python.org/3/library/doctest.html +This template uses [GitHub Actions](https://docs.github.com/en/actions/) to run tests and +deploy packages to PYPI. New packages are built and uploaded when releases are created on +GitHub, or new releases of Nipype or the Nipype2Pydra conversion tool are released. +Releases triggered by updates to Nipype or Nipype2Pydra are signified by the `postN` +suffix where N = with '.'s stripped, e.g. +`v0.2.3post185010` corresponds to the v0.2.3 tag of # Contributing to this package diff --git a/scripts/pkg_gen/resources/gh_workflows/auto-release.yaml b/scripts/pkg_gen/resources/gh_workflows/auto-release.yaml deleted file mode 100644 index 6ae1d61b..00000000 --- a/scripts/pkg_gen/resources/gh_workflows/auto-release.yaml +++ /dev/null @@ -1,36 +0,0 @@ -name: Generate Release - -on: - repository_dispatch: - types: [create-release] - -jobs: - create_release: - runs-on: ubuntu-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v2 - - - name: Generate Release - run: | - # Extract necessary information from the event payload - REPO_OWNER=${{ github.event.client_payload.repo_owner }} - REPO_NAME=${{ github.event.client_payload.repo_name }} - RELEASE_TAG=${{ github.event.client_payload.release_tag }} - RELEASE_NAME=${{ github.event.client_payload.release_name }} - RELEASE_BODY=${{ github.event.client_payload.release_body }} - - # Create a new release using the GitHub API - curl -X POST \ - -H "Accept: application/vnd.github.v3+json" \ - -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \ - -d '{ - "tag_name": "$RELEASE_TAG", - "target_commitish": "master", - "name": "$RELEASE_NAME", - "body": "$RELEASE_BODY", - "draft": false, - "prerelease": false - }' \ - "https://api.github.com/repos/nipype/pydra-#PACKAGE#/releases" \ No newline at end of file diff --git a/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml 
b/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml index 79868a58..3dcde127 100644 --- a/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml +++ b/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml @@ -12,17 +12,23 @@ env: on: push: - branches: [ main ] + branches: [ main, develop ] tags: [ '*' ] pull_request: - branches: [ main ] + branches: [ main, develop ] + repository_dispatch: + types: [create-release] jobs: auto-gen: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - name: Checkout + uses: actions/checkout@v3 + - name: Revert version to most recent tag on upstream update + if: github.event_name == 'repository_dispatch' + run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}') - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 - name: Install build dependencies @@ -31,18 +37,31 @@ jobs: run: python -m pip install -r ./nipype-auto-conv/requirements.txt - name: Run automatic Nipype > Pydra conversion run: ./nipype-auto-conv/generate + - uses: actions/upload-artifact@v3 + with: + name: auto-generated + path: pydra/tasks/CHANGEME/auto devcheck: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.7, '3.10'] # Check oldest and newest versions + python-version: ['3.8', '3.11'] # Check oldest and newest versions pip-flags: ['', '--editable'] pydra: - 'pydra' - '--editable git+https://github.com/nipype/pydra.git#egg=pydra' steps: - - uses: actions/checkout@v3 + - name: Checkout + uses: actions/checkout@v3 + - name: Revert version to most recent tag on upstream update + if: github.event_name == 'repository_dispatch' + run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}') + - name: Download auto-generated + uses: actions/download-artifact@v3 + with: + name: auto-generated + path: pydra/tasks/CHANGEME/auto - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: @@ -64,10 +83,17 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - 
python-version: [3.7, 3.8, 3.9, '3.10'] + python-version: ['3.8', '3.11'] steps: - uses: actions/checkout@v3 + - name: Revert version to most recent tag on upstream update + if: github.event_name == 'repository_dispatch' + run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}') + - uses: actions/download-artifact@v3 + with: + name: auto-generated + path: pydra/tasks/CHANGEME/auto - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: @@ -92,12 +118,25 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.9] + python-version: ['3.11'] steps: - uses: actions/checkout@v3 with: submodules: recursive fetch-depth: 0 + - uses: actions/download-artifact@v3 + with: + name: auto-generated + path: pydra/tasks/CHANGEME/auto + - name: Tag release with a post-release based on Nipype and Nipype2Pydra versions + if: github.event_name == 'repository_dispatch' + run: | + TAG=$(git tag -l | tail -n 1 | awk -F post '{print $1}') + POST=$(python -c "from pydra.tasks.CHANGEME.auto._version import *; print(post_release)") + git checkout $TAG + git add -f pydra/tasks/CHANGEME/auto/_version.py + git commit -am"added auto-generated version to make new tag for package version" + git tag ${TAG}post${POST} - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 with: @@ -118,7 +157,7 @@ jobs: # [1] https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions#setting-an-output-parameter - name: Check for PyPI token on tag id: deployable - if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') + if: (github.event_name == 'push' && startsWith(github.ref, 'refs/tags')) || github.event_name == 'repository_dispatch' env: PYPI_API_TOKEN: "${{ secrets.PYPI_API_TOKEN }}" run: if [ -n "$PYPI_API_TOKEN" ]; then echo "DEPLOY=true" >> $GITHUB_OUTPUT; fi diff --git a/scripts/pkg_gen/resources/nipype-auto-convert-requirements.txt 
b/scripts/pkg_gen/resources/nipype-auto-convert-requirements.txt index 7067fce1..fae44d4c 100644 --- a/scripts/pkg_gen/resources/nipype-auto-convert-requirements.txt +++ b/scripts/pkg_gen/resources/nipype-auto-convert-requirements.txt @@ -5,5 +5,6 @@ pydra PyYAML>=6.0 fileformats >=0.8 fileformats-medimage >=0.4 +fileformats-datascience >= 0.1 traits nipype2pydra \ No newline at end of file diff --git a/scripts/pkg_gen/resources/nipype-auto-convert.py b/scripts/pkg_gen/resources/nipype-auto-convert.py index fc5c3eb2..faf4e1dd 100644 --- a/scripts/pkg_gen/resources/nipype-auto-convert.py +++ b/scripts/pkg_gen/resources/nipype-auto-convert.py @@ -3,6 +3,7 @@ import os.path from warnings import warn from pathlib import Path +import shutil from importlib import import_module import yaml import nipype @@ -30,7 +31,9 @@ auto_init = f"# Auto-generated by {__file__}, do not edit as it will be overwritten\n\n" -for fspath in SPECS_DIR.glob("**/*.yaml"): +shutil.rmtree(PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto") + +for fspath in sorted(SPECS_DIR.glob("**/*.yaml")): with open(fspath) as f: spec = yaml.load(f, Loader=yaml.SafeLoader) @@ -56,8 +59,9 @@ f.write( f"""# Auto-generated by {__file__}, do not edit as it will be overwritten -nipype_version = "{nipype.__version__}" +nipype_version = "{nipype.__version__.split('.dev')[0]}" nipype2pydra_version = "{nipype2pydra.__version__.split('.dev')[0]}" +post_release = (nipype_version + nipype2pydra_version).replace(".", "") """ ) diff --git a/scripts/pkg_gen/resources/conftest.py b/scripts/pkg_gen/resources/timeout_pass.py similarity index 61% rename from scripts/pkg_gen/resources/conftest.py rename to scripts/pkg_gen/resources/timeout_pass.py index 599a8ab4..2b9d4253 100644 --- a/scripts/pkg_gen/resources/conftest.py +++ b/scripts/pkg_gen/resources/timeout_pass.py @@ -1,59 +1,7 @@ -import os -import time -import logging -from pathlib import Path -from traceback import format_exc -import tempfile -import threading -from 
dataclasses import dataclass -import pytest -from _pytest.runner import TestReport +from fileformats.generic import File, Directory, FsObject -try: - from pydra import set_input_validator - - set_input_validator(True) -except ImportError: - pass -from fileformats.core.utils import include_testing_package - -include_testing_package(True) - -# Set DEBUG logging for unittests - -log_level = logging.WARNING - -logger = logging.getLogger("fileformats") -logger.setLevel(log_level) - -sch = logging.StreamHandler() -sch.setLevel(log_level) -formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") -sch.setFormatter(formatter) -logger.addHandler(sch) - - -# For debugging in IDE's don't catch raised exceptions and let the IDE -# break at it -if os.getenv("_PYTEST_RAISE", "0") != "0": - - @pytest.hookimpl(tryfirst=True) - def pytest_exception_interact(call): - raise call.excinfo.value - - @pytest.hookimpl(tryfirst=True) - def pytest_internalerror(excinfo): - raise excinfo.value - - -@pytest.fixture -def work_dir(): - work_dir = tempfile.mkdtemp() - return Path(work_dir) - - -def pass_after_timout(seconds, poll_interval=0.1): +def pass_after_timeout(seconds, poll_interval=0.1): """Cancel the test after a certain period, after which it is assumed that the arguments passed to the underying command have passed its internal validation (so we don't have to wait until the tool completes) From 2e15096d2b59cf461cf58af70240be0d4e73fb24 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Tue, 1 Aug 2023 21:40:31 +1000 Subject: [PATCH 29/42] finished writing template README --- scripts/pkg_gen/resources/README.md | 60 ++++++++++++++++++++++++----- 1 file changed, 51 insertions(+), 9 deletions(-) diff --git a/scripts/pkg_gen/resources/README.md b/scripts/pkg_gen/resources/README.md index 7a79db9f..78a540af 100644 --- a/scripts/pkg_gen/resources/README.md +++ b/scripts/pkg_gen/resources/README.md @@ -18,13 +18,13 @@ This package comes with a battery of automatically 
generated test modules. To in the necessary dependencies to run the tests ``` -pip install -e .[test] +$ pip install -e .[test] ``` Then the tests, including [doctests](https://docs.python.org/3/library/doctest.html), can be launched using ``` -pytest --doctest-modules pydra/tasks/* +$ pytest --doctest-modules pydra/tasks/* ``` By default, the tests are set to time-out after 10s, after which the underlying tool is @@ -32,7 +32,7 @@ assumed to have passed the validation/initialisation phase and we assume that it run to completion. To disable this and run the test(s) through to completion run ``` -pytest --doctest-modules --timeout-pass 0 pydra/tasks/* +$ pytest --doctest-modules --timeout-pass 0 pydra/tasks/* ``` ## Continuous integration @@ -42,18 +42,60 @@ deploy packages to PYPI. New packages are built and uploaded when releases are c GitHub, or new releases of Nipype or the Nipype2Pydra conversion tool are released. Releases triggered by updates to Nipype or Nipype2Pydra are signified by the `postN` suffix where N = with '.'s stripped, e.g. -`v0.2.3post185010` corresponds to the v0.2.3 tag of +`v0.2.3post185010` corresponds to the v0.2.3 tag of this repository with auto-generated +packages from Nipype 1.8.5 using Nipype2Pydra 0.1.0. # Contributing to this package -## For developers +## Developer installation -Install repo in developer mode from the source directory. It is also useful to -install pre-commit to take care of styling via [black](https://black.readthedocs.io/): + +Install repo in developer mode from the source directory and install pre-commit to +ensure consistent code-style and quality. 
+ +``` +$ pip install -e .[test,dev] +$ pre-commit install +``` + +Next install the requirements for running the auto-conversion script and generate the +Pydra task interfaces from their Nipype counterparts ``` -pip install -e .[dev] -pre-commit install +$ pip install -r nipype-auto-conv/requirements.txt +$ nipype-auto-gen/generate ``` +## Methodology + +The development of this package is expected to have two phases + +1. Where the corresponding Nipype interfaces are considered to be the ground truth, and + the Pydra tasks are generated from them +2. When the Pydra tasks are considered be mature and they are edited by hand + +Different tasks will probably mature at different times so there will probably be an +intermediate phase between 1 & 2. + +### Auto-conversion phase + +The auto-converted Pydra tasks are generated from their corresponding Nipype interface +in combination with "conversion hints" contained in YAML specs +located in `nipype-auto-gen/specs/`. The self-documented conversion specs are +to be edited by hand in order to assist the auto-converter produce valid pydra tasks. +After editing one or more conversion specs the `pydra.tasks.CHANGEME.auto` package should +be regenerated by running + +``` +$ nipype-auto-gen/generate +``` + +The tests should be run on the auto-generated tasks to see if they are valid + +``` +$ pytest --doctest-modules pydra/tasks/CHANGEME/auto/tests/test_.py +``` +If the test passes you should then edit the `pydra/tasks/CHANGEME/v/__init__.py` file +to import the now valid task interface to signify that it has been validated and is ready +for use. 
\ No newline at end of file From f4416a7bcb579890de3ec27450ed1bfef7c8ecfb Mon Sep 17 00:00:00 2001 From: Tom Close Date: Tue, 1 Aug 2023 21:41:37 +1000 Subject: [PATCH 30/42] removed timeout_pass --- scripts/pkg_gen/resources/timeout_pass.py | 68 ----------------------- 1 file changed, 68 deletions(-) delete mode 100644 scripts/pkg_gen/resources/timeout_pass.py diff --git a/scripts/pkg_gen/resources/timeout_pass.py b/scripts/pkg_gen/resources/timeout_pass.py deleted file mode 100644 index 2b9d4253..00000000 --- a/scripts/pkg_gen/resources/timeout_pass.py +++ /dev/null @@ -1,68 +0,0 @@ -from fileformats.generic import File, Directory, FsObject - - -def pass_after_timeout(seconds, poll_interval=0.1): - """Cancel the test after a certain period, after which it is assumed that the arguments - passed to the underying command have passed its internal validation (so we don't have - to wait until the tool completes) - - Parameters - ---------- - seconds : int - the number of seconds to wait until cancelling the test (and marking it as passed) - """ - - def decorator(test_func): - def wrapper(*args, **kwargs): - @dataclass - class TestState: - """A way of passing a reference to the result that can be updated by - the test thread""" - - result = None - exception = None - - state = TestState() - - def test_runner(): - try: - state.result = test_func(*args, **kwargs) - except Exception as e: - state.exception = e - # raise - # state.trace_back = format_exc() - # raise - - thread = threading.Thread(target=test_runner) - thread.start() - - # Calculate the end time for the timeout - end_time = time.time() + seconds - - while thread.is_alive() and time.time() < end_time: - time.sleep(poll_interval) - - if thread.is_alive(): - thread.join() - return state.result - - if state.trace_back: - raise state.exception - - outcome = "passed after timeout" - rep = TestReport.from_item_and_call( - item=args[0], - when="call", - excinfo=None, - outcome=outcome, - sections=None, - duration=0, - 
keywords=None, - ) - args[0].ihook.pytest_runtest_logreport(report=rep) - - return state.result - - return wrapper - - return decorator From 37b190349cd5ff37e607f9fb928b525770db2964 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Tue, 1 Aug 2023 22:30:28 +1000 Subject: [PATCH 31/42] touched up readme and conversions --- nipype2pydra/task.py | 34 +++++++++++-------- nipype2pydra/utils.py | 28 +++++++++++++++ scripts/pkg_gen/create_packages.py | 33 ++---------------- scripts/pkg_gen/resources/README.md | 6 +++- .../pkg_gen/resources/nipype-auto-convert.py | 9 ++--- 5 files changed, 61 insertions(+), 49 deletions(-) diff --git a/nipype2pydra/task.py b/nipype2pydra/task.py index 47bce2da..f6449790 100644 --- a/nipype2pydra/task.py +++ b/nipype2pydra/task.py @@ -384,12 +384,8 @@ class TaskConverter: def __attrs_post_init__(self): if self.output_module is None: if self.nipype_module.__name__.startswith("nipype.interfaces."): - self.output_module = ( - "pydra.tasks." - + self.nipype_module.__name__[len("nipype.interfaces.") :] - + "." 
- + self.task_name.lower() - ) + pkg_name = self.nipype_module.__name__.split(".")[2] + self.output_module = f"pydra.tasks.{pkg_name}.auto.{self.task_name}" else: raise RuntimeError( "Output-module needs to be explicitly provided to task converter " @@ -404,11 +400,11 @@ def nipype_interface(self) -> nipype.interfaces.base.BaseInterface: @property def nipype_input_spec(self) -> nipype.interfaces.base.BaseInterfaceInputSpec: - return self.nipype_interface.input_spec() + return self.nipype_interface.input_spec() if self.nipype_interface.input_spec else None @property def nipype_output_spec(self) -> nipype.interfaces.base.BaseTraitedSpec: - return self.nipype_interface.output_spec() + return self.nipype_interface.output_spec() if self.nipype_interface.output_spec else None def generate(self, package_root: Path): """creating pydra input/output spec from nipype specs @@ -542,6 +538,8 @@ def pydra_fld_input(self, field, nm): def convert_output_spec(self, fields_from_template): """creating fields list for pydra input spec""" fields_pdr_l = [] + if not self.nipype_output_spec: + return fields_pdr_l for name, fld in self.nipype_output_spec.traits().items(): if name in self.outputs.requirements and name not in fields_from_template: fld_pdr = self.pydra_fld_output(fld, name) @@ -689,6 +687,16 @@ def types_to_names(spec_fields): spec_fields_str.append(tuple(el)) return spec_fields_str + + base_imports = ["from pydra.engine import specs",] + if hasattr(self.nipype_interface, "_cmd"): + task_base = "ShellCommandTask" + base_imports.append("from pydra.engine import ShellCommandTask") + else: + task_base = "FunctionTask" + base_imports.append("from pydra.engine.task import FunctionTask") + + input_fields_str = types_to_names(spec_fields=input_fields) output_fields_str = types_to_names(spec_fields=output_fields) functions_str = self.function_callables() @@ -697,7 +705,7 @@ def types_to_names(spec_fields): spec_str += f"{self.task_name}_input_spec = specs.SpecInfo(name='Input', 
fields=input_fields, bases=(specs.ShellSpec,))\n\n" spec_str += f"output_fields = {output_fields_str}\n" spec_str += f"{self.task_name}_output_spec = specs.SpecInfo(name='Output', fields=output_fields, bases=(specs.ShellOutSpec,))\n\n" - spec_str += f"class {self.task_name}(ShellCommandTask):\n" + spec_str += f"class {self.task_name}({task_base}):\n" spec_str += ' """\n' spec_str += self.create_doctests( input_fields=input_fields, nonstd_types=nonstd_types @@ -705,7 +713,8 @@ def types_to_names(spec_fields): spec_str += ' """\n' spec_str += f" input_spec = {self.task_name}_input_spec\n" spec_str += f" output_spec = {self.task_name}_output_spec\n" - spec_str += f" executable='{self.nipype_interface._cmd}'\n" + if task_base == "ShellCommandTask": + spec_str += f" executable='{self.nipype_interface._cmd}'\n" spec_str = re.sub(r"'#([^'#]+)#'", r"\1", spec_str) @@ -713,10 +722,7 @@ def types_to_names(spec_fields): nonstd_types, spec_str, include_task=False, - base=( - "from pydra.engine import specs", - "from pydra.engine import ShellCommandTask", - ), + base=base_imports, ) spec_str = "\n".join(imports) + "\n\n" + spec_str diff --git a/nipype2pydra/utils.py b/nipype2pydra/utils.py index dd2ed7b7..868c3270 100644 --- a/nipype2pydra/utils.py +++ b/nipype2pydra/utils.py @@ -73,3 +73,31 @@ def is_fileset(tp: type): and type(tp) is not ty.GenericAlias and issubclass(tp, FileSet) ) + + +def to_snake_case(name: str) -> str: + """ + Converts a PascalCase string to a snake_case one + """ + snake_str = "" + + # Loop through each character in the input string + for i, char in enumerate(name): + # If the current character is uppercase and it's not the first character or + # followed by another uppercase character, add an underscore before it and + # convert it to lowercase + if ( + i > 0 + and (char.isupper() or char.isdigit()) + and ( + not (name[i - 1].isupper() or name[i - 1].isdigit()) + or ((i + 1) < len(name) and (name[i + 1].islower() or name[i + 1].islower())) + ) + ): + 
snake_str += "_" + snake_str += char.lower() + else: + # Otherwise, just add the character as it is + snake_str += char.lower() + + return snake_str diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index c4237150..c6b875d7 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -28,6 +28,7 @@ TestGenerator, DocTestGenerator, ) +from nipype2pydra.utils import to_snake_case RESOURCES_DIR = Path(__file__).parent / "resources" @@ -107,7 +108,7 @@ def generate_packages( # Loop through all interfaces in module for interface in interfaces: - spec_name = interface.lower() + spec_name = to_snake_case(interface) callables_fspath = spec_dir / f"{spec_name}_callables.py" spec_stub = {} @@ -422,7 +423,7 @@ def copy_ignore(_, names): # Add "pydra.tasks..auto to gitignore" with open(pkg_dir / ".gitignore", "a") as f: - f.write(f"\n/pydra/tasks/{pkg}/auto" f"\n/pydra/tasks/_version.py\n") + f.write(f"\n/pydra/tasks/{pkg}/auto" f"\n/pydra/tasks/{pkg}/_version.py\n") # rename tasks directory (pkg_dir / "pydra" / "tasks" / "CHANGEME").rename(pkg_dir / "pydra" / "tasks" / pkg) @@ -594,34 +595,6 @@ def extract_doctest_inputs( return cmdline, doctest_inpts, directive, imports -def to_snake_case(name: str) -> str: - """ - Converts a PascalCase string to a snake_case one - """ - snake_str = "" - - # Loop through each character in the input string - for i, char in enumerate(name): - # If the current character is uppercase and it's not the first character or - # followed by another uppercase character, add an underscore before it and - # convert it to lowercase - if ( - i > 0 - and (char.isupper() or char.isdigit()) - and ( - not (name[i - 1].isupper() or name[i - 1].isdigit()) - or ((i + 1) < len(name) and (name[i + 1].islower() or name[i + 1].islower())) - ) - ): - snake_str += "_" - snake_str += char.lower() - else: - # Otherwise, just add the character as it is - snake_str += char.lower() - - return snake_str - - if 
__name__ == "__main__": import sys diff --git a/scripts/pkg_gen/resources/README.md b/scripts/pkg_gen/resources/README.md index 78a540af..accd0b43 100644 --- a/scripts/pkg_gen/resources/README.md +++ b/scripts/pkg_gen/resources/README.md @@ -98,4 +98,8 @@ $ pytest --doctest-modules pydra/tasks/CHANGEME/auto/tests/test_/__init__.py` file to import the now valid task interface to signify that it has been validated and is ready -for use. \ No newline at end of file +for use, e.g. + +```python +from pydra.tasks.CHANGEME.auto import edited_task +``` \ No newline at end of file diff --git a/scripts/pkg_gen/resources/nipype-auto-convert.py b/scripts/pkg_gen/resources/nipype-auto-convert.py index faf4e1dd..5c0a10f8 100644 --- a/scripts/pkg_gen/resources/nipype-auto-convert.py +++ b/scripts/pkg_gen/resources/nipype-auto-convert.py @@ -31,7 +31,9 @@ auto_init = f"# Auto-generated by {__file__}, do not edit as it will be overwritten\n\n" -shutil.rmtree(PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto") +auto_dir = PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto" +if auto_dir.exists(): + shutil.rmtree(auto_dir) for fspath in sorted(SPECS_DIR.glob("**/*.yaml")): with open(fspath) as f: @@ -44,15 +46,14 @@ rel_pkg_path += "." 
+ fspath.stem callables = import_module(rel_pkg_path + "_callables") - module_name = fspath.stem.lower() converter = TaskConverter( - output_module=f"pydra.tasks.{PKG_NAME}.auto.{module_name}", + output_module=f"pydra.tasks.{PKG_NAME}.auto.{spec['task_name']}", callables_module=callables, # type: ignore **spec, ) converter.generate(PKG_ROOT) - auto_init += f"from .{module_name} import {converter.task_name}\n" + auto_init += f"from .{spec['task_name']} import {converter.task_name}\n" with open(PKG_ROOT / "pydra" / "tasks" / PKG_NAME / "auto" / "_version.py", "w") as f: From 4faa0b8580ffc817bcdf23d55c33861197e5740b Mon Sep 17 00:00:00 2001 From: Tom Close Date: Tue, 1 Aug 2023 22:37:13 +1000 Subject: [PATCH 32/42] updated example specs --- .../task/ants_n4_bias_field_correction.yaml | 333 ++++++ example-specs/task/ants_registration.yaml | 997 ++++++++++++++++++ .../task/ants_registration_Registration.yaml | 32 - ...ts_segmentation_N4BiasFieldCorrection.yaml | 21 - example-specs/task/apply_vol_transform.yaml | 159 +++ example-specs/task/extract_roi.yaml | 146 +++ ...eesurfer_preprocess_ApplyVolTransform.yaml | 21 - example-specs/task/fsl_utils_ExtractROI.yaml | 30 - 8 files changed, 1635 insertions(+), 104 deletions(-) create mode 100644 example-specs/task/ants_n4_bias_field_correction.yaml create mode 100644 example-specs/task/ants_registration.yaml delete mode 100644 example-specs/task/ants_registration_Registration.yaml delete mode 100644 example-specs/task/ants_segmentation_N4BiasFieldCorrection.yaml create mode 100644 example-specs/task/apply_vol_transform.yaml create mode 100644 example-specs/task/extract_roi.yaml delete mode 100644 example-specs/task/freesurfer_preprocess_ApplyVolTransform.yaml delete mode 100644 example-specs/task/fsl_utils_ExtractROI.yaml diff --git a/example-specs/task/ants_n4_bias_field_correction.yaml b/example-specs/task/ants_n4_bias_field_correction.yaml new file mode 100644 index 00000000..dfa4ab6b --- /dev/null +++ 
b/example-specs/task/ants_n4_bias_field_correction.yaml @@ -0,0 +1,333 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'nipype.interfaces.ants.segmentation.N4BiasFieldCorrection' from Nipype to Pydra. +# +# Please fill-in/edit the fields below where appropriate +# +# Inputs +# ------ +# dimension : enum +# image dimension (2, 3 or 4) +# input_image : file +# input for bias correction. Negative values or values close to zero should be processed prior to correction +# mask_image : file +# image to specify region to perform final bias correction in +# weight_image : file +# image for relative weighting (e.g. probability map of the white matter) of voxels during the B-spline fitting. +# output_image : str +# output file name +# bspline_fitting_distance : float +# +# bspline_order : int +# +# shrink_factor : int +# +# n_iterations : list +# +# convergence_threshold : float +# +# save_bias : bool +# True if the estimated bias should be saved to file. +# bias_image : file +# Filename for the estimated bias. +# copy_header : bool +# copy headers of the original image into the output (corrected) file +# rescale_intensities : bool +# [NOTE: Only ANTs>=2.1.0] At each iteration, a new intensity mapping is calculated and applied but there is nothing which constrains the new intensity range to be within certain values. The result is that the range can "drift" from the original at each iteration. This option rescales to the [min,max] range of the original image intensities within the user-specified mask. +# histogram_sharpening : tuple +# Three-values tuple of histogram sharpening parameters (FWHM, wienerNose, numberOfHistogramBins). These options describe the histogram sharpening parameters, i.e. the deconvolution step parameters described in the original N3 algorithm. The default values have been shown to work fairly well. 
+# num_threads : int +# Number of ITK threads to use +# args : str +# Additional parameters to the command +# environ : dict +# Environment variables +# +# Outputs +# ------- +# output_image : file +# Warped image +# bias_image : file +# Estimated bias +# +# Docs +# ---- +# +# Bias field correction. +# +# N4 is a variant of the popular N3 (nonparameteric nonuniform normalization) +# retrospective bias correction algorithm. Based on the assumption that the +# corruption of the low frequency bias field can be modeled as a convolution of +# the intensity histogram by a Gaussian, the basic algorithmic protocol is to +# iterate between deconvolving the intensity histogram by a Gaussian, remapping +# the intensities, and then spatially smoothing this result by a B-spline modeling +# of the bias field itself. The modifications from and improvements obtained over +# the original N3 algorithm are described in [Tustison2010]_. +# +# .. [Tustison2010] N. Tustison et al., +# N4ITK: Improved N3 Bias Correction, IEEE Transactions on Medical Imaging, +# 29(6):1310-1320, June 2010. 
+# +# Examples +# -------- +# +# >>> import copy +# >>> from nipype.interfaces.ants import N4BiasFieldCorrection +# >>> n4 = N4BiasFieldCorrection() +# >>> n4.inputs.dimension = 3 +# >>> n4.inputs.input_image = 'structural.nii' +# >>> n4.inputs.bspline_fitting_distance = 300 +# >>> n4.inputs.shrink_factor = 3 +# >>> n4.inputs.n_iterations = [50,50,30,20] +# >>> n4.cmdline +# 'N4BiasFieldCorrection --bspline-fitting [ 300 ] +# -d 3 --input-image structural.nii +# --convergence [ 50x50x30x20 ] --output structural_corrected.nii +# --shrink-factor 3' +# +# >>> n4_2 = copy.deepcopy(n4) +# >>> n4_2.inputs.convergence_threshold = 1e-6 +# >>> n4_2.cmdline +# 'N4BiasFieldCorrection --bspline-fitting [ 300 ] +# -d 3 --input-image structural.nii +# --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii +# --shrink-factor 3' +# +# >>> n4_3 = copy.deepcopy(n4_2) +# >>> n4_3.inputs.bspline_order = 5 +# >>> n4_3.cmdline +# 'N4BiasFieldCorrection --bspline-fitting [ 300, 5 ] +# -d 3 --input-image structural.nii +# --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii +# --shrink-factor 3' +# +# >>> n4_4 = N4BiasFieldCorrection() +# >>> n4_4.inputs.input_image = 'structural.nii' +# >>> n4_4.inputs.save_bias = True +# >>> n4_4.inputs.dimension = 3 +# >>> n4_4.cmdline +# 'N4BiasFieldCorrection -d 3 --input-image structural.nii +# --output [ structural_corrected.nii, structural_bias.nii ]' +# +# >>> n4_5 = N4BiasFieldCorrection() +# >>> n4_5.inputs.input_image = 'structural.nii' +# >>> n4_5.inputs.dimension = 3 +# >>> n4_5.inputs.histogram_sharpening = (0.12, 0.02, 200) +# >>> n4_5.cmdline +# 'N4BiasFieldCorrection -d 3 --histogram-sharpening [0.12,0.02,200] +# --input-image structural.nii --output structural_corrected.nii' +# +# +task_name: n4_bias_field_correction +nipype_name: N4BiasFieldCorrection +nipype_module: nipype.interfaces.ants.segmentation +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] 
- fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + input_image: medimage/nifti1 + mask_image: medimage/nifti1 + weight_image: medimage/nifti1 + bias_image: medimage/nifti1 + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ output_image: medimage/nifti1 + bias_image: medimage/nifti1 + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + dimension: '3' + input_image: + bspline_fitting_distance: '300' + shrink_factor: '3' + n_iterations: '[50,50,30,20]' + imports: &id001 + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + - module: copy + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + convergence_threshold: 1e-6 + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + bspline_order: '5' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. 
Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + input_image: + save_bias: 'True' + dimension: '3' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + input_image: + dimension: '3' + histogram_sharpening: (0.12, 0.02, 200) + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +doctests: +- cmdline: N4BiasFieldCorrection --bspline-fitting [ 300 ] -d 3 --input-image structural.nii --convergence [ 50x50x30x20 ] --output structural_corrected.nii --shrink-factor 3 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. 
+ dimension: '3' + input_image: + bspline_fitting_distance: '300' + shrink_factor: '3' + n_iterations: '[50,50,30,20]' + imports: *id001 + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS +- cmdline: N4BiasFieldCorrection --bspline-fitting [ 300 ] -d 3 --input-image structural.nii --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii --shrink-factor 3 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + convergence_threshold: 1e-6 + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS +- cmdline: N4BiasFieldCorrection --bspline-fitting [ 300, 5 ] -d 3 --input-image structural.nii --convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii --shrink-factor 3 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + bspline_order: '5' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: N4BiasFieldCorrection -d 3 --input-image structural.nii --output [ structural_corrected.nii, structural_bias.nii ] + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + input_image: + save_bias: 'True' + dimension: '3' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS +- cmdline: N4BiasFieldCorrection -d 3 --histogram-sharpening [0.12,0.02,200] --input-image structural.nii --output structural_corrected.nii + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + input_image: + dimension: '3' + histogram_sharpening: (0.12, 0.02, 200) + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/task/ants_registration.yaml b/example-specs/task/ants_registration.yaml new file mode 100644 index 00000000..2cce5cfd --- /dev/null +++ b/example-specs/task/ants_registration.yaml @@ -0,0 +1,997 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'nipype.interfaces.ants.registration.Registration' from Nipype to Pydra. 
+# +# Please fill-in/edit the fields below where appropriate +# +# Inputs +# ------ +# dimension : enum +# image dimension (2 or 3) +# fixed_image : inputmultiobject +# Image to which the moving_image should be transformed(usually a structural image) +# fixed_image_mask : file +# Mask used to limit metric sampling region of the fixed imagein all stages +# fixed_image_masks : inputmultiobject +# Masks used to limit metric sampling region of the fixed image, defined per registration stage(Use "NULL" to omit a mask at a given stage) +# moving_image : inputmultiobject +# Image that will be registered to the space of fixed_image. This is theimage on which the transformations will be applied to +# moving_image_mask : file +# mask used to limit metric sampling region of the moving imagein all stages +# moving_image_masks : inputmultiobject +# Masks used to limit metric sampling region of the moving image, defined per registration stage(Use "NULL" to omit a mask at a given stage) +# save_state : file +# Filename for saving the internal restorable state of the registration +# restore_state : file +# Filename for restoring the internal restorable state of the registration +# initial_moving_transform : inputmultiobject +# A transform or a list of transforms that should be applied before the registration begins. Note that, when a list is given, the transformations are applied in reverse order. +# invert_initial_moving_transform : inputmultiobject +# One boolean or a list of booleans that indicatewhether the inverse(s) of the transform(s) definedin initial_moving_transform should be used. +# initial_moving_transform_com : enum +# Align the moving_image and fixed_image before registration using the geometric center of the images (=0), the image intensities (=1), or the origin of the images (=2). +# metric_item_trait : enum +# +# metric_stage_trait : traitcompound +# +# metric : list +# the metric(s) to use for each stage. 
Note that multiple metrics per stage are not supported in ANTS 1.9.1 and earlier. +# metric_weight_item_trait : float +# +# metric_weight_stage_trait : traitcompound +# +# metric_weight : list +# the metric weight(s) for each stage. The weights must sum to 1 per stage. +# radius_bins_item_trait : int +# +# radius_bins_stage_trait : traitcompound +# +# radius_or_number_of_bins : list +# the number of bins in each stage for the MI and Mattes metric, the radius for other metrics +# sampling_strategy_item_trait : enum +# +# sampling_strategy_stage_trait : traitcompound +# +# sampling_strategy : list +# the metric sampling strategy (strategies) for each stage +# sampling_percentage_item_trait : traitcompound +# +# sampling_percentage_stage_trait : traitcompound +# +# sampling_percentage : list +# the metric sampling percentage(s) to use for each stage +# use_estimate_learning_rate_once : list +# +# use_histogram_matching : traitcompound +# Histogram match the images before registration. +# interpolation : enum +# +# interpolation_parameters : traitcompound +# +# write_composite_transform : bool +# +# collapse_output_transforms : bool +# Collapse output transforms. Specifically, enabling this option combines all adjacent linear transforms and composes all adjacent displacement field transforms before writing the results to disk. +# initialize_transforms_per_stage : bool +# Initialize linear transforms from the previous stage. By enabling this option, the current linear stage transform is directly initialized from the previous stages linear transform; this allows multiple linear stages to be run where each stage directly updates the estimated linear transform from the previous stage. (e.g. Translation -> Rigid -> Affine). +# float : bool +# Use float instead of double for computations. 
+# transforms : list +# +# transform_parameters : list +# +# restrict_deformation : list +# This option allows the user to restrict the optimization of the displacement field, translation, rigid or affine transform on a per-component basis. For example, if one wants to limit the deformation or rotation of 3-D volume to the first two dimensions, this is possible by specifying a weight vector of '1x1x0' for a deformation field or '1x1x0x1x1x0' for a rigid transformation. Low-dimensional restriction only works if there are no preceding transformations. +# number_of_iterations : list +# +# smoothing_sigmas : list +# +# sigma_units : list +# units for smoothing sigmas +# shrink_factors : list +# +# convergence_threshold : list +# +# convergence_window_size : list +# +# output_transform_prefix : str +# +# output_warped_image : traitcompound +# +# output_inverse_warped_image : traitcompound +# +# winsorize_upper_quantile : range +# The Upper quantile to clip image ranges +# winsorize_lower_quantile : range +# The Lower quantile to clip image ranges +# random_seed : int +# Fixed seed for random number generation +# verbose : bool +# +# num_threads : int +# Number of ITK threads to use +# args : str +# Additional parameters to the command +# environ : dict +# Environment variables +# +# Outputs +# ------- +# forward_transforms : list +# List of output transforms for forward registration +# reverse_forward_transforms : list +# List of output transforms for forward registration reversed for antsApplyTransform +# reverse_transforms : list +# List of output transforms for reverse registration +# forward_invert_flags : list +# List of flags corresponding to the forward transforms +# reverse_forward_invert_flags : list +# List of flags corresponding to the forward transforms reversed for antsApplyTransform +# reverse_invert_flags : list +# List of flags corresponding to the reverse transforms +# composite_transform : file +# Composite transform file +# inverse_composite_transform 
: file
+# Inverse composite transform file
+# warped_image : file
+# Outputs warped image
+# inverse_warped_image : file
+# Outputs the inverse of the warped image
+# save_state : file
+# The saved registration state to be restored
+# metric_value : float
+# the final value of metric
+# elapsed_time : float
+# the total elapsed time as reported by ANTs
+#
+# Docs
+# ----
+# ANTs Registration command for registration of images
+#
+# `antsRegistration `_ registers a ``moving_image`` to a ``fixed_image``,
+# using a predefined (sequence of) cost function(s) and transformation operations.
+# The cost function is defined using one or more 'metrics', specifically
+# local cross-correlation (``CC``), Mean Squares (``MeanSquares``), Demons (``Demons``),
+# global correlation (``GC``), or Mutual Information (``Mattes`` or ``MI``).
+#
+# ANTS can use both linear (``Translation``, ``Rigid``, ``Affine``, ``CompositeAffine``,
+# or ``Translation``) and non-linear transformations (``BSpline``, ``GaussianDisplacementField``,
+# ``TimeVaryingVelocityField``, ``TimeVaryingBSplineVelocityField``, ``SyN``, ``BSplineSyN``,
+# ``Exponential``, or ``BSplineExponential``). Usually, registration is done in multiple
+# *stages*. For example first an Affine, then a Rigid, and ultimately a non-linear
+# (Syn)-transformation.
+#
+# antsRegistration can be initialized using one or more transforms from moving_image
+# to fixed_image with the ``initial_moving_transform``-input. For example, when you
+# already have a warpfield that corrects for geometrical distortions in an EPI (functional) image,
+# that you want to apply before an Affine registration to a structural image.
+# You could put this transform into 'initial_moving_transform'.
+#
+# The Registration-interface can output the resulting transform(s) that map moving_image to
+# fixed_image in a single file as a ``composite_transform`` (if ``write_composite_transform``
+# is set to ``True``), or a list of transforms as ``forward_transforms``. It can also output
+# inverse transforms (from ``fixed_image`` to ``moving_image``) in a similar fashion using
+# ``inverse_composite_transform``. Note that the order of ``forward_transforms`` is in 'natural'
+# order: the first element should be applied first, the last element should be applied last.
+#
+# Note, however, that ANTS tools always apply lists of transformations in reverse order (the last
+# transformation in the list is applied first). Therefore, if the output forward_transforms
+# is a list, one can not directly feed it into, for example, ``ants.ApplyTransforms``. To
+# make ``ants.ApplyTransforms`` apply the transformations in the same order as ``ants.Registration``,
+# you have to provide the list of transformations in reverse order from ``forward_transforms``.
+# ``reverse_forward_transforms`` outputs ``forward_transforms`` in reverse order and can be used for
+# this purpose. Note also that, because ``composite_transform`` is always a single file, this
+# output is preferred for most use-cases.
+#
+# More information can be found in the `ANTS
+# manual `_.
+#
+# See below for some useful examples.
+#
+# Examples
+# --------
+#
+# Set up a Registration node with some default settings. This Node registers
+# 'fixed1.nii' to 'moving1.nii' by first fitting a linear 'Affine' transformation, and
+# then a non-linear 'SyN' transformation, both using the Mutual Information-cost
+# metric.
+#
+# The registration is initialized by first applying the (linear) transform
+# trans.mat.
+# +# >>> import copy, pprint +# >>> from nipype.interfaces.ants import Registration +# >>> reg = Registration() +# >>> reg.inputs.fixed_image = 'fixed1.nii' +# >>> reg.inputs.moving_image = 'moving1.nii' +# >>> reg.inputs.output_transform_prefix = "output_" +# >>> reg.inputs.initial_moving_transform = 'trans.mat' +# >>> reg.inputs.transforms = ['Affine', 'SyN'] +# >>> reg.inputs.transform_parameters = [(2.0,), (0.25, 3.0, 0.0)] +# >>> reg.inputs.number_of_iterations = [[1500, 200], [100, 50, 30]] +# >>> reg.inputs.dimension = 3 +# >>> reg.inputs.write_composite_transform = True +# >>> reg.inputs.collapse_output_transforms = False +# >>> reg.inputs.initialize_transforms_per_stage = False +# >>> reg.inputs.metric = ['Mattes']*2 +# >>> reg.inputs.metric_weight = [1]*2 # Default (value ignored currently by ANTs) +# >>> reg.inputs.radius_or_number_of_bins = [32]*2 +# >>> reg.inputs.sampling_strategy = ['Random', None] +# >>> reg.inputs.sampling_percentage = [0.05, None] +# >>> reg.inputs.convergence_threshold = [1.e-8, 1.e-9] +# >>> reg.inputs.convergence_window_size = [20]*2 +# >>> reg.inputs.smoothing_sigmas = [[1,0], [2,1,0]] +# >>> reg.inputs.sigma_units = ['vox'] * 2 +# >>> reg.inputs.shrink_factors = [[2,1], [3,2,1]] +# >>> reg.inputs.use_estimate_learning_rate_once = [True, True] +# >>> reg.inputs.use_histogram_matching = [True, True] # This is the default +# >>> reg.inputs.output_warped_image = 'output_warped_image.nii.gz' +# >>> reg.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, 
moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# >>> reg.run() # doctest: +SKIP +# +# Same as reg1, but first invert the initial transform ('trans.mat') before applying it. +# +# >>> reg.inputs.invert_initial_moving_transform = True +# >>> reg1 = copy.deepcopy(reg) +# >>> reg1.inputs.winsorize_lower_quantile = 0.025 +# >>> reg1.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 1.0 ] --write-composite-transform 1' +# >>> reg1.run() # doctest: +SKIP +# +# Clip extremely high intensity data points using winsorize_upper_quantile. All data points +# higher than the 0.975 quantile are set to the value of the 0.975 quantile. 
+# +# >>> reg2 = copy.deepcopy(reg) +# >>> reg2.inputs.winsorize_upper_quantile = 0.975 +# >>> reg2.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 0.975 ] --write-composite-transform 1' +# +# Clip extremely low intensity data points using winsorize_lower_quantile. All data points +# lower than the 0.025 quantile are set to the original value at the 0.025 quantile. 
+# +# +# >>> reg3 = copy.deepcopy(reg) +# >>> reg3.inputs.winsorize_lower_quantile = 0.025 +# >>> reg3.inputs.winsorize_upper_quantile = 0.975 +# >>> reg3.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 0.975 ] --write-composite-transform 1' +# +# Use float instead of double for computations (saves memory usage) +# +# >>> reg3a = copy.deepcopy(reg) +# >>> reg3a.inputs.float = True +# >>> reg3a.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 1 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# Force to use double instead of float for computations (more precision and memory usage). 
+# +# >>> reg3b = copy.deepcopy(reg) +# >>> reg3b.inputs.float = False +# >>> reg3b.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 0 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# 'collapse_output_transforms' can be used to put all transformation in a single 'composite_transform'- +# file. Note that forward_transforms will now be an empty list. 
+# +# >>> # Test collapse transforms flag +# >>> reg4 = copy.deepcopy(reg) +# >>> reg4.inputs.save_state = 'trans.mat' +# >>> reg4.inputs.restore_state = 'trans.mat' +# >>> reg4.inputs.initialize_transforms_per_stage = True +# >>> reg4.inputs.collapse_output_transforms = True +# >>> outputs = reg4._list_outputs() +# >>> pprint.pprint(outputs) # doctest: +ELLIPSIS, +# {'composite_transform': '...data/output_Composite.h5', +# 'elapsed_time': , +# 'forward_invert_flags': [], +# 'forward_transforms': [], +# 'inverse_composite_transform': '...data/output_InverseComposite.h5', +# 'inverse_warped_image': , +# 'metric_value': , +# 'reverse_forward_invert_flags': [], +# 'reverse_forward_transforms': [], +# 'reverse_invert_flags': [], +# 'reverse_transforms': [], +# 'save_state': '...data/trans.mat', +# 'warped_image': '...data/output_warped_image.nii.gz'} +# >>> reg4.cmdline +# 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# +# >>> # Test collapse transforms flag +# >>> reg4b = copy.deepcopy(reg4) +# >>> reg4b.inputs.write_composite_transform = False +# >>> outputs = reg4b._list_outputs() +# >>> pprint.pprint(outputs) # doctest: +ELLIPSIS, +# {'composite_transform': , +# 'elapsed_time': , +# 
'forward_invert_flags': [False, False], +# 'forward_transforms': ['...data/output_0GenericAffine.mat', +# '...data/output_1Warp.nii.gz'], +# 'inverse_composite_transform': , +# 'inverse_warped_image': , +# 'metric_value': , +# 'reverse_forward_invert_flags': [False, False], +# 'reverse_forward_transforms': ['...data/output_1Warp.nii.gz', +# '...data/output_0GenericAffine.mat'], +# 'reverse_invert_flags': [True, False], +# 'reverse_transforms': ['...data/output_0GenericAffine.mat', '...data/output_1InverseWarp.nii.gz'], +# 'save_state': '...data/trans.mat', +# 'warped_image': '...data/output_warped_image.nii.gz'} +# >>> reg4b.aggregate_outputs() # doctest: +SKIP +# >>> reg4b.cmdline +# 'antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 0' +# +# One can use multiple similarity metrics in a single registration stage.The Node below first +# performs a linear registration using only the Mutual Information ('Mattes')-metric. +# In a second stage, it performs a non-linear registration ('Syn') using both a +# Mutual Information and a local cross-correlation ('CC')-metric. Both metrics are weighted +# equally ('metric_weight' is .5 for both). The Mutual Information- metric uses 32 bins. 
+# The local cross-correlations (correlations between every voxel's neighborhoods) is computed +# with a radius of 4. +# +# >>> # Test multiple metrics per stage +# >>> reg5 = copy.deepcopy(reg) +# >>> reg5.inputs.fixed_image = 'fixed1.nii' +# >>> reg5.inputs.moving_image = 'moving1.nii' +# >>> reg5.inputs.metric = ['Mattes', ['Mattes', 'CC']] +# >>> reg5.inputs.metric_weight = [1, [.5,.5]] +# >>> reg5.inputs.radius_or_number_of_bins = [32, [32, 4] ] +# >>> reg5.inputs.sampling_strategy = ['Random', None] # use default strategy in second stage +# >>> reg5.inputs.sampling_percentage = [0.05, [0.05, 0.10]] +# >>> reg5.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed1.nii, moving1.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# ANTS Registration can also use multiple modalities to perform the registration. Here it is assumed +# that fixed1.nii and fixed2.nii are in the same space, and so are moving1.nii and +# moving2.nii. First, a linear registration is performed matching fixed1.nii to moving1.nii, +# then a non-linear registration is performed to match fixed2.nii to moving2.nii, starting from +# the transformation of the first step. 
+# +# >>> # Test multiple inputS +# >>> reg6 = copy.deepcopy(reg5) +# >>> reg6.inputs.fixed_image = ['fixed1.nii', 'fixed2.nii'] +# >>> reg6.inputs.moving_image = ['moving1.nii', 'moving2.nii'] +# >>> reg6.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed2.nii, moving2.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# Different methods can be used for the interpolation when applying transformations. 
+# +# >>> # Test Interpolation Parameters (BSpline) +# >>> reg7a = copy.deepcopy(reg) +# >>> reg7a.inputs.interpolation = 'BSpline' +# >>> reg7a.inputs.interpolation_parameters = (3,) +# >>> reg7a.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation BSpline[ 3 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# >>> # Test Interpolation Parameters (MultiLabel/Gaussian) +# >>> reg7b = copy.deepcopy(reg) +# >>> reg7b.inputs.interpolation = 'Gaussian' +# >>> reg7b.inputs.interpolation_parameters = (1.0, 1.0) +# >>> reg7b.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Gaussian[ 1.0, 1.0 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] 
--write-composite-transform 1' +# +# BSplineSyN non-linear registration with custom parameters. +# +# >>> # Test Extended Transform Parameters +# >>> reg8 = copy.deepcopy(reg) +# >>> reg8.inputs.transforms = ['Affine', 'BSplineSyN'] +# >>> reg8.inputs.transform_parameters = [(2.0,), (0.25, 26, 0, 3)] +# >>> reg8.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform BSplineSyN[ 0.25, 26, 0, 3 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# Mask the fixed image in the second stage of the registration (but not the first). 
+# +# >>> # Test masking +# >>> reg9 = copy.deepcopy(reg) +# >>> reg9.inputs.fixed_image_masks = ['NULL', 'fixed1.nii'] +# >>> reg9.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ NULL, NULL ] --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ fixed1.nii, NULL ] --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +# Here we use both a warpfield and a linear transformation, before registration commences. Note that +# the first transformation that needs to be applied ('ants_Warp.nii.gz') is last in the list of +# 'initial_moving_transform'. 
+# +# >>> # Test initialization with multiple transforms matrices (e.g., unwarp and affine transform) +# >>> reg10 = copy.deepcopy(reg) +# >>> reg10.inputs.initial_moving_transform = ['func_to_struct.mat', 'ants_Warp.nii.gz'] +# >>> reg10.inputs.invert_initial_moving_transform = [False, False] +# >>> reg10.cmdline +# 'antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ func_to_struct.mat, 0 ] [ ants_Warp.nii.gz, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1' +# +task_name: registration +nipype_name: Registration +nipype_module: nipype.interfaces.ants.registration +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ fixed_image: medimage/nifti1+list-of + fixed_image_mask: generic/file + fixed_image_masks: generic/file+list-of + moving_image: medimage/nifti1+list-of + moving_image_mask: generic/file + moving_image_masks: generic/file+list-of + save_state: datascience/text-matrix + restore_state: datascience/text-matrix + initial_moving_transform: datascience/text-matrix+list-of + invert_initial_moving_transform: generic/file+list-of + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. 
+ composite_transform: generic/file + inverse_composite_transform: generic/file + warped_image: generic/file + inverse_warped_image: generic/file + save_state: datascience/text-matrix + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + fixed_image: + moving_image: + output_transform_prefix: '"output_"' + initial_moving_transform: + transforms: '["Affine", "SyN"]' + transform_parameters: '[(2.0,), (0.25, 3.0, 0.0)]' + number_of_iterations: '[[1500, 200], [100, 50, 30]]' + dimension: '3' + write_composite_transform: 'True' + collapse_output_transforms: 'False' + initialize_transforms_per_stage: 'False' + metric: '["Mattes"]*2' + metric_weight: '[1]*2 # Default (value ignored currently by ANTs)' + radius_or_number_of_bins: '[32]*2' + sampling_strategy: '["Random", None]' + sampling_percentage: '[0.05, None]' + convergence_threshold: '[1.e-8, 1.e-9]' + convergence_window_size: '[20]*2' + smoothing_sigmas: '[[1,0], [2,1,0]]' + sigma_units: '["vox"] * 2' + shrink_factors: '[[2,1], [3,2,1]]' + use_estimate_learning_rate_once: '[True, True]' + use_histogram_matching: '[True, True] # This is the default' + output_warped_image: '"output_warped_image.nii.gz"' + imports: &id001 + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + - module: copy + - module: pprint + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will 
typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + invert_initial_moving_transform: + winsorize_lower_quantile: '0.025' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + winsorize_upper_quantile: '0.975' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + winsorize_lower_quantile: '0.025' + winsorize_upper_quantile: '0.975' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. 
Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + float: 'True' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + float: 'False' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + save_state: + restore_state: + initialize_transforms_per_stage: 'True' + collapse_output_transforms: 'True' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. 
Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + write_composite_transform: 'False' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + fixed_image: + moving_image: + metric: '["Mattes", ["Mattes", "CC"]]' + metric_weight: '[1, [.5,.5]]' + radius_or_number_of_bins: '[32, [32, 4] ]' + sampling_strategy: '["Random", None] # use default strategy in second stage' + sampling_percentage: '[0.05, [0.05, 0.10]]' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + fixed_image: + moving_image: + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + interpolation: '"BSpline"' + interpolation_parameters: (3,) + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. 
Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + interpolation: '"Gaussian"' + interpolation_parameters: (1.0, 1.0) + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + transforms: '["Affine", "BSplineSyN"]' + transform_parameters: '[(2.0,), (0.25, 26, 0, 3)]' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + fixed_image_masks: + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. 
Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. Set to false + # when you are satisfied with the edits you have made to this file +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + initial_moving_transform: + invert_initial_moving_transform: + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +doctests: +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. 
+ fixed_image: + moving_image: + output_transform_prefix: '"output_"' + initial_moving_transform: + transforms: '["Affine", "SyN"]' + transform_parameters: '[(2.0,), (0.25, 3.0, 0.0)]' + number_of_iterations: '[[1500, 200], [100, 50, 30]]' + dimension: '3' + write_composite_transform: 'True' + collapse_output_transforms: 'False' + initialize_transforms_per_stage: 'False' + metric: '["Mattes"]*2' + metric_weight: '[1]*2 # Default (value ignored currently by ANTs)' + radius_or_number_of_bins: '[32]*2' + sampling_strategy: '["Random", None]' + sampling_percentage: '[0.05, None]' + convergence_threshold: '[1.e-8, 1.e-9]' + convergence_window_size: '[20]*2' + smoothing_sigmas: '[[1,0], [2,1,0]]' + sigma_units: '["vox"] * 2' + shrink_factors: '[[2,1], [3,2,1]]' + use_estimate_learning_rate_once: '[True, True]' + use_histogram_matching: '[True, True] # This is the default' + output_warped_image: '"output_warped_image.nii.gz"' + imports: *id001 + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 1.0 ] --write-composite-transform 1 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + invert_initial_moving_transform: + winsorize_lower_quantile: '0.025' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 0.975 ] --write-composite-transform 1 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + winsorize_upper_quantile: '0.975' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.025, 0.975 ] --write-composite-transform 1 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + winsorize_lower_quantile: '0.025' + winsorize_upper_quantile: '0.975' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 1 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + float: 'True' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --float 0 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + float: 'False' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + save_state: + restore_state: + initialize_transforms_per_stage: 'True' + collapse_output_transforms: 'True' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: antsRegistration --collapse-output-transforms 1 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 1 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --restore-state trans.mat --save-state trans.mat --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 0 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + write_composite_transform: 'False' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed1.nii, moving1.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + fixed_image: + moving_image: + metric: '["Mattes", ["Mattes", "CC"]]' + metric_weight: '[1, [.5,.5]]' + radius_or_number_of_bins: '[32, [32, 4] ]' + sampling_strategy: '["Random", None] # use default strategy in second stage' + sampling_percentage: '[0.05, [0.05, 0.10]]' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 0.5, 32, None, 0.05 ] --metric CC[ fixed2.nii, moving2.nii, 0.5, 4, None, 0.1 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + fixed_image: + moving_image: + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation BSpline[ 3 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + interpolation: '"BSpline"' + interpolation_parameters: (3,) + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Gaussian[ 1.0, 1.0 ] --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + interpolation: '"Gaussian"' + interpolation_parameters: (1.0, 1.0) + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform BSplineSyN[ 0.25, 26, 0, 3 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + transforms: '["Affine", "BSplineSyN"]' + transform_parameters: '[(2.0,), (0.25, 26, 0, 3)]' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ trans.mat, 1 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ NULL, NULL ] --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --masks [ fixed1.nii, NULL ] --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + fixed_image_masks: + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS +- cmdline: antsRegistration --collapse-output-transforms 0 --dimensionality 3 --initial-moving-transform [ func_to_struct.mat, 0 ] [ ants_Warp.nii.gz, 0 ] --initialize-transforms-per-stage 0 --interpolation Linear --output [ output_, output_warped_image.nii.gz ] --transform Affine[ 2.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32, Random, 0.05 ] --convergence [ 1500x200, 1e-08, 20 ] --smoothing-sigmas 1.0x0.0vox --shrink-factors 2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --transform SyN[ 0.25, 3.0, 0.0 ] --metric Mattes[ fixed1.nii, moving1.nii, 1, 32 ] --convergence [ 100x50x30, 1e-09, 20 ] --smoothing-sigmas 2.0x1.0x0.0vox --shrink-factors 3x2x1 --use-estimate-learning-rate-once 1 --use-histogram-matching 1 --winsorize-image-intensities [ 0.0, 1.0 ] --write-composite-transform 1 + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + initial_moving_transform: + invert_initial_moving_transform: + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. 
# doctest: +ELLIPSIS diff --git a/example-specs/task/ants_registration_Registration.yaml b/example-specs/task/ants_registration_Registration.yaml deleted file mode 100644 index deb20aea..00000000 --- a/example-specs/task/ants_registration_Registration.yaml +++ /dev/null @@ -1,32 +0,0 @@ -task_name: Registration -nipype_module: nipype.interfaces.ants.registration -nipype_name: -inputs: - omit: - rename: - types: - fixed_image: medimage/nifti-gz - moving_image: medimage/nifti-gz - metadata: -outputs: - omit: - rename: - types: - callables: - requirements: - output_warped_image: ["fixed_image", "moving_image", "output_transform_prefix"] - templates: - output_warped_image: "{output_transform_prefix}warped" -test: -doctest: - cmdline: >- - antsRegistration --output [ output_, output_warped_image.nii.gz ] - --metric Mattes[ /mock/medimage/nifti-gz.nii.gz, /mock/medimage/nifti-gz.nii.gz, - 1, 32, Random, 0.05 ] - inputs: - fixed_image: - moving_image: - - - - \ No newline at end of file diff --git a/example-specs/task/ants_segmentation_N4BiasFieldCorrection.yaml b/example-specs/task/ants_segmentation_N4BiasFieldCorrection.yaml deleted file mode 100644 index eabb3de6..00000000 --- a/example-specs/task/ants_segmentation_N4BiasFieldCorrection.yaml +++ /dev/null @@ -1,21 +0,0 @@ -task_name: N4BiasFieldCorrection -nipype_module: nipype.interfaces.ants.segmentation -nipype_name: -inputs: - omit: - rename: - types: - metadata: -outputs: - omit: - rename: - types: - callables: - templates: - requirements: - bias_image: ["save_bias"] -test: -doctest: - cmdline: - inputs: - input_image: test.nii.gz diff --git a/example-specs/task/apply_vol_transform.yaml b/example-specs/task/apply_vol_transform.yaml new file mode 100644 index 00000000..020a38f0 --- /dev/null +++ b/example-specs/task/apply_vol_transform.yaml @@ -0,0 +1,159 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'nipype.interfaces.freesurfer.preprocess.ApplyVolTransform' from Nipype to Pydra. 
+# +# Please fill-in/edit the fields below where appropriate +# +# Inputs +# ------ +# source_file : file +# Input volume you wish to transform +# transformed_file : file +# Output volume +# target_file : file +# Output template volume +# tal : bool +# map to a sub FOV of MNI305 (with --reg only) +# tal_resolution : float +# Resolution to sample when using tal +# fs_target : bool +# use orig.mgz from subject in regfile as target +# reg_file : file +# tkRAS-to-tkRAS matrix (tkregister2 format) +# lta_file : file +# Linear Transform Array file +# lta_inv_file : file +# LTA, invert +# fsl_reg_file : file +# fslRAS-to-fslRAS matrix (FSL format) +# xfm_reg_file : file +# ScannerRAS-to-ScannerRAS matrix (MNI format) +# reg_header : bool +# ScannerRAS-to-ScannerRAS matrix = identity +# mni_152_reg : bool +# target MNI152 space +# subject : str +# set matrix = identity and use subject for any templates +# inverse : bool +# sample from target to source +# interp : enum +# Interpolation method ( or nearest) +# no_resample : bool +# Do not resample; just change vox2ras matrix +# m3z_file : file +# This is the morph to be applied to the volume. Unless the morph is in mri/transforms (eg.: for talairach.m3z computed by reconall), you will need to specify the full path to this morph and use the --noDefM3zPath flag. +# no_ded_m3z_path : bool +# To be used with the m3z flag. Instructs the code not to look for them3z morph in the default location (SUBJECTS_DIR/subj/mri/transforms), but instead just use the path indicated in --m3z. +# invert_morph : bool +# Compute and use the inverse of the non-linear morph to resample the input volume. To be used by --m3z. +# subjects_dir : directory +# subjects directory +# args : str +# Additional parameters to the command +# environ : dict +# Environment variables +# +# Outputs +# ------- +# transformed_file : file +# Path to output file if used normally +# +# Docs +# ---- +# Use FreeSurfer mri_vol2vol to apply a transform. 
+# +# Examples +# -------- +# +# >>> from nipype.interfaces.freesurfer import ApplyVolTransform +# >>> applyreg = ApplyVolTransform() +# >>> applyreg.inputs.source_file = 'structural.nii' +# >>> applyreg.inputs.reg_file = 'register.dat' +# >>> applyreg.inputs.transformed_file = 'struct_warped.nii' +# >>> applyreg.inputs.fs_target = True +# >>> applyreg.cmdline +# 'mri_vol2vol --fstarg --reg register.dat --mov structural.nii --o struct_warped.nii' +# +# +task_name: apply_vol_transform +nipype_name: ApplyVolTransform +nipype_module: nipype.interfaces.freesurfer.preprocess +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + source_file: medimage/nifti1 + target_file: medimage/nifti1 + reg_file: datascience/dat-file + lta_file: generic/file + lta_inv_file: generic/file + fsl_reg_file: generic/file + xfm_reg_file: generic/file + m3z_file: generic/file + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). 
For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + transformed_file: medimage/nifti1 + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + transformed_file: '"struct_warped.nii"' + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + source_file: + reg_file: + transformed_file: '"struct_warped.nii"' + fs_target: 'True' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +doctests: +- cmdline: mri_vol2vol --fstarg --reg register.dat --mov structural.nii --o struct_warped.nii + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + source_file: + reg_file: + transformed_file: '"struct_warped.nii"' + fs_target: 'True' + imports: + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: '''' + # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/task/extract_roi.yaml b/example-specs/task/extract_roi.yaml new file mode 100644 index 00000000..57391826 --- /dev/null +++ b/example-specs/task/extract_roi.yaml @@ -0,0 +1,146 @@ +# This file is used to manually specify the semi-automatic conversion of +# 'nipype.interfaces.fsl.utils.ExtractROI' from Nipype to Pydra. +# +# Please fill-in/edit the fields below where appropriate +# +# Inputs +# ------ +# in_file : file +# input file +# roi_file : file +# output file +# x_min : int +# +# x_size : int +# +# y_min : int +# +# y_size : int +# +# z_min : int +# +# z_size : int +# +# t_min : int +# +# t_size : int +# +# crop_list : list +# list of two tuples specifying crop options +# output_type : enum +# FSL output type +# args : str +# Additional parameters to the command +# environ : dict +# Environment variables +# +# Outputs +# ------- +# roi_file : file +# +# +# Docs +# ---- +# Uses FSL Fslroi command to extract region of interest (ROI) +# from an image. 
+# +# You can a) take a 3D ROI from a 3D data set (or if it is 4D, the +# same ROI is taken from each time point and a new 4D data set is +# created), b) extract just some time points from a 4D data set, or +# c) control time and space limits to the ROI. Note that the +# arguments are minimum index and size (not maximum index). So to +# extract voxels 10 to 12 inclusive you would specify 10 and 3 (not +# 10 and 12). +# +# +# Examples +# -------- +# +# >>> from nipype.interfaces.fsl import ExtractROI +# >>> from nipype.testing import anatfile +# >>> fslroi = ExtractROI(in_file=anatfile, roi_file='bar.nii', t_min=0, +# ... t_size=1) +# >>> fslroi.cmdline == 'fslroi %s bar.nii 0 1' % anatfile +# True +# +# +# +task_name: extract_roi +nipype_name: ExtractROI +nipype_module: nipype.interfaces.fsl.utils +inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + in_file: medimage/nifti1 + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) +outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). 
For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + roi_file: medimage/nifti1 + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + roi_file: '"bar.nii"' + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present +tests: +- inputs: + # dict[str, str] - values to provide to inputs fields in the task initialisation + # (if not specified, will try to choose a sensible value) + in_file: + roi_file: '"bar.nii"' + t_min: '0' + t_size: '1' + imports: &id001 + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + - module: nipype.testing + name: anatfile + alias: + expected_outputs: + # dict[str, str] - expected values for selected outputs, noting that tests will typically + # be terminated before they complete for time-saving reasons, and therefore + # these values will be ignored, when running in CI + timeout: 10 + # int - the value to set for the timeout in the generated test, + # after which the test will be considered to have been initialised + # successfully. Set to 0 to disable the timeout (warning, this could + # lead to the unittests taking a very long time to complete) + xfail: true + # bool - whether the unittest is expected to fail or not. 
Set to false + # when you are satisfied with the edits you have made to this file +doctests: +- cmdline: + # str - the expected cmdline output + inputs: + # dict[str, str] - name-value pairs for inputs to be provided to the doctest. + # If the field is of file-format type and the value is None, then the + # '.mock()' method of the corresponding class is used instead. + in_file: + roi_file: '"bar.nii"' + t_min: '0' + t_size: '1' + imports: *id001 + # list[nipype2pydra.task.importstatement] - list import statements required by the test, with each list item + # consisting of 'module', 'name', and optionally 'alias' keys + directive: + # str - any doctest directive to place on the cmdline call, e.g. # doctest: +ELLIPSIS diff --git a/example-specs/task/freesurfer_preprocess_ApplyVolTransform.yaml b/example-specs/task/freesurfer_preprocess_ApplyVolTransform.yaml deleted file mode 100644 index 441f2ac9..00000000 --- a/example-specs/task/freesurfer_preprocess_ApplyVolTransform.yaml +++ /dev/null @@ -1,21 +0,0 @@ -task_name: ApplyVolTransform -nipype_module: nipype.interfaces.freesurfer.preprocess -nipype_name: -inputs: - omit: - rename: - types: - source_file: medimage/nifti-gz - metadata: -outputs: - omit: - rename: - types: - callables: - templates: - transformed_file: "{source_file}_warped" - requirements: -doctest: - cmdline: mri_vol2vol /mock/medimage/nifti-gz.nii.gz - inputs: - source_file: diff --git a/example-specs/task/fsl_utils_ExtractROI.yaml b/example-specs/task/fsl_utils_ExtractROI.yaml deleted file mode 100644 index 49b67c7b..00000000 --- a/example-specs/task/fsl_utils_ExtractROI.yaml +++ /dev/null @@ -1,30 +0,0 @@ -task_name: ExtractROI -nipype_module: nipype.interfaces.fsl.utils -nipype_name: -inputs: - omit: - - crop_list - rename: - types: - in_file: medimage/nifti-gz - metadata: -outputs: - omit: - rename: - types: - callables: - requirements: - roi_file: [in_file] - templates: - roi_file: "{in_file}_trim" -doctest: - cmdline: fslroi test.nii.gz 
test_trim.nii.gz 0 3 - inputs: - t_min: 0 - t_size: 3 - roi_file: test_trim.nii.gz -test: - inputs: - t_min: 0 - t_size: 1 - outputs: \ No newline at end of file From e81a4fecf5c2a0dd3b6ec4abb0913a75d12da28f Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 2 Aug 2023 07:26:05 +1000 Subject: [PATCH 33/42] touched up package-gen README and converted to RST --- scripts/pkg_gen/create_packages.py | 3 +- .../resources/{README.md => README.rst} | 58 +++++++++++++------ 2 files changed, 42 insertions(+), 19 deletions(-) rename scripts/pkg_gen/resources/{README.md => README.rst} (62%) diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index c6b875d7..d26d0f58 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -419,7 +419,8 @@ def copy_ignore(_, names): ) # Add modified README - shutil.copy(RESOURCES_DIR / "README.md", pkg_dir / "README.md") + os.unlink(pkg_dir / "README.md") + shutil.copy(RESOURCES_DIR / "README.rst", pkg_dir / "README.rst") # Add "pydra.tasks..auto to gitignore" with open(pkg_dir / ".gitignore", "a") as f: diff --git a/scripts/pkg_gen/resources/README.md b/scripts/pkg_gen/resources/README.rst similarity index 62% rename from scripts/pkg_gen/resources/README.md rename to scripts/pkg_gen/resources/README.rst index accd0b43..99cad5ed 100644 --- a/scripts/pkg_gen/resources/README.md +++ b/scripts/pkg_gen/resources/README.rst @@ -1,10 +1,26 @@ -# Pydra task package for CHANGEME +Pydra task package for CHANGEME +=============================== -This package contains a collection of Pydra task interfaces for CHANGEME. The basis of -which have been semi-automatically converted from the corresponding [Nipype](https://github.com/nipy/nipype) -interfaces. +.. image:: https://github.com/nipype/pydra-CHANGEME/actions/workflows/tests.yml/badge.svg + :target: https://github.com/nipype/pydra-CHANGEME/actions/workflows/tests.yml +.. .. 
image:: https://codecov.io/gh/nipype/pydra-CHANGEME/branch/main/graph/badge.svg?token=UIS0OGPST7 +.. :target: https://codecov.io/gh/nipype/pydra-CHANGEME +.. image:: https://img.shields.io/pypi/pyversions/pydra-CHANGEME.svg + :target: https://pypi.python.org/pypi/pydra-CHANGEME/ + :alt: Supported Python versions +.. image:: https://img.shields.io/pypi/v/pydra-CHANGEME.svg + :target: https://pypi.python.org/pypi/pydra-CHANGEME/ + :alt: Latest Version -## Automatically-generated vs manually-curated tasks + +This package contains a collection of Pydra task interfaces for the CHANGEME toolkit. +The basis of this collection has been formed by the semi-automatic conversion of +existing `Nipype `__ interfaces to Pydra using the +`Nipype2Pydra `__ tool + + +Automatically-generated vs manually-curated tasks +------------------------------------------------- Automatically generated tasks can be found in the `pydra.tasks.CHANGEME.auto` package. These packages should be treated with extreme caution as they likely do not pass testing. @@ -12,7 +28,8 @@ Generated tasks that have been edited and pass testing are imported into one or `pydra.tasks.CHANGEME.v*` packages, corresponding to the version of the CHANGEME toolkit they are designed for. -## Tests +Tests +----- This package comes with a battery of automatically generated test modules. To install the necessary dependencies to run the tests @@ -21,7 +38,7 @@ the necessary dependencies to run the tests $ pip install -e .[test] ``` -Then the tests, including [doctests](https://docs.python.org/3/library/doctest.html), can be launched using +Then the tests, including `doctests` `__, can be launched using ``` $ pytest --doctest-modules pydra/tasks/* @@ -35,19 +52,23 @@ run to completion. 
To disable this and run the test(s) through to completion run $ pytest --doctest-modules --timeout-pass 0 pydra/tasks/* ``` -## Continuous integration +Continuous integration +---------------------- -This template uses [GitHub Actions](https://docs.github.com/en/actions/) to run tests and +This template uses `GitHub Actions `__` to run tests and deploy packages to PYPI. New packages are built and uploaded when releases are created on GitHub, or new releases of Nipype or the Nipype2Pydra conversion tool are released. Releases triggered by updates to Nipype or Nipype2Pydra are signified by the `postN` -suffix where N = with '.'s stripped, e.g. +suffix where `N = ` with the '.'s stripped, e.g. `v0.2.3post185010` corresponds to the v0.2.3 tag of this repository with auto-generated packages from Nipype 1.8.5 using Nipype2Pydra 0.1.0. -# Contributing to this package -## Developer installation +Contributing to this package +---------------------------- + +Developer installation +~~~~~~~~~~~~~~~~~~~~~~ Install repo in developer mode from the source directory and install pre-commit to @@ -63,7 +84,7 @@ Pydra task interfaces from their Nipype counterparts ``` $ pip install -r nipype-auto-conv/requirements.txt -$ nipype-auto-gen/generate +$ nipype-auto-conv/generate ``` ## Methodology @@ -75,19 +96,20 @@ The development of this package is expected to have two phases 2. When the Pydra tasks are considered be mature and they are edited by hand Different tasks will probably mature at different times so there will probably be an -intermediate phase between 1 & 2. +intermediate phase between 1 and 2. -### Auto-conversion phase +Auto-conversion phase +~~~~~~~~~~~~~~~~~~~~~ The auto-converted Pydra tasks are generated from their corresponding Nipype interface in combination with "conversion hints" contained in YAML specs -located in `nipype-auto-gen/specs/`. The self-documented conversion specs are +located in `nipype-auto-conv/specs/`. 
The self-documented conversion specs are to be edited by hand in order to assist the auto-converter produce valid pydra tasks. After editing one or more conversion specs the `pydra.tasks.CHANGEME.auto` package should be regenerated by running ``` -$ nipype-auto-gen/generate +$ nipype-auto-conv/generate ``` The tests should be run on the auto-generated tasks to see if they are valid @@ -101,5 +123,5 @@ to import the now valid task interface to signify that it has been validated and for use, e.g. ```python -from pydra.tasks.CHANGEME.auto import edited_task +from pydra.tasks.CHANGEME.auto import ``` \ No newline at end of file From 45a105a79747673c5e006c300dd9b215689f911f Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 2 Aug 2023 15:37:04 +1000 Subject: [PATCH 34/42] touched up readme and template readme --- README.rst | 55 ++++++++++++++++++++-------- scripts/pkg_gen/resources/README.rst | 23 +++++++++++- 2 files changed, 62 insertions(+), 16 deletions(-) diff --git a/README.rst b/README.rst index c7c55318..4dfb207d 100644 --- a/README.rst +++ b/README.rst @@ -47,21 +47,46 @@ specification, .. 
code-block:: yaml - task_name: Registration - nipype_module: nipype.interfaces.ants.registration - output_requirements: - output_warped_image: ["fixed_image", "moving_image", "output_transform_prefix"] - output_templates: - output_warped_image: "{output_transform_prefix}warped" - doctest: - fixed_image: test.nii.gz - moving_image: test.nii.gz - cmdline: >- - antsRegistration --output [ output_, output_warped_image.nii.gz ] - --metric Mattes[ test.nii, test.nii, 1, 32, Random, 0.05 ] - tests_inputs: [] - tests_outputs: - - AttributeError + task_name: n4_bias_field_correction + nipype_name: N4BiasFieldCorrection + nipype_module: nipype.interfaces.ants.segmentation + inputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + input_image: medimage/nifti1 + mask_image: medimage/nifti1 + weight_image: medimage/nifti1 + bias_image: medimage/nifti1 + metadata: + # dict[str, dict[str, any]] - additional metadata to set on any of the input fields (e.g. out_file: position: 1) + outputs: + omit: + # list[str] - fields to omit from the Pydra interface + rename: + # dict[str, str] - fields to rename in the Pydra interface + types: + # dict[str, type] - override inferred types (use "mime-like" string for file-format types, + # e.g. 'medimage/nifti-gz'). 
For most fields the type will be correctly inferred + # from the nipype interface, but you may want to be more specific, particularly + # for file types, where specifying the format also specifies the file that will be + # passed to the field in the automatically generated unittests. + output_image: medimage/nifti1 + bias_image: medimage/nifti1 + callables: + # dict[str, str] - names of methods/callable classes defined in the adjacent `*_callables.py` + # to set to the `callable` attribute of output fields + templates: + # dict[str, str] - `output_file_template` values to be provided to output fields + requirements: + # dict[str, list[str]] - input fields that are required to be provided for the output field to be present *Detailed description of the different options to go here* diff --git a/scripts/pkg_gen/resources/README.rst b/scripts/pkg_gen/resources/README.rst index 99cad5ed..56efa9a7 100644 --- a/scripts/pkg_gen/resources/README.rst +++ b/scripts/pkg_gen/resources/README.rst @@ -1,3 +1,4 @@ +=============================== Pydra task package for CHANGEME =============================== @@ -124,4 +125,24 @@ for use, e.g. ```python from pydra.tasks.CHANGEME.auto import -``` \ No newline at end of file +``` + +Typing and sample test data +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The automatically generated tests will attempt to provided the task instance to be tested +with sensible default values based on the type of the field and any constraints it has +on it. However, these will often need to be manually overridden after consulting the +underlying tool's documentation. + +For file-based data, automatically generated file-system objects will be created for +selected format types, e.g. Nifti, Dicom. Therefore, it is important to specify the +format of the file using the "mime-like" string corresponding to a +`fileformats `__ class +in the ``inputs > types`` and ``outputs > types`` dicts of the YAML spec. 
+ +If the required file-type is not found implemented within fileformats, please see the `fileformats +docs `__ for instructions on how to define +new fileformat types, and see +`fileformats-medimage-extras `__ +for an example on how to implement methods to generate sample data for them. From 1e8f65d55d2ead6c0befbfbc1bcda595a2f3fd67 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 3 Aug 2023 14:49:11 +1000 Subject: [PATCH 35/42] added descriptions to input and output field references --- scripts/pkg_gen/create_packages.py | 70 ++++++++++++++----- scripts/pkg_gen/resources/README.rst | 5 ++ .../resources/gh_workflows/pythonpackage.yaml | 30 ++++---- 3 files changed, 73 insertions(+), 32 deletions(-) diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index d26d0f58..3dd4c481 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -124,11 +124,13 @@ def generate_packages( ( preamble, + input_helps, + output_helps, file_inputs, file_outputs, genfile_outputs, multi_inputs, - ) = generate_spec_preamble(nipype_interface) + ) = parse_nipype_interface(nipype_interface) # Create "stubs" for each of the available fields def fields_stub(name, category_class, values=None): @@ -172,7 +174,13 @@ def fields_stub(name, category_class, values=None): prev_block += para doctests: ty.List[DocTestGenerator] = [] - tests: ty.List[TestGenerator] = [] + tests: ty.List[TestGenerator] = [ + fields_stub( + "test", + TestGenerator, + {"inputs": {i: None for i in input_helps}, "imports": None}, + ) + ] for doctest_str in doctest_blocks: if ">>>" in doctest_str: @@ -356,6 +364,13 @@ def combine_types(type_, prev_type): f" {field.name}:" + r"\1" + f"\n{comment}", yaml_str, ) + # Add comments to input and output fields, with their type and description + for inpt, desc in input_helps.items(): + yaml_str = re.sub(f" ({inpt}):(.*)", r" \1:\2\n # ##PLACEHOLDER##", yaml_str) + yaml_str = yaml_str.replace("##PLACEHOLDER##", desc) + 
for outpt, desc in output_helps.items(): + yaml_str = re.sub(f" ({outpt}):(.*)", r" \1:\2\n # ##PLACEHOLDER##", yaml_str) + yaml_str = yaml_str.replace("##PLACEHOLDER##", desc) with open(spec_dir / (spec_name + ".yaml"), "w") as f: f.write(preamble + yaml_str) @@ -421,6 +436,11 @@ def copy_ignore(_, names): # Add modified README os.unlink(pkg_dir / "README.md") shutil.copy(RESOURCES_DIR / "README.rst", pkg_dir / "README.rst") + with open(pkg_dir / "pyproject.toml") as f: + pyproject_toml = f.read() + pyproject_toml = pyproject_toml.replace("README.md", "README.rst") + with open(pkg_dir / "pyproject.toml", "w") as f: + f.write(pyproject_toml) # Add "pydra.tasks..auto to gitignore" with open(pkg_dir / ".gitignore", "a") as f: @@ -447,11 +467,19 @@ def copy_ignore(_, names): return pkg_dir -def generate_spec_preamble( +def parse_nipype_interface( nipype_interface, -) -> ty.Tuple[str, ty.List[str], ty.List[str], ty.List[str], ty.List[str]]: +) -> ty.Tuple[ + str, + ty.Dict[str, str], + ty.Dict[str, str], + ty.List[str], + ty.List[str], + ty.List[str], + ty.List[str], +]: """Generate preamble comments at start of file with args and doc strings""" - inputs_desc = "" + input_helps = {} file_inputs = [] genfile_outputs = [] multi_inputs = [] @@ -460,7 +488,10 @@ def generate_spec_preamble( if inpt_name in ("trait_added", "trait_modified"): continue inpt_desc = inpt.desc.replace("\n", " ") if inpt.desc else "" - inputs_desc += f"# {inpt_name} : {type(inpt.trait_type).__name__.lower()}\n# {inpt_desc}\n" + inpt_mdata = f"type={type(inpt.trait_type).__name__.lower()}|default={inpt.default!r}" + if isinstance(inpt.trait_type, nipype.interfaces.base.core.traits.Enum): + inpt_mdata += f"|allowed[{','.join(sorted(repr(v) for v in inpt.trait_type.values))}]" + input_helps[inpt_name] = f"{inpt_mdata}: {inpt_desc}" if inpt.genfile: genfile_outputs.append(inpt_name) elif type(inpt.trait_type).__name__ == "File": @@ -475,13 +506,15 @@ def generate_spec_preamble( 
file_inputs.append(inpt_name) multi_inputs.append(inpt_name) file_outputs = [] - outputs_desc = "" + output_helps = {} if nipype_interface.output_spec: for outpt_name, outpt in nipype_interface.output_spec().traits().items(): if outpt_name in ("trait_added", "trait_modified"): continue outpt_desc = outpt.desc.replace("\n", " ") if outpt.desc else "" - outputs_desc += f"# {outpt_name} : {type(outpt.trait_type).__name__.lower()}\n# {outpt_desc}\n" + output_helps[ + outpt_name + ] = f"type={type(outpt.trait_type).__name__.lower()}: {outpt_desc}" if type(outpt.trait_type).__name__ == "File": file_outputs.append(outpt_name) doc_string = nipype_interface.__doc__ if nipype_interface.__doc__ else "" @@ -493,17 +526,19 @@ def generate_spec_preamble( # # Please fill-in/edit the fields below where appropriate # - # Inputs - # ------ - {inputs_desc}# - # Outputs - # ------- - {outputs_desc}# # Docs # ---- # {doc_string}\n""" ).replace(" #", "#") - return preamble, file_inputs, file_outputs, genfile_outputs, multi_inputs + return ( + preamble, + input_helps, + output_helps, + file_inputs, + file_outputs, + genfile_outputs, + multi_inputs, + ) def extract_doctest_inputs( @@ -538,7 +573,7 @@ def extract_doctest_inputs( cmdline = re.sub(r"\s+", " ", cmdline) cmdline = cmdline.replace("'", '"') if '"' not in cmdline else cmdline directive = match.group(2) - if directive == '"': + if directive == '"' or directive == "'": directive = None else: cmdline = directive = None @@ -593,6 +628,9 @@ def extract_doctest_inputs( if not doctest_inpts: raise ValueError(f"Could not parse doctest:\n{doctest}") + if not directive or directive == "''" or directive == '""': + directive = None + return cmdline, doctest_inpts, directive, imports diff --git a/scripts/pkg_gen/resources/README.rst b/scripts/pkg_gen/resources/README.rst index 56efa9a7..2f2a74c6 100644 --- a/scripts/pkg_gen/resources/README.rst +++ b/scripts/pkg_gen/resources/README.rst @@ -85,6 +85,11 @@ Pydra task interfaces from their 
Nipype counterparts ``` $ pip install -r nipype-auto-conv/requirements.txt +``` + +The run the conversion script to convert Nipype interfaces to Pydra + +``` $ nipype-auto-conv/generate ``` diff --git a/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml b/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml index 3dcde127..c1a64f71 100644 --- a/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml +++ b/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml @@ -6,10 +6,6 @@ name: Python package -# Set once -env: - SUBPACKAGE: CHANGEME - on: push: branches: [ main, develop ] @@ -21,7 +17,7 @@ on: jobs: - auto-gen: + nipype-conv: runs-on: ubuntu-latest steps: - name: Checkout @@ -39,7 +35,7 @@ jobs: run: ./nipype-auto-conv/generate - uses: actions/upload-artifact@v3 with: - name: auto-generated + name: converted-nipype path: pydra/tasks/CHANGEME/auto devcheck: @@ -57,10 +53,10 @@ jobs: - name: Revert version to most recent tag on upstream update if: github.event_name == 'repository_dispatch' run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}') - - name: Download auto-generated + - name: Download tasks converted from Nipype uses: actions/download-artifact@v3 with: - name: auto-generated + name: converted-nipype path: pydra/tasks/CHANGEME/auto - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -76,7 +72,7 @@ jobs: - name: Install task package run: | pip install ${{ matrix.pip-flags }} ".[dev]" - python -c "import pydra.tasks.$SUBPACKAGE as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" + python -c "import pydra.tasks.CHANGEME as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" python -c "import pydra as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" test: @@ -90,9 +86,10 @@ jobs: - name: Revert version to most recent tag on upstream update if: github.event_name == 'repository_dispatch' run: git checkout $(git tag -l | tail -n 1 | awk -F post '{print $1}') - - uses: 
actions/download-artifact@v3 + - name: Download tasks converted from Nipype + uses: actions/download-artifact@v3 with: - name: auto-generated + name: converted-nipype path: pydra/tasks/CHANGEME/auto - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 @@ -104,12 +101,12 @@ jobs: - name: Install task package run: | pip install ".[test]" - python -c "import pydra.tasks.$SUBPACKAGE as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" + python -c "import pydra.tasks.CHANGEME as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" python -c "import pydra as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" - name: Test with pytest run: | - pytest -sv --doctest-modules pydra/tasks/$SUBPACKAGE \ - --cov pydra.tasks.$SUBPACKAGE --cov-report xml + pytest -sv --doctest-modules pydra/tasks/CHANGEME \ + --cov pydra.tasks.CHANGEME --cov-report xml - uses: codecov/codecov-action@v3 if: ${{ always() }} @@ -124,9 +121,10 @@ jobs: with: submodules: recursive fetch-depth: 0 - - uses: actions/download-artifact@v3 + - name: Download tasks converted from Nipype + uses: actions/download-artifact@v3 with: - name: auto-generated + name: converted-nipype path: pydra/tasks/CHANGEME/auto - name: Tag release with a post-release based on Nipype and Nipype2Pydra versions if: github.event_name == 'repository_dispatch' From f72eca2e7dcaa2a52ea5661ab3fa12cc9b676f5f Mon Sep 17 00:00:00 2001 From: Tom Close Date: Thu, 3 Aug 2023 15:53:42 +1000 Subject: [PATCH 36/42] added dependency on nipype-conv step in GH action workflow --- scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml b/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml index c1a64f71..aa1a7a7b 100644 --- a/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml +++ b/scripts/pkg_gen/resources/gh_workflows/pythonpackage.yaml @@ -39,6 +39,7 @@ jobs: path: 
pydra/tasks/CHANGEME/auto devcheck: + needs: [nipype-conv] runs-on: ubuntu-latest strategy: matrix: @@ -76,6 +77,7 @@ jobs: python -c "import pydra as m; print(f'{m.__name__} {m.__version__} @ {m.__file__}')" test: + needs: [nipype-conv] runs-on: ubuntu-latest strategy: matrix: From f6439eeb78ba9168943d3fcd998b52f402e10537 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 16 Aug 2023 15:01:43 +1000 Subject: [PATCH 37/42] added option to select new yaml file with list of packages to import --- scripts/pkg_gen/create_packages.py | 11 ++++-- scripts/pkg_gen/fastsurfer-only.yaml | 5 +++ scripts/pkg_gen/resources/README.rst | 55 ++++++++++++++-------------- 3 files changed, 40 insertions(+), 31 deletions(-) create mode 100644 scripts/pkg_gen/fastsurfer-only.yaml diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index 3dd4c481..4e6dfbb5 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -66,8 +66,10 @@ def download_tasks_template(output_path: Path): @click.argument("output_dir", type=click.Path(path_type=Path)) @click.option("--work-dir", type=click.Path(path_type=Path), default=None) @click.option("--task-template", type=click.Path(path_type=Path), default=None) +@click.option("--packages-to-import", type=click.Path(path_type=Path), default=None) def generate_packages( - output_dir: Path, work_dir: ty.Optional[Path], task_template: ty.Optional[Path] + output_dir: Path, work_dir: ty.Optional[Path], task_template: ty.Optional[Path], + packages_to_import: ty.Optional[Path] ): if work_dir is None: work_dir = Path(tempfile.mkdtemp()) @@ -80,9 +82,10 @@ def generate_packages( tar.extractall(path=extract_dir) task_template = extract_dir / next(extract_dir.iterdir()) - with open( - Path(__file__).parent.parent.parent / "nipype-interfaces-to-import.yaml" - ) as f: + if packages_to_import is None: + packages_to_import = Path(__file__).parent.parent.parent / "nipype-interfaces-to-import.yaml" + + with 
open(packages_to_import) as f: to_import = yaml.load(f, Loader=yaml.SafeLoader) # Wipe output dir diff --git a/scripts/pkg_gen/fastsurfer-only.yaml b/scripts/pkg_gen/fastsurfer-only.yaml new file mode 100644 index 00000000..1e78fd89 --- /dev/null +++ b/scripts/pkg_gen/fastsurfer-only.yaml @@ -0,0 +1,5 @@ +packages: +- fastsurfer +interfaces: + fastsurfer: + - FastSurfer \ No newline at end of file diff --git a/scripts/pkg_gen/resources/README.rst b/scripts/pkg_gen/resources/README.rst index 2f2a74c6..c0ef5b3a 100644 --- a/scripts/pkg_gen/resources/README.rst +++ b/scripts/pkg_gen/resources/README.rst @@ -35,23 +35,23 @@ Tests This package comes with a battery of automatically generated test modules. To install the necessary dependencies to run the tests -``` -$ pip install -e .[test] -``` +.. code-block:: + + $ pip install -e .[test] Then the tests, including `doctests` `__, can be launched using -``` -$ pytest --doctest-modules pydra/tasks/* -``` +.. code-block:: + + $ pytest --doctest-modules pydra/tasks/* By default, the tests are set to time-out after 10s, after which the underlying tool is assumed to have passed the validation/initialisation phase and we assume that it will run to completion. To disable this and run the test(s) through to completion run -``` -$ pytest --doctest-modules --timeout-pass 0 pydra/tasks/* -``` +.. code-block:: + + $ pytest --doctest-modules --timeout-pass 0 pydra/tasks/* Continuous integration ---------------------- @@ -75,23 +75,23 @@ Developer installation Install repo in developer mode from the source directory and install pre-commit to ensure consistent code-style and quality. -``` -$ pip install -e .[test,dev] +.. code-block:: + + $ pip install -e .[test,dev] $ pre-commit install -``` Next install the requirements for running the auto-conversion script and generate the Pydra task interfaces from their Nipype counterparts -``` -$ pip install -r nipype-auto-conv/requirements.txt -``` +.. 
code-block:: + + $ pip install -r nipype-auto-conv/requirements.txt The run the conversion script to convert Nipype interfaces to Pydra -``` -$ nipype-auto-conv/generate -``` +.. code-block:: + + $ nipype-auto-conv/generate ## Methodology @@ -114,23 +114,24 @@ to be edited by hand in order to assist the auto-converter produce valid pydra t After editing one or more conversion specs the `pydra.tasks.CHANGEME.auto` package should be regenerated by running -``` -$ nipype-auto-conv/generate -``` +.. code-block:: + + $ nipype-auto-conv/generate The tests should be run on the auto-generated tasks to see if they are valid -``` -$ pytest --doctest-modules pydra/tasks/CHANGEME/auto/tests/test_.py -``` +.. code-block:: + + $ pytest --doctest-modules pydra/tasks/CHANGEME/auto/tests/test_.py If the test passes you should then edit the `pydra/tasks/CHANGEME/v/__init__.py` file to import the now valid task interface to signify that it has been validated and is ready for use, e.g. -```python -from pydra.tasks.CHANGEME.auto import -``` +.. code-block::python + + from pydra.tasks.CHANGEME.auto import + Typing and sample test data ~~~~~~~~~~~~~~~~~~~~~~~~~~~ From 674910b46ebc2994e62c89814ec9d8dc8faabc63 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 16 Aug 2023 15:19:34 +1000 Subject: [PATCH 38/42] updated tests badge --- scripts/pkg_gen/resources/README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/pkg_gen/resources/README.rst b/scripts/pkg_gen/resources/README.rst index c0ef5b3a..4e5f72bd 100644 --- a/scripts/pkg_gen/resources/README.rst +++ b/scripts/pkg_gen/resources/README.rst @@ -2,8 +2,8 @@ Pydra task package for CHANGEME =============================== -.. image:: https://github.com/nipype/pydra-CHANGEME/actions/workflows/tests.yml/badge.svg - :target: https://github.com/nipype/pydra-CHANGEME/actions/workflows/tests.yml +.. 
image:: https://github.com/nipype/pydra-CHANGEME/actions/workflows/pythonpackage.yml/badge.svg + :target: https://github.com/nipype/pydra-CHANGEME/actions/workflows/pythonpackage.yml .. .. image:: https://codecov.io/gh/nipype/pydra-CHANGEME/branch/main/graph/badge.svg?token=UIS0OGPST7 .. :target: https://codecov.io/gh/nipype/pydra-CHANGEME .. image:: https://img.shields.io/pypi/pyversions/pydra-CHANGEME.svg From 54f8c3119165753cacaa84c26e6e01f6b5af5bd2 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 16 Aug 2023 15:24:13 +1000 Subject: [PATCH 39/42] commented out failing conftest copy --- tests/test_task.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_task.py b/tests/test_task.py index 94159ecd..6dbc5756 100644 --- a/tests/test_task.py +++ b/tests/test_task.py @@ -28,7 +28,7 @@ def test_task_conversion(task_spec_file, cli_runner, work_dir, gen_test_conftest task_spec = yaml.safe_load(f) pkg_root = work_dir / "src" pkg_root.mkdir() - shutil.copyfile(gen_test_conftest, pkg_root / "conftest.py") + # shutil.copyfile(gen_test_conftest, pkg_root / "conftest.py") output_module_path = f"nipype2pydratest.{task_spec_file.stem.lower()}" From ca60a9e51c2cef4ded56f0e0044704bb5d456a52 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 16 Aug 2023 15:26:32 +1000 Subject: [PATCH 40/42] added fileformats-datascience to deps --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index ffd54527..c030eda3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,6 +15,7 @@ dependencies = [ "PyYAML>=6.0", "fileformats >=0.8", "fileformats-medimage >=0.4", + "fileformats-datascience", "traits", ] license = {file = "LICENSE"} From 4ecf344cc67067ef728c7dd45e8081a6d3c2e4c0 Mon Sep 17 00:00:00 2001 From: Tom Close Date: Tue, 5 Sep 2023 16:54:52 +1000 Subject: [PATCH 41/42] updated import location for fileformats Xml and Dicom --- scripts/pkg_gen/create_packages.py | 3 +-- 1 file changed, 1 insertion(+), 2 
deletions(-) diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index 4e6dfbb5..2259b1b4 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -17,10 +17,9 @@ import fileformats.core.mixin from fileformats.generic import File from fileformats.medimage import Nifti1, NiftiGz, Bval, Bvec -from fileformats.misc import Dicom +from fileformats.application import Dicom, Xml from fileformats.text import TextFile from fileformats.datascience import TextMatrix, DatFile -from fileformats.serialization import Xml import nipype.interfaces.base.core from nipype2pydra.task import ( InputsConverter, From 218acd3e85caa9f76327dcbccbbc4bdd7edeca7a Mon Sep 17 00:00:00 2001 From: Tom Close Date: Wed, 1 Nov 2023 11:56:40 +1100 Subject: [PATCH 42/42] fixed bug related to updated fileformats to_mime --- conftest.py | 7 --- scripts/pkg_gen/create_packages.py | 4 +- scripts/pkg_gen/freesurfer-only.yaml | 88 ++++++++++++++++++++++++++++ 3 files changed, 90 insertions(+), 9 deletions(-) create mode 100644 scripts/pkg_gen/freesurfer-only.yaml diff --git a/conftest.py b/conftest.py index 38883a4e..9cad16c8 100644 --- a/conftest.py +++ b/conftest.py @@ -13,13 +13,6 @@ EXAMPLE_WORKFLOWS_DIR = EXAMPLE_SPECS_DIR / "workflow" -@File.generate_sample_data.register -def file_generate_sample_data(file: File, dest_dir: Path): - a_file = dest_dir / "a_file.x" - a_file.write_text("a sample file") - return [a_file] - - @pytest.fixture def gen_test_conftest(): return PKG_DIR / "scripts" / "pkg_gen" / "resources" / "conftest.py" diff --git a/scripts/pkg_gen/create_packages.py b/scripts/pkg_gen/create_packages.py index 2259b1b4..b79be4b1 100644 --- a/scripts/pkg_gen/create_packages.py +++ b/scripts/pkg_gen/create_packages.py @@ -323,7 +323,7 @@ def combine_types(type_, prev_type): InputsConverter, { "types": { - n: fileformats.core.utils.to_mime(t) + n: fileformats.core.utils.to_mime(t, official=False) for n, t in input_types.items() } }, 
@@ -333,7 +333,7 @@ def combine_types(type_, prev_type): OutputsConverter, { "types": { - n: fileformats.core.utils.to_mime(t) + n: fileformats.core.utils.to_mime(t, official=False) for n, t in output_types.items() }, "templates": output_templates, diff --git a/scripts/pkg_gen/freesurfer-only.yaml b/scripts/pkg_gen/freesurfer-only.yaml new file mode 100644 index 00000000..dec14f82 --- /dev/null +++ b/scripts/pkg_gen/freesurfer-only.yaml @@ -0,0 +1,88 @@ +packages: +- freesurfer +interfaces: + freesurfer: + - ParseDICOMDir + - UnpackSDICOMDir + - MRIConvert + - Resample + - ReconAll + - BBRegister + - ApplyVolTransform + - Smooth + - DICOMConvert + - RobustRegister + - FitMSParams + - SynthesizeFLASH + - MNIBiasCorrection + - WatershedSkullStrip + - Normalize + - CANormalize + - CARegister + - CALabel + - MRIsCALabel + - SegmentCC + - SegmentWM + - EditWMwithAseg + - ConcatenateLTA + - MRISPreproc + - MRISPreprocReconAll + - GLMFit + - OneSampleTTest + - Binarize + - Concatenate + - SegStats + - SegStatsReconAll + - Label2Vol + - MS_LDA + - Label2Label + - Label2Annot + - SphericalAverage + - SampleToSurface + - SurfaceSmooth + - SurfaceTransform + - Surface2VolTransform + - SurfaceSnapshots + - ApplyMask + - MRIsConvert + - MRITessellate + - MRIPretess + - MRIMarchingCubes + - SmoothTessellation + - MakeAverageSubject + - ExtractMainComponent + - Tkregister2 + - AddXFormToHeader + - CheckTalairachAlignment + - TalairachAVI + - TalairachQC + - RemoveNeck + - MRIFill + - MRIsInflate + - Sphere + - FixTopology + - EulerNumber + - RemoveIntersection + - MakeSurfaces + - Curvature + - CurvatureStats + - Jacobian + - MRIsCalc + - VolumeMask + - ParcellationStats + - Contrast + - RelabelHypointensities + - Aparc2Aseg + - Apas2Aseg + - MRIsExpand + - MRIsCombine + - RobustTemplate + - FuseSegmentations + - MPRtoMNI305 + - RegisterAVItoTalairach + - EMRegister + - Register + - Paint + - MRICoreg + - GTMSeg + - GTMPVC