diff --git a/.codespellrc b/.codespellrc
deleted file mode 100644
index c3df6c4ef1d..00000000000
--- a/.codespellrc
+++ /dev/null
@@ -1,5 +0,0 @@
-[codespell]
-skip = .git,*.pdf,*.svg,numpydoc.py,viz-report.html
-# objekt - used in the code purposefully different from object
-# nd - import scipy.ndimage as nd
-ignore-words-list = objekt,nd
diff --git a/.flake8 b/.flake8
deleted file mode 100644
index 1ead5e43b71..00000000000
--- a/.flake8
+++ /dev/null
@@ -1,12 +0,0 @@
-[flake8]
-max-line-length = 99
-doctests = False
-ignore =
-    W503
-    E203
-exclude=
-    *build/,
-    */_version.py
-per-file-ignores =
-    **/__init__.py : F401
-    docs/conf.py : E265
diff --git a/.github/workflows/pythonpackage.yml b/.github/workflows/tox.yml
similarity index 90%
rename from .github/workflows/pythonpackage.yml
rename to .github/workflows/tox.yml
index f0474c03629..99e932298f7 100644
--- a/.github/workflows/pythonpackage.yml
+++ b/.github/workflows/tox.yml
@@ -158,13 +158,20 @@ jobs:
           token: ${{ secrets.CODECOV_TOKEN }}
         if: ${{ always() }}
 
-  flake8:
-    if: github.event_name != 'schedule'
-    runs-on: ubuntu-latest
+  checks:
+    runs-on: 'ubuntu-latest'
+    continue-on-error: true
+    strategy:
+      matrix:
+        check: ['style', 'spellcheck']
+
+    steps:
-      - uses: actions/checkout@v4
-      - name: Set up Python 3
-        uses: actions/setup-python@v5
-        with:
-          python-version: 3
-      - run: pipx run flake8 niworkflows/
+      - uses: actions/checkout@v4
+      - name: Install the latest version of uv
+        uses: astral-sh/setup-uv@v3
+      - name: Show tox config
+        run: uvx tox c
+      - name: Show tox config (this call)
+        run: uvx tox c -e ${{ matrix.check }}
+      - name: Run check
+        run: uvx tox -e ${{ matrix.check }}
diff --git a/.pep8speaks.yml b/.pep8speaks.yml
deleted file mode 100644
index 0559ae81723..00000000000
--- a/.pep8speaks.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-scanner:
-    diff_only: True  # Only show errors caused by the patch
-    linter: flake8
-
-message:  # Customize the comment made by the bot
-    opened:  # Messages when a new PR is submitted
-        header: "Hello @{name}, thank you for submitting the Pull Request!"
-        footer: "To test for issues locally, `pip install flake8` and then run `flake8 niworkflows`."
-    updated:  # Messages when new commits are added to the PR
-        header: "Hello @{name}, Thank you for updating!"
-        footer: "To test for issues locally, `pip install flake8` and then run `flake8 niworkflows`."
-    no_errors: "Cheers! There are no style issues detected in this Pull Request. :beers: "
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000000..aa3f246537a
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,24 @@
+exclude: ".*/data/.*"
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+      - id: trailing-whitespace
+        exclude: 'examples'
+      - id: end-of-file-fixer
+        exclude: 'examples|docs/_static/.*\.(css|js)'
+      - id: check-yaml
+      - id: check-json
+      - id: check-toml
+      - id: check-added-large-files
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.7.4
+    hooks:
+      - name: ruff check --fix
+        id: ruff
+        args: [ --fix ]
+      - name: ruff format
+        id: ruff-format
+      - name: fix implicit string concatenation
+        id: ruff
+        args: [ --select, ISC001, --fix ]
diff --git a/LICENSE b/LICENSE
index fe9b3eabfc5..8efda52223a 100644
--- a/LICENSE
+++ b/LICENSE
@@ -199,4 +199,3 @@
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
- diff --git a/docker/fetch_templates.py b/docker/fetch_templates.py index bc81bf8e229..8991ee118cf 100755 --- a/docker/fetch_templates.py +++ b/docker/fetch_templates.py @@ -19,11 +19,11 @@ def fetch_MNI2009(): tpl-MNI152NLin2009cAsym/tpl-MNI152NLin2009cAsym_res-02_desc-fMRIPrep_boldref.nii.gz tpl-MNI152NLin2009cAsym/tpl-MNI152NLin2009cAsym_res-01_label-brain_probseg.nii.gz """ - template = "MNI152NLin2009cAsym" + template = 'MNI152NLin2009cAsym' - tf.get(template, resolution=2, desc="brain", suffix="mask") - tf.get(template, resolution=2, desc="fMRIPrep", suffix="boldref") - tf.get(template, resolution=1, label="brain", suffix="probseg") + tf.get(template, resolution=2, desc='brain', suffix='mask') + tf.get(template, resolution=2, desc='fMRIPrep', suffix='boldref') + tf.get(template, resolution=1, label='brain', suffix='probseg') def fetch_MNI152Lin(): @@ -33,10 +33,10 @@ def fetch_MNI152Lin(): tpl-MNI152Lin/tpl-MNI152Lin_res-02_T1w.nii.gz tpl-MNI152Lin/tpl-MNI152Lin_res-02_desc-brain_mask.nii.gz """ - template = "MNI152Lin" + template = 'MNI152Lin' - tf.get(template, resolution=2, desc=None, suffix="T1w") - tf.get(template, resolution=2, desc="brain", suffix="mask") + tf.get(template, resolution=2, desc=None, suffix='T1w') + tf.get(template, resolution=2, desc='brain', suffix='mask') def fetch_OASIS(): @@ -50,13 +50,13 @@ def fetch_OASIS(): tpl-OASIS30ANTs/tpl-OASIS30ANTs_res-01_desc-brain_mask.nii.gz tpl-OASIS30ANTs/tpl-OASIS30ANTs_res-01_desc-BrainCerebellumExtraction_mask.nii.gz """ - template = "OASIS30ANTs" + template = 'OASIS30ANTs' - tf.get(template, resolution=1, desc="4", suffix="dseg") - tf.get(template, resolution=1, desc=None, suffix="T1w") - tf.get(template, resolution=1, label="brain", suffix="mask") - tf.get(template, resolution=1, desc="BrainCerebellumExtraction", suffix="mask") - tf.get(template, resolution=1, label="brain", suffix="probseg") + tf.get(template, resolution=1, desc='4', suffix='dseg') + tf.get(template, resolution=1, desc=None, suffix='T1w') + tf.get(template, resolution=1, label='brain', suffix='mask') + tf.get(template, resolution=1, desc='BrainCerebellumExtraction', suffix='mask') + tf.get(template, resolution=1, label='brain', suffix='probseg') def fetch_fsaverage(): @@ -70,10 +70,10 @@ def fetch_fsaverage(): tpl-fsaverage/tpl-fsaverage_hemi-L_den-164k_midthickness.surf.gii tpl-fsaverage/tpl-fsaverage_hemi-R_den-164k_midthickness.surf.gii """ - template = "fsaverage" + template = 'fsaverage' - tf.get(template, density="164k", desc="std", suffix="sphere") - tf.get(template, density="164k", suffix="midthickness") + tf.get(template, density='164k', desc='std', suffix='sphere') + tf.get(template, density='164k', suffix='midthickness') def fetch_fsLR(): @@ -89,7 +89,7 @@ def fetch_fsLR(): tpl-fsLR/tpl-fsLR_space-fsaverage_hemi-L_den-32k_sphere.surf.gii tpl-fsLR/tpl-fsLR_space-fsaverage_hemi-R_den-32k_sphere.surf.gii """ - tf.get("fsLR", density="32k") + tf.get('fsLR', density='32k') def fetch_all(): @@ -100,21 +100,21 @@ def fetch_all(): fetch_fsLR() -if __name__ == "__main__": +if __name__ == '__main__': parser = argparse.ArgumentParser( - description="Helper script for pre-caching required templates to run fMRIPrep", + description='Helper script for pre-caching required templates to run fMRIPrep', ) parser.add_argument( - "--tf-dir", + '--tf-dir', type=os.path.abspath, - help="Directory to save templates in. If not provided, templates will be saved to" - " `${HOME}/.cache/templateflow`.", + help='Directory to save templates in. 
If not provided, templates will be saved to' + ' `${HOME}/.cache/templateflow`.', ) opts = parser.parse_args() # set envvar (if necessary) prior to templateflow import if opts.tf_dir is not None: - os.environ["TEMPLATEFLOW_HOME"] = opts.tf_dir + os.environ['TEMPLATEFLOW_HOME'] = opts.tf_dir import templateflow.api as tf diff --git a/docs/conf.py b/docs/conf.py index a8284a0fa80..d9b08442052 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -12,21 +12,21 @@ # import os import sys -from unittest import mock import tempfile +from unittest import mock -from packaging.version import Version import templateflow +from packaging.version import Version # Prevent etelemetry from loading at all # Could set NO_ET environment variable, but why? MOCKS = [ - "etelemetry", - "matplotlib", - "matplotlib.pyplot", - "matplotlib.cm", - "matplotlib.colors", - "matplotlib.colorbar", + 'etelemetry', + 'matplotlib', + 'matplotlib.pyplot', + 'matplotlib.cm', + 'matplotlib.colors', + 'matplotlib.colorbar', ] sys.modules.update({mod: mock.Mock() for mod in MOCKS}) @@ -36,14 +36,14 @@ os.close(tffiledesc) templateflow.api.get = mock.MagicMock(return_value=tffilename) -from niworkflows import __version__, __copyright__, __packagename__ +from niworkflows import __copyright__, __packagename__, __version__ # noqa:E402 -sys.path.append(os.path.abspath("sphinxext")) +sys.path.append(os.path.abspath('sphinxext')) # -- Project information ----------------------------------------------------- project = __packagename__ copyright = __copyright__ -author = "The NiPreps Developers" +author = 'The NiPreps Developers' # The short X.Y version version = Version(__version__).base_version @@ -53,30 +53,30 @@ # -- General configuration --------------------------------------------------- extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.coverage", - "sphinx.ext.doctest", - "sphinx.ext.githubpages", - "sphinx.ext.ifconfig", - "sphinx.ext.intersphinx", - "sphinx.ext.mathjax", - "sphinx.ext.viewcode", - "sphinxcontrib.apidoc", - "nipype.sphinxext.apidoc", - "nipype.sphinxext.plot_workflow", + 'sphinx.ext.autodoc', + 'sphinx.ext.coverage', + 'sphinx.ext.doctest', + 'sphinx.ext.githubpages', + 'sphinx.ext.ifconfig', + 'sphinx.ext.intersphinx', + 'sphinx.ext.mathjax', + 'sphinx.ext.viewcode', + 'sphinxcontrib.apidoc', + 'nipype.sphinxext.apidoc', + 'nipype.sphinxext.plot_workflow', ] autodoc_mock_imports = [ - "nilearn", - "nitime", - "numpy", - "pandas", - "seaborn", - "skimage", - "svgutils", - "templateflow", - "transforms3d", - "yaml", + 'nilearn', + 'nitime', + 'numpy', + 'pandas', + 'seaborn', + 'skimage', + 'svgutils', + 'templateflow', + 'transforms3d', + 'yaml', ] # Accept custom section names to be parsed for numpy-style docstrings @@ -85,24 +85,24 @@ # https://github.com/sphinx-contrib/napoleon/pull/10 is merged. napoleon_use_param = False napoleon_custom_sections = [ - ("Inputs", "Parameters"), - ("Outputs", "Parameters"), - ("Attributes", "Parameters"), - ("Mandatory Inputs", "Parameters"), - ("Optional Inputs", "Parameters"), + ('Inputs', 'Parameters'), + ('Outputs', 'Parameters'), + ('Attributes', 'Parameters'), + ('Mandatory Inputs', 'Parameters'), + ('Optional Inputs', 'Parameters'), ] # Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] +templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = ".rst" +source_suffix = '.rst' # The master toctree document. 
-master_doc = "index" +master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -115,11 +115,11 @@ # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [ - "_build", - "Thumbs.db", - ".DS_Store", - "api/modules.rst", - "api/niworkflows.rst", + '_build', + 'Thumbs.db', + '.DS_Store', + 'api/modules.rst', + 'api/niworkflows.rst', ] # The name of the Pygments (syntax highlighting) style to use. @@ -131,7 +131,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = "furo" +html_theme = 'furo' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -142,12 +142,12 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] +html_static_path = ['_static'] html_js_files = [ - "js/version-switch.js", + 'js/version-switch.js', ] html_css_files = [ - "css/version-switch.css", + 'css/version-switch.css', ] # Custom sidebar templates, must be a dictionary that maps document names @@ -164,7 +164,7 @@ # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = "niworkflowsdoc" +htmlhelp_basename = 'niworkflowsdoc' # -- Options for LaTeX output ------------------------------------------------ @@ -190,10 +190,10 @@ latex_documents = [ ( master_doc, - "niworkflows.tex", - "NiWorkflows Documentation", - "The NiPreps Developers", - "manual", + 'niworkflows.tex', + 'NiWorkflows Documentation', + 'The NiPreps Developers', + 'manual', ), ] @@ -202,7 +202,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "niworkflows", "NiWorkflows Documentation", [author], 1)] +man_pages = [(master_doc, 'niworkflows', 'NiWorkflows Documentation', [author], 1)] # -- Options for Texinfo output ---------------------------------------------- @@ -213,12 +213,12 @@ texinfo_documents = [ ( master_doc, - "niworkflows", - "NiWorkflows Documentation", + 'niworkflows', + 'NiWorkflows Documentation', author, - "NiWorkflows", - "One line description of project.", - "Miscellaneous", + 'NiWorkflows', + 'One line description of project.', + 'Miscellaneous', ), ] @@ -238,32 +238,32 @@ # epub_uid = '' # A list of files that should not be packed into the epub file. -epub_exclude_files = ["search.html"] +epub_exclude_files = ['search.html'] # -- Extension configuration ------------------------------------------------- -apidoc_module_dir = "../niworkflows" -apidoc_output_dir = "api" -apidoc_excluded_paths = ["conftest.py", "*/tests/*", "tests/*", "testing.py"] +apidoc_module_dir = '../niworkflows' +apidoc_output_dir = 'api' +apidoc_excluded_paths = ['conftest.py', '*/tests/*', 'tests/*', 'testing.py'] apidoc_separate_modules = True -apidoc_extra_args = ["--module-first", "-d 1", "-T"] +apidoc_extra_args = ['--module-first', '-d 1', '-T'] # -- Options for intersphinx extension --------------------------------------- # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = { - "bids": ("https://bids-standard.github.io/pybids/", None), - "matplotlib": ("https://matplotlib.org/stable", None), - "nibabel": ("https://nipy.org/nibabel/", None), - "nipype": ("https://nipype.readthedocs.io/en/latest/", None), - "numpy": ("https://numpy.org/doc/stable/", None), - "pandas": ("https://pandas.pydata.org/pandas-docs/dev", None), - "python": ("https://docs.python.org/3/", None), - "scipy": ("https://docs.scipy.org/doc/scipy/", None), - "smriprep": ("https://www.nipreps.org/smriprep/", None), - "surfplot": ("https://surfplot.readthedocs.io/en/latest/", None), - "templateflow": ("https://www.templateflow.org/python-client", None), + 'bids': ('https://bids-standard.github.io/pybids/', None), + 'matplotlib': ('https://matplotlib.org/stable', None), + 'nibabel': ('https://nipy.org/nibabel/', None), + 'nipype': ('https://nipype.readthedocs.io/en/latest/', None), + 'numpy': ('https://numpy.org/doc/stable/', None), + 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None), + 'python': ('https://docs.python.org/3/', None), + 'scipy': ('https://docs.scipy.org/doc/scipy/', None), + 'smriprep': ('https://www.nipreps.org/smriprep/', None), + 'surfplot': ('https://surfplot.readthedocs.io/en/latest/', None), + 'templateflow': ('https://www.templateflow.org/python-client', None), } # -- Options for versioning extension ---------------------------------------- diff --git a/docs/index.rst b/docs/index.rst index 07d1aa73752..96b1ab5047d 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -8,4 +8,4 @@ Contents installation api - changelog \ No newline at end of file + changelog diff --git a/docs/sphinxext/docscrape.py b/docs/sphinxext/docscrape.py deleted file mode 100644 index fb3a0b6347e..00000000000 --- a/docs/sphinxext/docscrape.py +++ /dev/null @@ -1,768 +0,0 @@ -"""Extract reference documentation from the NumPy source tree. - -""" - -import inspect -import textwrap -import re -import pydoc -from warnings import warn -from collections import namedtuple -from collections.abc import Callable, Mapping -import copy -import sys - -from functools import cached_property - - -def strip_blank_lines(l): - "Remove leading and trailing blank lines from a list of lines" - while l and not l[0].strip(): - del l[0] - while l and not l[-1].strip(): - del l[-1] - return l - - -class Reader: - """A line-based string reader.""" - - def __init__(self, data): - """ - Parameters - ---------- - data : str - String with lines separated by '\\n'. 
- - """ - if isinstance(data, list): - self._str = data - else: - self._str = data.split("\n") # store string as list of lines - - self.reset() - - def __getitem__(self, n): - return self._str[n] - - def reset(self): - self._l = 0 # current line nr - - def read(self): - if not self.eof(): - out = self[self._l] - self._l += 1 - return out - else: - return "" - - def seek_next_non_empty_line(self): - for l in self[self._l :]: - if l.strip(): - break - else: - self._l += 1 - - def eof(self): - return self._l >= len(self._str) - - def read_to_condition(self, condition_func): - start = self._l - for line in self[start:]: - if condition_func(line): - return self[start : self._l] - self._l += 1 - if self.eof(): - return self[start : self._l + 1] - return [] - - def read_to_next_empty_line(self): - self.seek_next_non_empty_line() - - def is_empty(line): - return not line.strip() - - return self.read_to_condition(is_empty) - - def read_to_next_unindented_line(self): - def is_unindented(line): - return line.strip() and (len(line.lstrip()) == len(line)) - - return self.read_to_condition(is_unindented) - - def peek(self, n=0): - if self._l + n < len(self._str): - return self[self._l + n] - else: - return "" - - def is_empty(self): - return not "".join(self._str).strip() - - -class ParseError(Exception): - def __str__(self): - message = self.args[0] - if hasattr(self, "docstring"): - message = f"{message} in {self.docstring!r}" - return message - - -Parameter = namedtuple("Parameter", ["name", "type", "desc"]) - - -class NumpyDocString(Mapping): - """Parses a numpydoc string to an abstract representation - - Instances define a mapping from section title to structured data. - - """ - - sections = { - "Signature": "", - "Summary": [""], - "Extended Summary": [], - "Parameters": [], - "Returns": [], - "Yields": [], - "Receives": [], - "Raises": [], - "Warns": [], - "Other Parameters": [], - "Attributes": [], - "Methods": [], - "See Also": [], - "Notes": [], - "Warnings": [], - "References": "", - "Examples": "", - "index": {}, - } - - def __init__(self, docstring, config=None): - orig_docstring = docstring - docstring = textwrap.dedent(docstring).split("\n") - - self._doc = Reader(docstring) - self._parsed_data = copy.deepcopy(self.sections) - - try: - self._parse() - except ParseError as e: - e.docstring = orig_docstring - raise - - def __getitem__(self, key): - return self._parsed_data[key] - - def __setitem__(self, key, val): - if key not in self._parsed_data: - self._error_location(f"Unknown section {key}", error=False) - else: - self._parsed_data[key] = val - - def __iter__(self): - return iter(self._parsed_data) - - def __len__(self): - return len(self._parsed_data) - - def _is_at_section(self): - self._doc.seek_next_non_empty_line() - - if self._doc.eof(): - return False - - l1 = self._doc.peek().strip() # e.g. Parameters - - if l1.startswith(".. index::"): - return True - - l2 = self._doc.peek(1).strip() # ---------- or ========== - if len(l2) >= 3 and (set(l2) in ({"-"}, {"="})) and len(l2) != len(l1): - snip = "\n".join(self._doc._str[:2]) + "..." - self._error_location( - f"potentially wrong underline length... 
\n{l1} \n{l2} in \n{snip}", - error=False, - ) - return l2.startswith("-" * len(l1)) or l2.startswith("=" * len(l1)) - - def _strip(self, doc): - i = 0 - j = 0 - for i, line in enumerate(doc): - if line.strip(): - break - - for j, line in enumerate(doc[::-1]): - if line.strip(): - break - - return doc[i : len(doc) - j] - - def _read_to_next_section(self): - section = self._doc.read_to_next_empty_line() - - while not self._is_at_section() and not self._doc.eof(): - if not self._doc.peek(-1).strip(): # previous line was empty - section += [""] - - section += self._doc.read_to_next_empty_line() - - return section - - def _read_sections(self): - while not self._doc.eof(): - data = self._read_to_next_section() - name = data[0].strip() - - if name.startswith(".."): # index section - yield name, data[1:] - elif len(data) < 2: - yield StopIteration - else: - yield name, self._strip(data[2:]) - - def _parse_param_list(self, content, single_element_is_type=False): - content = dedent_lines(content) - r = Reader(content) - params = [] - while not r.eof(): - header = r.read().strip() - if " : " in header: - arg_name, arg_type = header.split(" : ", maxsplit=1) - else: - # NOTE: param line with single element should never have a - # a " :" before the description line, so this should probably - # warn. - if header.endswith(" :"): - header = header[:-2] - if single_element_is_type: - arg_name, arg_type = "", header - else: - arg_name, arg_type = header, "" - - desc = r.read_to_next_unindented_line() - desc = dedent_lines(desc) - desc = strip_blank_lines(desc) - - params.append(Parameter(arg_name, arg_type, desc)) - - return params - - # See also supports the following formats. - # - # - # SPACE* COLON SPACE+ SPACE* - # ( COMMA SPACE+ )+ (COMMA | PERIOD)? SPACE* - # ( COMMA SPACE+ )* SPACE* COLON SPACE+ SPACE* - - # is one of - # - # COLON COLON BACKTICK BACKTICK - # where - # is a legal function name, and - # is any nonempty sequence of word characters. - # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j` - # is a string describing the function. - - _role = r":(?P(py:)?\w+):" - _funcbacktick = r"`(?P(?:~\w+\.)?[a-zA-Z0-9_\.-]+)`" - _funcplain = r"(?P[a-zA-Z0-9_\.-]+)" - _funcname = r"(" + _role + _funcbacktick + r"|" + _funcplain + r")" - _funcnamenext = _funcname.replace("role", "rolenext") - _funcnamenext = _funcnamenext.replace("name", "namenext") - _description = r"(?P\s*:(\s+(?P\S+.*))?)?\s*$" - _func_rgx = re.compile(r"^\s*" + _funcname + r"\s*") - _line_rgx = re.compile( - r"^\s*" - + r"(?P" - + _funcname # group for all function names - + r"(?P([,]\s+" - + _funcnamenext - + r")*)" - + r")" - + r"(?P[,\.])?" # end of "allfuncs" - + _description # Some function lists have a trailing comma (or period) '\s*' - ) - - # Empty elements are replaced with '..' - empty_description = ".." 
- - def _parse_see_also(self, content): - """ - func_name : Descriptive text - continued text - another_func_name : Descriptive text - func_name1, func_name2, :meth:`func_name`, func_name3 - - """ - - content = dedent_lines(content) - - items = [] - - def parse_item_name(text): - """Match ':role:`name`' or 'name'.""" - m = self._func_rgx.match(text) - if not m: - self._error_location(f"Error parsing See Also entry {line!r}") - role = m.group("role") - name = m.group("name") if role else m.group("name2") - return name, role, m.end() - - rest = [] - for line in content: - if not line.strip(): - continue - - line_match = self._line_rgx.match(line) - description = None - if line_match: - description = line_match.group("desc") - if line_match.group("trailing") and description: - self._error_location( - "Unexpected comma or period after function list at index %d of " - 'line "%s"' % (line_match.end("trailing"), line), - error=False, - ) - if not description and line.startswith(" "): - rest.append(line.strip()) - elif line_match: - funcs = [] - text = line_match.group("allfuncs") - while True: - if not text.strip(): - break - name, role, match_end = parse_item_name(text) - funcs.append((name, role)) - text = text[match_end:].strip() - if text and text[0] == ",": - text = text[1:].strip() - rest = list(filter(None, [description])) - items.append((funcs, rest)) - else: - self._error_location(f"Error parsing See Also entry {line!r}") - return items - - def _parse_index(self, section, content): - """ - .. index: default - :refguide: something, else, and more - - """ - - def strip_each_in(lst): - return [s.strip() for s in lst] - - out = {} - section = section.split("::") - if len(section) > 1: - out["default"] = strip_each_in(section[1].split(","))[0] - for line in content: - line = line.split(":") - if len(line) > 2: - out[line[1]] = strip_each_in(line[2].split(",")) - return out - - def _parse_summary(self): - """Grab signature (if given) and summary""" - if self._is_at_section(): - return - - # If several signatures present, take the last one - while True: - summary = self._doc.read_to_next_empty_line() - summary_str = " ".join([s.strip() for s in summary]).strip() - compiled = re.compile(r"^([\w., ]+=)?\s*[\w\.]+\(.*\)$") - if compiled.match(summary_str): - self["Signature"] = summary_str - if not self._is_at_section(): - continue - break - - if summary is not None: - self["Summary"] = summary - - if not self._is_at_section(): - self["Extended Summary"] = self._read_to_next_section() - - def _parse(self): - self._doc.reset() - self._parse_summary() - - sections = list(self._read_sections()) - section_names = {section for section, content in sections} - - has_returns = "Returns" in section_names - has_yields = "Yields" in section_names - # We could do more tests, but we are not. Arbitrarily. - if has_returns and has_yields: - msg = "Docstring contains both a Returns and Yields section." - raise ValueError(msg) - if not has_yields and "Receives" in section_names: - msg = "Docstring contains a Receives section but not Yields." 
- raise ValueError(msg) - - for section, content in sections: - if not section.startswith(".."): - section = (s.capitalize() for s in section.split(" ")) - section = " ".join(section) - if self.get(section): - self._error_location( - "The section %s appears twice in %s" - % (section, "\n".join(self._doc._str)) - ) - - if section in ("Parameters", "Other Parameters", "Attributes", "Methods"): - self[section] = self._parse_param_list(content) - elif section in ("Returns", "Yields", "Raises", "Warns", "Receives"): - self[section] = self._parse_param_list( - content, single_element_is_type=True - ) - elif section.startswith(".. index::"): - self["index"] = self._parse_index(section, content) - elif section == "See Also": - self["See Also"] = self._parse_see_also(content) - else: - self[section] = content - - @property - def _obj(self): - if hasattr(self, "_cls"): - return self._cls - elif hasattr(self, "_f"): - return self._f - return None - - def _error_location(self, msg, error=True): - if self._obj is not None: - # we know where the docs came from: - try: - filename = inspect.getsourcefile(self._obj) - except TypeError: - filename = None - # Make UserWarning more descriptive via object introspection. - # Skip if introspection fails - name = getattr(self._obj, "__name__", None) - if name is None: - name = getattr(getattr(self._obj, "__class__", None), "__name__", None) - if name is not None: - msg += f" in the docstring of {name}" - msg += f" in {filename}." if filename else "" - if error: - raise ValueError(msg) - else: - warn(msg) - - # string conversion routines - - def _str_header(self, name, symbol="-"): - return [name, len(name) * symbol] - - def _str_indent(self, doc, indent=4): - return [" " * indent + line for line in doc] - - def _str_signature(self): - if self["Signature"]: - return [self["Signature"].replace("*", r"\*")] + [""] - return [""] - - def _str_summary(self): - if self["Summary"]: - return self["Summary"] + [""] - return [] - - def _str_extended_summary(self): - if self["Extended Summary"]: - return self["Extended Summary"] + [""] - return [] - - def _str_param_list(self, name): - out = [] - if self[name]: - out += self._str_header(name) - for param in self[name]: - parts = [] - if param.name: - parts.append(param.name) - if param.type: - parts.append(param.type) - out += [" : ".join(parts)] - if param.desc and "".join(param.desc).strip(): - out += self._str_indent(param.desc) - out += [""] - return out - - def _str_section(self, name): - out = [] - if self[name]: - out += self._str_header(name) - out += self[name] - out += [""] - return out - - def _str_see_also(self, func_role): - if not self["See Also"]: - return [] - out = [] - out += self._str_header("See Also") - out += [""] - last_had_desc = True - for funcs, desc in self["See Also"]: - assert isinstance(funcs, list) - links = [] - for func, role in funcs: - if role: - link = f":{role}:`{func}`" - elif func_role: - link = f":{func_role}:`{func}`" - else: - link = f"`{func}`_" - links.append(link) - link = ", ".join(links) - out += [link] - if desc: - out += self._str_indent([" ".join(desc)]) - last_had_desc = True - else: - last_had_desc = False - out += self._str_indent([self.empty_description]) - - if last_had_desc: - out += [""] - out += [""] - return out - - def _str_index(self): - idx = self["index"] - out = [] - output_index = False - default_index = idx.get("default", "") - if default_index: - output_index = True - out += [f".. 
index:: {default_index}"] - for section, references in idx.items(): - if section == "default": - continue - output_index = True - out += [f" :{section}: {', '.join(references)}"] - if output_index: - return out - return "" - - def __str__(self, func_role=""): - out = [] - out += self._str_signature() - out += self._str_summary() - out += self._str_extended_summary() - for param_list in ( - "Parameters", - "Returns", - "Yields", - "Receives", - "Other Parameters", - "Raises", - "Warns", - ): - out += self._str_param_list(param_list) - out += self._str_section("Warnings") - out += self._str_see_also(func_role) - for s in ("Notes", "References", "Examples"): - out += self._str_section(s) - for param_list in ("Attributes", "Methods"): - out += self._str_param_list(param_list) - out += self._str_index() - return "\n".join(out) - - -def dedent_lines(lines): - """Deindent a list of lines maximally""" - return textwrap.dedent("\n".join(lines)).split("\n") - - -class FunctionDoc(NumpyDocString): - def __init__(self, func, role="func", doc=None, config=None): - self._f = func - self._role = role # e.g. "func" or "meth" - - if doc is None: - if func is None: - raise ValueError("No function or docstring given") - doc = inspect.getdoc(func) or "" - if config is None: - config = {} - NumpyDocString.__init__(self, doc, config) - - def get_func(self): - func_name = getattr(self._f, "__name__", self.__class__.__name__) - if inspect.isclass(self._f): - func = getattr(self._f, "__call__", self._f.__init__) - else: - func = self._f - return func, func_name - - def __str__(self): - out = "" - - func, func_name = self.get_func() - - roles = {"func": "function", "meth": "method"} - - if self._role: - if self._role not in roles: - print(f"Warning: invalid role {self._role}") - out += f".. {roles.get(self._role, '')}:: {func_name}\n \n\n" - - out += super().__str__(func_role=self._role) - return out - - -class ObjDoc(NumpyDocString): - def __init__(self, obj, doc=None, config=None): - self._f = obj - if config is None: - config = {} - NumpyDocString.__init__(self, doc, config=config) - - -class ClassDoc(NumpyDocString): - extra_public_methods = ["__call__"] - - def __init__(self, cls, doc=None, modulename="", func_doc=FunctionDoc, config=None): - if not inspect.isclass(cls) and cls is not None: - raise ValueError(f"Expected a class or None, but got {cls!r}") - self._cls = cls - - if "sphinx" in sys.modules: - from sphinx.ext.autodoc import ALL - else: - ALL = object() - - if config is None: - config = {} - self.show_inherited_members = config.get("show_inherited_class_members", True) - - if modulename and not modulename.endswith("."): - modulename += "." 
- self._mod = modulename - - if doc is None: - if cls is None: - raise ValueError("No class or documentation string given") - doc = pydoc.getdoc(cls) - - NumpyDocString.__init__(self, doc) - - _members = config.get("members", []) - if _members is ALL: - _members = None - _exclude = config.get("exclude-members", []) - - if config.get("show_class_members", True) and _exclude is not ALL: - - def splitlines_x(s): - if not s: - return [] - else: - return s.splitlines() - - for field, items in [ - ("Methods", self.methods), - ("Attributes", self.properties), - ]: - if not self[field]: - doc_list = [] - for name in sorted(items): - if name in _exclude or (_members and name not in _members): - continue - try: - doc_item = pydoc.getdoc(getattr(self._cls, name)) - doc_list.append(Parameter(name, "", splitlines_x(doc_item))) - except AttributeError: - pass # method doesn't exist - self[field] = doc_list - - @property - def methods(self): - if self._cls is None: - return [] - return [ - name - for name, func in inspect.getmembers(self._cls) - if ( - (not name.startswith("_") or name in self.extra_public_methods) - and isinstance(func, Callable) - and self._is_show_member(name) - ) - ] - - @property - def properties(self): - if self._cls is None: - return [] - return [ - name - for name, func in inspect.getmembers(self._cls) - if ( - not name.startswith("_") - and not self._should_skip_member(name, self._cls) - and ( - func is None - or isinstance(func, (property, cached_property)) - or inspect.isdatadescriptor(func) - ) - and self._is_show_member(name) - ) - ] - - @staticmethod - def _should_skip_member(name, klass): - if ( - # Namedtuples should skip everything in their ._fields as the - # docstrings for each of the members is: "Alias for field number X" - issubclass(klass, tuple) - and hasattr(klass, "_asdict") - and hasattr(klass, "_fields") - and name in klass._fields - ): - return True - return False - - def _is_show_member(self, name): - if self.show_inherited_members: - return True # show all class members - if name not in self._cls.__dict__: - return False # class member is inherited, we do not show it - return True - - -def get_doc_object( - obj, - what=None, - doc=None, - config=None, - class_doc=ClassDoc, - func_doc=FunctionDoc, - obj_doc=ObjDoc, -): - if what is None: - if inspect.isclass(obj): - what = "class" - elif inspect.ismodule(obj): - what = "module" - elif isinstance(obj, Callable): - what = "function" - else: - what = "object" - if config is None: - config = {} - - if what == "class": - return class_doc(obj, func_doc=func_doc, doc=doc, config=config) - elif what in ("function", "method"): - return func_doc(obj, doc=doc, config=config) - else: - if doc is None: - doc = pydoc.getdoc(obj) - return obj_doc(obj, doc, config=config) diff --git a/docs/sphinxext/docscrape_sphinx.py b/docs/sphinxext/docscrape_sphinx.py deleted file mode 100644 index 771c1ea445d..00000000000 --- a/docs/sphinxext/docscrape_sphinx.py +++ /dev/null @@ -1,434 +0,0 @@ -import re -import inspect -import textwrap -import pydoc -from collections.abc import Callable -import os - -from jinja2 import FileSystemLoader -from jinja2.sandbox import SandboxedEnvironment -import sphinx -from sphinx.jinja2glue import BuiltinTemplateLoader - -from .docscrape import NumpyDocString, FunctionDoc, ClassDoc, ObjDoc -from .docscrape import get_doc_object as get_doc_object_orig -from .xref import make_xref - - -IMPORT_MATPLOTLIB_RE = r"\b(import +matplotlib|from +matplotlib +import)\b" - - -class SphinxDocString(NumpyDocString): - 
def __init__(self, docstring, config=None): - if config is None: - config = {} - NumpyDocString.__init__(self, docstring, config=config) - self.load_config(config) - - def load_config(self, config): - self.use_plots = config.get("use_plots", False) - self.class_members_toctree = config.get("class_members_toctree", True) - self.attributes_as_param_list = config.get("attributes_as_param_list", True) - self.xref_param_type = config.get("xref_param_type", False) - self.xref_aliases = config.get("xref_aliases", dict()) - self.xref_ignore = config.get("xref_ignore", set()) - self.template = config.get("template", None) - if self.template is None: - template_dirs = [os.path.join(os.path.dirname(__file__), "templates")] - template_loader = FileSystemLoader(template_dirs) - template_env = SandboxedEnvironment(loader=template_loader) - self.template = template_env.get_template("numpydoc_docstring.rst") - - # string conversion routines - def _str_header(self, name, symbol="`"): - return [".. rubric:: " + name, ""] - - def _str_field_list(self, name): - return [":" + name + ":"] - - def _str_indent(self, doc, indent=4): - out = [] - for line in doc: - out += [" " * indent + line] - return out - - def _str_signature(self): - return [""] - - def _str_summary(self): - return self["Summary"] + [""] - - def _str_extended_summary(self): - return self["Extended Summary"] + [""] - - def _str_returns(self, name="Returns"): - named_fmt = "**%s** : %s" - unnamed_fmt = "%s" - - out = [] - if self[name]: - out += self._str_field_list(name) - out += [""] - for param in self[name]: - param_type = param.type - if param_type and self.xref_param_type: - param_type = make_xref( - param_type, self.xref_aliases, self.xref_ignore - ) - if param.name: - out += self._str_indent( - [named_fmt % (param.name.strip(), param_type)] - ) - else: - out += self._str_indent([unnamed_fmt % param_type.strip()]) - if not param.desc: - out += self._str_indent([".."], 8) - else: - out += self._str_indent(param.desc, 8) - out += [""] - return out - - def _escape_args_and_kwargs(self, name): - if name[:2] == "**": - return r"\*\*" + name[2:] - elif name[:1] == "*": - return r"\*" + name[1:] - else: - return name - - def _process_param(self, param, desc, fake_autosummary): - """Determine how to display a parameter - - Emulates autosummary behavior if fake_autosummary - - Parameters - ---------- - param : str - The name of the parameter - desc : list of str - The parameter description as given in the docstring. This is - ignored when autosummary logic applies. - fake_autosummary : bool - If True, autosummary-style behaviour will apply for params - that are attributes of the class and have a docstring. - - Returns - ------- - display_param : str - The marked up parameter name for display. This may include a link - to the corresponding attribute's own documentation. - desc : list of str - A list of description lines. This may be identical to the input - ``desc``, if ``autosum is None`` or ``param`` is not a class - attribute, or it will be a summary of the class attribute's - docstring. - - Notes - ----- - This does not have the autosummary functionality to display a method's - signature, and hence is not used to format methods. It may be - complicated to incorporate autosummary's signature mangling, as it - relies on Sphinx's plugin mechanism. - """ - param = self._escape_args_and_kwargs(param.strip()) - # param = param.strip() - # XXX: If changing the following, please check the rendering when param - # ends with '_', e.g. 
'word_' - # See https://github.com/numpy/numpydoc/pull/144 - display_param = f"**{param}**" - - if not fake_autosummary: - return display_param, desc - - param_obj = getattr(self._obj, param, None) - if not ( - callable(param_obj) - or isinstance(param_obj, property) - or inspect.isgetsetdescriptor(param_obj) - or inspect.ismemberdescriptor(param_obj) - ): - param_obj = None - obj_doc = pydoc.getdoc(param_obj) - - if not (param_obj and obj_doc): - return display_param, desc - - prefix = getattr(self, "_name", "") - if prefix: - link_prefix = f"{prefix}." - else: - link_prefix = "" - - # Referenced object has a docstring - display_param = f":obj:`{param} <{link_prefix}{param}>`" - if obj_doc: - # Overwrite desc. Take summary logic of autosummary - desc = re.split(r"\n\s*\n", obj_doc.strip(), 1)[0] - # XXX: Should this have DOTALL? - # It does not in autosummary - m = re.search(r"^([A-Z].*?\.)(?:\s|$)", " ".join(desc.split())) - if m: - desc = m.group(1).strip() - else: - desc = desc.partition("\n")[0] - desc = desc.split("\n") - return display_param, desc - - def _str_param_list(self, name, fake_autosummary=False): - """Generate RST for a listing of parameters or similar - - Parameter names are displayed as bold text, and descriptions - are in definition lists. - - Parameters - ---------- - name : str - Section name (e.g. Parameters) - fake_autosummary : bool - When True, the parameter names may correspond to attributes of the - object beign documented, usually ``property`` instances on a class. - In this case, names will be linked to fuller descriptions. - - Returns - ------- - rst : list of str - """ - out = [] - if self[name]: - out += self._str_field_list(name) - out += [""] - for param in self[name]: - display_param, desc = self._process_param( - param.name, param.desc, fake_autosummary - ) - parts = [] - if display_param: - parts.append(display_param) - param_type = param.type - if param_type: - param_type = param.type - if self.xref_param_type: - param_type = make_xref( - param_type, self.xref_aliases, self.xref_ignore - ) - parts.append(param_type) - out += self._str_indent([" : ".join(parts)]) - - if not desc: - # empty definition - desc = [".."] - out += self._str_indent(desc, 8) - out += [""] - - return out - - def _str_member_list(self, name): - """ - Generate a member listing, autosummary:: table where possible, - and a table where not. - - """ - out = [] - if self[name]: - out += [f".. rubric:: {name}", ""] - prefix = getattr(self, "_name", "") - - if prefix: - prefix = f"~{prefix}." - - autosum = [] - others = [] - for param in self[name]: - param = param._replace(name=param.name.strip()) - - # Check if the referenced member can have a docstring or not - param_obj = getattr(self._obj, param.name, None) - if not ( - callable(param_obj) - or isinstance(param_obj, property) - or inspect.isdatadescriptor(param_obj) - ): - param_obj = None - - if param_obj and pydoc.getdoc(param_obj): - # Referenced object has a docstring - autosum += [f" {prefix}{param.name}"] - else: - others.append(param) - - if autosum: - out += [".. 
autosummary::"] - if self.class_members_toctree: - out += [" :toctree:"] - out += [""] + autosum - - if others: - maxlen_0 = max(3, max(len(p.name) + 4 for p in others)) - hdr = "=" * maxlen_0 + " " + "=" * 10 - fmt = "%%%ds %%s " % (maxlen_0,) - out += ["", "", hdr] - for param in others: - name = "**" + param.name.strip() + "**" - desc = " ".join(x.strip() for x in param.desc).strip() - if param.type: - desc = f"({param.type}) {desc}" - out += [fmt % (name, desc)] - out += [hdr] - out += [""] - return out - - def _str_section(self, name): - out = [] - if self[name]: - out += self._str_header(name) - content = textwrap.dedent("\n".join(self[name])).split("\n") - out += content - out += [""] - return out - - def _str_see_also(self, func_role): - out = [] - if self["See Also"]: - see_also = super()._str_see_also(func_role) - out = [".. seealso::", ""] - out += self._str_indent(see_also[2:]) - return out - - def _str_warnings(self): - out = [] - if self["Warnings"]: - out = [".. warning::", ""] - out += self._str_indent(self["Warnings"]) - out += [""] - return out - - def _str_index(self): - idx = self["index"] - out = [] - if len(idx) == 0: - return out - - out += [f".. index:: {idx.get('default', '')}"] - for section, references in idx.items(): - if section == "default": - continue - elif section == "refguide": - out += [f" single: {', '.join(references)}"] - else: - out += [f" {section}: {','.join(references)}"] - out += [""] - return out - - def _str_references(self): - out = [] - if self["References"]: - out += self._str_header("References") - if isinstance(self["References"], str): - self["References"] = [self["References"]] - out.extend(self["References"]) - out += [""] - # Latex collects all references to a separate bibliography, - # so we need to insert links to it - out += [".. only:: latex", ""] - items = [] - for line in self["References"]: - m = re.match(r".. \[([a-z0-9._-]+)\]", line, re.I) - if m: - items.append(m.group(1)) - out += [" " + ", ".join([f"[{item}]_" for item in items]), ""] - return out - - def _str_examples(self): - examples_str = "\n".join(self["Examples"]) - - if ( - self.use_plots - and re.search(IMPORT_MATPLOTLIB_RE, examples_str) - and "plot::" not in examples_str - ): - out = [] - out += self._str_header("Examples") - out += [".. 
plot::", ""] - out += self._str_indent(self["Examples"]) - out += [""] - return out - else: - return self._str_section("Examples") - - def __str__(self, indent=0, func_role="obj"): - ns = { - "signature": self._str_signature(), - "index": self._str_index(), - "summary": self._str_summary(), - "extended_summary": self._str_extended_summary(), - "parameters": self._str_param_list("Parameters"), - "returns": self._str_returns("Returns"), - "yields": self._str_returns("Yields"), - "receives": self._str_returns("Receives"), - "other_parameters": self._str_param_list("Other Parameters"), - "raises": self._str_returns("Raises"), - "warns": self._str_returns("Warns"), - "warnings": self._str_warnings(), - "see_also": self._str_see_also(func_role), - "notes": self._str_section("Notes"), - "references": self._str_references(), - "examples": self._str_examples(), - "attributes": ( - self._str_param_list("Attributes", fake_autosummary=True) - if self.attributes_as_param_list - else self._str_member_list("Attributes") - ), - "methods": self._str_member_list("Methods"), - } - ns = {k: "\n".join(v) for k, v in ns.items()} - - rendered = self.template.render(**ns) - return "\n".join(self._str_indent(rendered.split("\n"), indent)) - - -class SphinxFunctionDoc(SphinxDocString, FunctionDoc): - def __init__(self, obj, doc=None, config=None): - if config is None: - config = {} - self.load_config(config) - FunctionDoc.__init__(self, obj, doc=doc, config=config) - - -class SphinxClassDoc(SphinxDocString, ClassDoc): - def __init__(self, obj, doc=None, func_doc=None, config=None): - if config is None: - config = {} - self.load_config(config) - ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) - - -class SphinxObjDoc(SphinxDocString, ObjDoc): - def __init__(self, obj, doc=None, config=None): - if config is None: - config = {} - self.load_config(config) - ObjDoc.__init__(self, obj, doc=doc, config=config) - - -def get_doc_object(obj, what=None, doc=None, config=None, builder=None): - if config is None: - config = {} - - template_dirs = [os.path.join(os.path.dirname(__file__), "templates")] - if builder is not None: - template_loader = BuiltinTemplateLoader() - template_loader.init(builder, dirs=template_dirs) - else: - template_loader = FileSystemLoader(template_dirs) - template_env = SandboxedEnvironment(loader=template_loader) - config["template"] = template_env.get_template("numpydoc_docstring.rst") - - return get_doc_object_orig( - obj, - what=what, - doc=doc, - config=config, - class_doc=SphinxClassDoc, - func_doc=SphinxFunctionDoc, - obj_doc=SphinxObjDoc, - ) diff --git a/docs/sphinxext/github.py b/docs/sphinxext/github.py deleted file mode 100644 index 4f74b64d648..00000000000 --- a/docs/sphinxext/github.py +++ /dev/null @@ -1,154 +0,0 @@ -"""Define text roles for GitHub - -* ghissue - Issue -* ghpull - Pull Request -* ghuser - User - -Adapted from bitbucket example here: -https://bitbucket.org/birkenfeld/sphinx-contrib/src/tip/bitbucket/sphinxcontrib/bitbucket.py - -Authors -------- - -* Doug Hellmann -* Min RK -""" -# -# Original Copyright (c) 2010 Doug Hellmann. All rights reserved. -# - -from docutils import nodes, utils -from docutils.parsers.rst.roles import set_classes - -def make_link_node(rawtext, app, type, slug, options): - """Create a link to a github resource. - - :param rawtext: Text being replaced with link node. - :param app: Sphinx application context - :param type: Link type (issues, changeset, etc.) 
- :param slug: ID of the thing to link to - :param options: Options dictionary passed to role func. - """ - - try: - base = app.config.github_project_url - if not base: - raise AttributeError - if not base.endswith('/'): - base += '/' - except AttributeError as err: - raise ValueError('github_project_url configuration value is not set (%s)' % str(err)) - - ref = base + type + '/' + slug + '/' - set_classes(options) - prefix = "#" - if type == 'pull': - prefix = "PR " + prefix - node = nodes.reference(rawtext, prefix + utils.unescape(slug), refuri=ref, - **options) - return node - -def ghissue_role(name, rawtext, text, lineno, inliner, options={}, content=[]): - """Link to a GitHub issue. - - Returns 2 part tuple containing list of nodes to insert into the - document and a list of system messages. Both are allowed to be - empty. - - :param name: The role name used in the document. - :param rawtext: The entire markup snippet, with role. - :param text: The text marked with the role. - :param lineno: The line number where rawtext appears in the input. - :param inliner: The inliner instance that called us. - :param options: Directive options for customization. - :param content: The directive content for customization. - """ - - try: - issue_num = int(text) - if issue_num <= 0: - raise ValueError - except ValueError: - msg = inliner.reporter.error( - 'GitHub issue number must be a number greater than or equal to 1; ' - '"%s" is invalid.' % text, line=lineno) - prb = inliner.problematic(rawtext, rawtext, msg) - return [prb], [msg] - app = inliner.document.settings.env.app - #app.info('issue %r' % text) - if 'pull' in name.lower(): - category = 'pull' - elif 'issue' in name.lower(): - category = 'issues' - else: - msg = inliner.reporter.error( - 'GitHub roles include "ghpull" and "ghissue", ' - '"%s" is invalid.' % name, line=lineno) - prb = inliner.problematic(rawtext, rawtext, msg) - return [prb], [msg] - node = make_link_node(rawtext, app, category, str(issue_num), options) - return [node], [] - -def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[]): - """Link to a GitHub user. - - Returns 2 part tuple containing list of nodes to insert into the - document and a list of system messages. Both are allowed to be - empty. - - :param name: The role name used in the document. - :param rawtext: The entire markup snippet, with role. - :param text: The text marked with the role. - :param lineno: The line number where rawtext appears in the input. - :param inliner: The inliner instance that called us. - :param options: Directive options for customization. - :param content: The directive content for customization. - """ - app = inliner.document.settings.env.app - #app.info('user link %r' % text) - ref = 'https://www.github.com/' + text - node = nodes.reference(rawtext, text, refuri=ref, **options) - return [node], [] - -def ghcommit_role(name, rawtext, text, lineno, inliner, options={}, content=[]): - """Link to a GitHub commit. - - Returns 2 part tuple containing list of nodes to insert into the - document and a list of system messages. Both are allowed to be - empty. - - :param name: The role name used in the document. - :param rawtext: The entire markup snippet, with role. - :param text: The text marked with the role. - :param lineno: The line number where rawtext appears in the input. - :param inliner: The inliner instance that called us. - :param options: Directive options for customization. - :param content: The directive content for customization. 
- """ - app = inliner.document.settings.env.app - #app.info('user link %r' % text) - try: - base = app.config.github_project_url - if not base: - raise AttributeError - if not base.endswith('/'): - base += '/' - except AttributeError as err: - raise ValueError('github_project_url configuration value is not set (%s)' % str(err)) - - ref = base + text - node = nodes.reference(rawtext, text[:6], refuri=ref, **options) - return [node], [] - - -def setup(app): - """Install the plugin. - - :param app: Sphinx application context. - """ - app.info('Initializing GitHub plugin') - app.add_role('ghissue', ghissue_role) - app.add_role('ghpull', ghissue_role) - app.add_role('ghuser', ghuser_role) - app.add_role('ghcommit', ghcommit_role) - app.add_config_value('github_project_url', None, 'env') diff --git a/docs/sphinxext/math_dollar.py b/docs/sphinxext/math_dollar.py deleted file mode 100644 index ad415deb905..00000000000 --- a/docs/sphinxext/math_dollar.py +++ /dev/null @@ -1,63 +0,0 @@ -import re - -def dollars_to_math(source): - r""" - Replace dollar signs with backticks. - - More precisely, do a regular expression search. Replace a plain - dollar sign ($) by a backtick (`). Replace an escaped dollar sign - (\$) by a dollar sign ($). Don't change a dollar sign preceded or - followed by a backtick (`$ or $`), because of strings like - "``$HOME``". Don't make any changes on lines starting with - spaces, because those are indented and hence part of a block of - code or examples. - - This also doesn't replaces dollar signs enclosed in curly braces, - to avoid nested math environments, such as :: - - $f(n) = 0 \text{ if $n$ is prime}$ - - Thus the above line would get changed to - - `f(n) = 0 \text{ if $n$ is prime}` - """ - s = "\n".join(source) - if s.find("$") == -1: - return - # This searches for "$blah$" inside a pair of curly braces -- - # don't change these, since they're probably coming from a nested - # math environment. So for each match, we replace it with a temporary - # string, and later on we substitute the original back. - global _data - _data = {} - def repl(matchobj): - global _data - s = matchobj.group(0) - t = "___XXX_REPL_%d___" % len(_data) - _data[t] = s - return t - s = re.sub(r"({[^{}$]*\$[^{}$]*\$[^{}]*})", repl, s) - # matches $...$ - dollars = re.compile(r"(?=0.18.1 - """ - return ( - node.findall(condition, **kwargs) - if hasattr(node, "findall") - else node.traverse(condition, **kwargs) - ) - - -def rename_references(app, what, name, obj, options, lines): - # decorate reference numbers so that there are no duplicates - # these are later undecorated in the doctree, in relabel_references - references = set() - for line in lines: - line = line.strip() - m = re.match(r"^\.\. +\[(%s)\]" % app.config.numpydoc_citation_re, line, re.I) - if m: - references.add(m.group(1)) - - if references: - # we use a hash to mangle the reference name to avoid invalid names - sha = hashlib.sha256() - sha.update(name.encode("utf8")) - prefix = "R" + sha.hexdigest()[:HASH_LEN] - - for r in references: - new_r = prefix + "-" + r - for i, line in enumerate(lines): - lines[i] = lines[i].replace(f"[{r}]_", f"[{new_r}]_") - lines[i] = lines[i].replace(f".. [{r}]", f".. 
[{new_r}]") - - -def _is_cite_in_numpydoc_docstring(citation_node): - # Find DEDUPLICATION_TAG in comment as last node of sibling section - - # XXX: I failed to use citation_node.traverse to do this: - section_node = citation_node.parent - - def is_docstring_section(node): - return isinstance(node, (section, desc_content)) - - while not is_docstring_section(section_node): - section_node = section_node.parent - if section_node is None: - return False - - sibling_sections = itertools.chain( - _traverse_or_findall( - section_node, - is_docstring_section, - include_self=True, - descend=False, - siblings=True, - ) - ) - for sibling_section in sibling_sections: - if not sibling_section.children: - continue - - for child in sibling_section.children[::-1]: - if not isinstance(child, comment): - continue - - if child.rawsource.strip() == DEDUPLICATION_TAG.strip(): - return True - - return False - - -def relabel_references(app, doc): - # Change 'hash-ref' to 'ref' in label text - for citation_node in _traverse_or_findall(doc, citation): - if not _is_cite_in_numpydoc_docstring(citation_node): - continue - label_node = citation_node[0] - prefix, _, new_label = label_node[0].astext().partition("-") - assert len(prefix) == HASH_LEN + 1 - new_text = Text(new_label) - label_node.replace(label_node[0], new_text) - - for id_ in citation_node["backrefs"]: - ref = doc.ids[id_] - ref_text = ref[0] - - # Sphinx has created pending_xref nodes with [reftext] text. - def matching_pending_xref(node): - return ( - isinstance(node, pending_xref) - and node[0].astext() == f"[{ref_text}]" - ) - - for xref_node in _traverse_or_findall(ref.parent, matching_pending_xref): - xref_node.replace(xref_node[0], Text(f"[{new_text}]")) - ref.replace(ref_text, new_text.copy()) - - -def clean_backrefs(app, doc, docname): - # only::latex directive has resulted in citation backrefs without reference - known_ref_ids = set() - for ref in _traverse_or_findall(doc, reference, descend=True): - for id_ in ref["ids"]: - known_ref_ids.add(id_) - # some extensions produce backrefs to inline elements - for ref in _traverse_or_findall(doc, inline, descend=True): - for id_ in ref["ids"]: - known_ref_ids.add(id_) - for citation_node in _traverse_or_findall(doc, citation, descend=True): - # remove backrefs to non-existent refs - citation_node["backrefs"] = [ - id_ for id_ in citation_node["backrefs"] if id_ in known_ref_ids - ] - - -DEDUPLICATION_TAG = " !! processed by numpydoc !!" 
- - -def mangle_docstrings(app, what, name, obj, options, lines): - if DEDUPLICATION_TAG in lines: - return - show_inherited_class_members = app.config.numpydoc_show_inherited_class_members - if isinstance(show_inherited_class_members, dict): - try: - show_inherited_class_members = show_inherited_class_members[name] - except KeyError: - show_inherited_class_members = True - - cfg = { - "use_plots": app.config.numpydoc_use_plots, - "show_class_members": app.config.numpydoc_show_class_members, - "show_inherited_class_members": show_inherited_class_members, - "class_members_toctree": app.config.numpydoc_class_members_toctree, - "attributes_as_param_list": app.config.numpydoc_attributes_as_param_list, - "xref_param_type": app.config.numpydoc_xref_param_type, - "xref_aliases": app.config.numpydoc_xref_aliases_complete, - "xref_ignore": app.config.numpydoc_xref_ignore, - } - - cfg.update(options or {}) - u_NL = "\n" - if what == "module": - # Strip top title - pattern = "^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*" - title_re = re.compile(pattern, re.I | re.S) - lines[:] = title_re.sub("", u_NL.join(lines)).split(u_NL) - else: - try: - doc = get_doc_object( - obj, what, u_NL.join(lines), config=cfg, builder=app.builder - ) - lines[:] = str(doc).split(u_NL) - except Exception: - logger.error("[numpydoc] While processing docstring for %r", name) - raise - - if app.config.numpydoc_validation_checks: - # If the user has supplied patterns to ignore via the - # numpydoc_validation_exclude config option, skip validation for - # any objs whose name matches any of the patterns - excluder = app.config.numpydoc_validation_excluder - exclude_from_validation = excluder.search(name) if excluder else False - if not exclude_from_validation: - # TODO: Currently, all validation checks are run and only those - # selected via config are reported. It would be more efficient to - # only run the selected checks. 
- report = validate(doc) - errors = [ - err - for err in report["errors"] - if not ( - ( - overrides := app.config.numpydoc_validation_overrides.get( - err[0] - ) - ) - and re.search(overrides, report["docstring"]) - ) - ] - if {err[0] for err in errors} & app.config.numpydoc_validation_checks: - msg = ( - f"[numpydoc] Validation warnings while processing " - f"docstring for {name!r}:\n" - ) - for err in errors: - if err[0] in app.config.numpydoc_validation_checks: - msg += f" {err[0]}: {err[1]}\n" - logger.warning(msg) - - # call function to replace reference numbers so that there are no - # duplicates - rename_references(app, what, name, obj, options, lines) - - lines += ["..", DEDUPLICATION_TAG] - - -def mangle_signature(app, what, name, obj, options, sig, retann): - # Do not try to inspect classes that don't define `__init__` - if inspect.isclass(obj) and ( - not hasattr(obj, "__init__") - or "initializes x; see " in pydoc.getdoc(obj.__init__) - ): - return "", "" - - if not (isinstance(obj, Callable) or hasattr(obj, "__argspec_is_invalid_")): - return - - if not hasattr(obj, "__doc__"): - return - doc = get_doc_object(obj, config={"show_class_members": False}) - sig = doc["Signature"] or _clean_text_signature( - getattr(obj, "__text_signature__", None) - ) - if sig: - sig = re.sub("^[^(]*", "", sig) - return sig, "" - - -def _clean_text_signature(sig): - if sig is None: - return None - start_pattern = re.compile(r"^[^(]*\(") - start, end = start_pattern.search(sig).span() - start_sig = sig[start:end] - sig = sig[end:-1] - sig = re.sub(r"^\$(self|module|type)(,\s|$)", "", sig, count=1) - sig = re.sub(r"(^|(?<=,\s))/,\s\*", "*", sig, count=1) - return start_sig + sig + ")" - - -def setup(app, get_doc_object_=get_doc_object): - if not hasattr(app, "add_config_value"): - return # probably called by nose, better bail out - - global get_doc_object - get_doc_object = get_doc_object_ - - app.setup_extension("sphinx.ext.autosummary") - app.connect("config-inited", update_config) - app.connect("autodoc-process-docstring", mangle_docstrings) - app.connect("autodoc-process-signature", mangle_signature) - app.connect("doctree-read", relabel_references) - app.connect("doctree-resolved", clean_backrefs) - app.add_config_value("numpydoc_use_plots", None, False) - app.add_config_value("numpydoc_show_class_members", True, True) - app.add_config_value( - "numpydoc_show_inherited_class_members", True, True, types=(bool, dict) - ) - app.add_config_value("numpydoc_class_members_toctree", True, True) - app.add_config_value("numpydoc_citation_re", "[a-z0-9_.-]+", True) - app.add_config_value("numpydoc_attributes_as_param_list", True, True) - app.add_config_value("numpydoc_xref_param_type", False, True) - app.add_config_value("numpydoc_xref_aliases", dict(), True) - app.add_config_value("numpydoc_xref_ignore", set(), True) - app.add_config_value("numpydoc_validation_checks", set(), True) - app.add_config_value("numpydoc_validation_exclude", set(), False) - app.add_config_value("numpydoc_validation_overrides", dict(), False) - - # Extra mangling domains - app.add_domain(NumpyPythonDomain) - app.add_domain(NumpyCDomain) - - metadata = {"version": __version__, "parallel_read_safe": True} - return metadata - - -def update_config(app, config=None): - """Update the configuration with default values.""" - if config is None: # needed for testing and old Sphinx - config = app.config - # Do not simply overwrite the `app.config.numpydoc_xref_aliases` - # otherwise the next sphinx-build will compare the incoming values 
(without - # our additions) to the old values (with our additions) and trigger - # a full rebuild! - numpydoc_xref_aliases_complete = deepcopy(config.numpydoc_xref_aliases) - for key, value in DEFAULT_LINKS.items(): - if key not in numpydoc_xref_aliases_complete: - numpydoc_xref_aliases_complete[key] = value - config.numpydoc_xref_aliases_complete = numpydoc_xref_aliases_complete - - # Processing to determine whether numpydoc_validation_checks is treated - # as a blocklist or allowlist - config.numpydoc_validation_checks = get_validation_checks( - config.numpydoc_validation_checks - ) - - # Generate the regexp for docstrings to ignore during validation - if isinstance(config.numpydoc_validation_exclude, str): - raise ValueError( - f"numpydoc_validation_exclude must be a container of strings, " - f"e.g. [{config.numpydoc_validation_exclude!r}]." - ) - config.numpydoc_validation_excluder = None - if config.numpydoc_validation_exclude: - exclude_expr = re.compile( - r"|".join(exp for exp in config.numpydoc_validation_exclude) - ) - config.numpydoc_validation_excluder = exclude_expr - - for check, patterns in config.numpydoc_validation_overrides.items(): - config.numpydoc_validation_overrides[check] = re.compile( - r"|".join(exp for exp in patterns) - ) - - -# ------------------------------------------------------------------------------ -# Docstring-mangling domains -# ------------------------------------------------------------------------------ - -from docutils.statemachine import ViewList -from sphinx.domains.c import CDomain -from sphinx.domains.python import PythonDomain - - -class ManglingDomainBase: - directive_mangling_map = {} - - def __init__(self, *a, **kw): - super().__init__(*a, **kw) - self.wrap_mangling_directives() - - def wrap_mangling_directives(self): - for name, objtype in list(self.directive_mangling_map.items()): - self.directives[name] = wrap_mangling_directive( - self.directives[name], objtype - ) - - -class NumpyPythonDomain(ManglingDomainBase, PythonDomain): - name = "np" - directive_mangling_map = { - "function": "function", - "class": "class", - "exception": "class", - "method": "function", - "classmethod": "function", - "staticmethod": "function", - "attribute": "attribute", - } - indices = [] - - -class NumpyCDomain(ManglingDomainBase, CDomain): - name = "np-c" - directive_mangling_map = { - "function": "function", - "member": "attribute", - "macro": "function", - "type": "class", - "var": "object", - } - - -def match_items(lines, content_old): - """Create items for mangled lines. - - This function tries to match the lines in ``lines`` with the items (source - file references and line numbers) in ``content_old``. The - ``mangle_docstrings`` function changes the actual docstrings, but doesn't - keep track of where each line came from. The mangling does many operations - on the original lines, which are hard to track afterwards. - - Many of the line changes come from deleting or inserting blank lines. This - function tries to match lines by ignoring blank lines. All other changes - (such as inserting figures or changes in the references) are completely - ignored, so the generated line numbers will be off if ``mangle_docstrings`` - does anything non-trivial. - - This is a best-effort function and the real fix would be to make - ``mangle_docstrings`` actually keep track of the ``items`` together with - the ``lines``. 
- - Examples - -------- - >>> lines = ['', 'A', '', 'B', ' ', '', 'C', 'D'] - >>> lines_old = ['a', '', '', 'b', '', 'c'] - >>> items_old = [('file1.py', 0), ('file1.py', 1), ('file1.py', 2), - ... ('file2.py', 0), ('file2.py', 1), ('file2.py', 2)] - >>> content_old = ViewList(lines_old, items=items_old) - >>> match_items(lines, content_old) # doctest: +NORMALIZE_WHITESPACE - [('file1.py', 0), ('file1.py', 0), ('file2.py', 0), ('file2.py', 0), - ('file2.py', 2), ('file2.py', 2), ('file2.py', 2), ('file2.py', 2)] - >>> # first 2 ``lines`` are matched to 'a', second 2 to 'b', rest to 'c' - >>> # actual content is completely ignored. - - Notes - ----- - The algorithm tries to match any line in ``lines`` with one in - ``lines_old``. It skips over all empty lines in ``lines_old`` and assigns - this line number to all lines in ``lines``, unless a non-empty line is - found in ``lines`` in which case it goes to the next line in ``lines_old``. - - """ - items_new = [] - lines_old = content_old.data - items_old = content_old.items - j = 0 - for i, line in enumerate(lines): - # go to next non-empty line in old: - # line.strip() checks whether the string is all whitespace - while j < len(lines_old) - 1 and not lines_old[j].strip(): - j += 1 - items_new.append(items_old[j]) - if line.strip() and j < len(lines_old) - 1: - j += 1 - assert len(items_new) == len(lines) - return items_new - - -def wrap_mangling_directive(base_directive, objtype): - class directive(base_directive): - def run(self): - env = self.state.document.settings.env - - name = None - if self.arguments: - m = re.match(r"^(.*\s+)?(.*?)(\(.*)?", self.arguments[0]) - name = m.group(2).strip() - - if not name: - name = self.arguments[0] - - lines = list(self.content) - mangle_docstrings(env.app, objtype, name, None, None, lines) - if self.content: - items = match_items(lines, self.content) - self.content = ViewList(lines, items=items, parent=self.content.parent) - - return base_directive.run(self) - - return directive diff --git a/niworkflows/__about__.py b/niworkflows/__about__.py index e0923bf4334..e88c2194388 100644 --- a/niworkflows/__about__.py +++ b/niworkflows/__about__.py @@ -26,17 +26,14 @@ Center for Reproducible Neuroscience (https://reproducibility.stanford.edu/), as well as for open-source software distribution. """ -from datetime import datetime -__packagename__ = "niworkflows" -__copyright__ = "Copyright {}, The NiPreps Developers".format( - datetime.now().year -) +__packagename__ = 'niworkflows' +__copyright__ = 'Copyright, The NiPreps Developers' __credits__ = [ - "Oscar Esteban", - "Ross Blair", - "Shoshana L. Berleant", - "Christopher J. Markiewicz", - "Chris Gorgolewski", - "Russell A. Poldrack", + 'Oscar Esteban', + 'Ross Blair', + 'Shoshana L. Berleant', + 'Christopher J. Markiewicz', + 'Chris Gorgolewski', + 'Russell A. 
Poldrack', ] diff --git a/niworkflows/__init__.py b/niworkflows/__init__.py index 390c7030239..d3cf16edeaf 100644 --- a/niworkflows/__init__.py +++ b/niworkflows/__init__.py @@ -1,33 +1,35 @@ # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """NeuroImaging Workflows (NIWorkflows) is a selection of image processing workflows.""" + import logging from acres import Loader -from .__about__ import __packagename__, __copyright__, __credits__ +from .__about__ import __copyright__, __credits__, __packagename__ + try: from ._version import __version__ except ImportError: # pragma: no cover - __version__ = "0+unknown" + __version__ = '0+unknown' __all__ = [ - "__version__", - "__packagename__", - "__copyright__", - "__credits__", - "NIWORKFLOWS_LOG", - "load_resource", + '__version__', + '__packagename__', + '__copyright__', + '__credits__', + 'NIWORKFLOWS_LOG', + 'load_resource', ] NIWORKFLOWS_LOG = logging.getLogger(__packagename__) NIWORKFLOWS_LOG.setLevel(logging.INFO) try: - import matplotlib + import matplotlib as mpl - matplotlib.use("Agg") + mpl.use('Agg') except ImportError: pass diff --git a/niworkflows/anat/ants.py b/niworkflows/anat/ants.py index 5854f7c9d64..c77089f85eb 100644 --- a/niworkflows/anat/ants.py +++ b/niworkflows/anat/ants.py @@ -23,12 +23,9 @@ """Nipype translation of ANTs' workflows.""" # general purpose -from collections import OrderedDict from multiprocessing import cpu_count from warnings import warn -# nipype -from nipype.pipeline import engine as pe from nipype.interfaces import utility as niu from nipype.interfaces.ants import ( AI, @@ -39,35 +36,40 @@ ThresholdImage, ) +# nipype +from nipype.pipeline import engine as pe + from ..data import load as load_data -from ..utils.misc import get_template_specs -from ..utils.connections import pop_file as _pop +from ..interfaces.fixes import ( + FixHeaderApplyTransforms as ApplyTransforms, +) # niworkflows from ..interfaces.fixes import ( FixHeaderRegistration as Registration, - FixHeaderApplyTransforms as ApplyTransforms, ) -from ..interfaces.nibabel import ApplyMask, RegridToZooms from ..interfaces.header import CopyXForm - +from ..interfaces.nibabel import ApplyMask, RegridToZooms +from ..utils.connections import pop_file as _pop +from ..utils.misc import get_template_specs ATROPOS_MODELS = { - "T1w": OrderedDict([("nclasses", 3), ("csf", 1), ("gm", 2), ("wm", 3)]), - "T2w": OrderedDict([("nclasses", 3), ("csf", 3), ("gm", 2), ("wm", 1)]), - "FLAIR": OrderedDict([("nclasses", 3), ("csf", 1), ("gm", 3), ("wm", 2)]), + 'T1w': {'nclasses': 3, 'csf': 1, 'gm': 2, 'wm': 3}, + 'T2w': {'nclasses': 3, 'csf': 3, 'gm': 2, 'wm': 1}, + 'FLAIR': {'nclasses': 3, 'csf': 1, 'gm': 3, 'wm': 2}, } +T1W_MODEL = tuple(ATROPOS_MODELS['T1w'].values()) def init_brain_extraction_wf( - name="brain_extraction_wf", - in_template="OASIS30ANTs", + name='brain_extraction_wf', + in_template='OASIS30ANTs', template_spec=None, use_float=True, - normalization_quality="precise", + normalization_quality='precise', omp_nthreads=None, mem_gb=3.0, - bids_suffix="T1w", + bids_suffix='T1w', atropos_refine=True, atropos_use_random_seed=True, atropos_model=None, @@ -185,7 +187,8 @@ def init_brain_extraction_wf( Output :abbr:`TPMs (tissue probability maps)` by ATROPOS """ - from packaging.version import parse as parseversion, Version + from packaging.version import Version + from packaging.version import parse as parseversion from templateflow.api import get as get_template wf = pe.Workflow(name) @@ 
-193,27 +196,27 @@ def init_brain_extraction_wf( template_spec = template_spec or {} # suffix passed via spec takes precedence - template_spec["suffix"] = template_spec.get("suffix", bids_suffix) + template_spec['suffix'] = template_spec.get('suffix', bids_suffix) tpl_target_path, common_spec = get_template_specs( - in_template, template_spec=template_spec, fallback=True, + in_template, + template_spec=template_spec, + fallback=True, ) # Get probabilistic brain mask if available tpl_mask_path = get_template( - in_template, label="brain", suffix="probseg", **common_spec - ) or get_template(in_template, desc="brain", suffix="mask", **common_spec) + in_template, label='brain', suffix='probseg', **common_spec + ) or get_template(in_template, desc='brain', suffix='mask', **common_spec) if omp_nthreads is None or omp_nthreads < 1: omp_nthreads = cpu_count() - inputnode = pe.Node( - niu.IdentityInterface(fields=["in_files", "in_mask"]), name="inputnode" - ) + inputnode = pe.Node(niu.IdentityInterface(fields=['in_files', 'in_mask']), name='inputnode') # Try to find a registration mask, set if available tpl_regmask_path = get_template( - in_template, desc="BrainCerebellumExtraction", suffix="mask", **common_spec + in_template, desc='BrainCerebellumExtraction', suffix='mask', **common_spec ) if tpl_regmask_path: inputnode.inputs.in_mask = str(tpl_regmask_path) @@ -221,23 +224,21 @@ def init_brain_extraction_wf( outputnode = pe.Node( niu.IdentityInterface( fields=[ - "out_file", - "out_mask", - "bias_corrected", - "bias_image", - "out_segm", - "out_tpms", + 'out_file', + 'out_mask', + 'bias_corrected', + 'bias_image', + 'out_segm', + 'out_tpms', ] ), - name="outputnode", + name='outputnode', ) trunc = pe.MapNode( - ImageMath( - operation="TruncateImageIntensity", op2="0.01 0.999 256", copy_header=True - ), - name="truncate_images", - iterfield=["op1"], + ImageMath(operation='TruncateImageIntensity', op2='0.01 0.999 256', copy_header=True), + name='truncate_images', + iterfield=['op1'], ) inu_n4 = pe.MapNode( N4BiasFieldCorrection( @@ -250,39 +251,39 @@ def init_brain_extraction_wf( bspline_fitting_distance=bspline_fitting_distance, ), n_procs=omp_nthreads, - name="inu_n4", - iterfield=["input_image"], + name='inu_n4', + iterfield=['input_image'], ) res_tmpl = pe.Node( RegridToZooms(in_file=tpl_target_path, zooms=(4, 4, 4), smooth=True), - name="res_tmpl", + name='res_tmpl', ) - res_target = pe.Node(RegridToZooms(zooms=(4, 4, 4), smooth=True), name="res_target") + res_target = pe.Node(RegridToZooms(zooms=(4, 4, 4), smooth=True), name='res_target') lap_tmpl = pe.Node( - ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True), name="lap_tmpl" + ImageMath(operation='Laplacian', op2='1.5 1', copy_header=True), name='lap_tmpl' ) lap_tmpl.inputs.op1 = tpl_target_path lap_target = pe.Node( - ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True), - name="lap_target", + ImageMath(operation='Laplacian', op2='1.5 1', copy_header=True), + name='lap_target', ) - mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl") + mrg_tmpl = pe.Node(niu.Merge(2), name='mrg_tmpl') mrg_tmpl.inputs.in1 = tpl_target_path - mrg_target = pe.Node(niu.Merge(2), name="mrg_target") + mrg_target = pe.Node(niu.Merge(2), name='mrg_target') # Initialize transforms with antsAI init_aff = pe.Node( AI( - metric=("Mattes", 32, "Regular", 0.25), - transform=("Affine", 0.1), + metric=('Mattes', 32, 'Regular', 0.25), + transform=('Affine', 0.1), search_factor=(15, 0.1), principal_axes=False, convergence=(10, 1e-6, 10), verbose=True, 
), - name="init_aff", + name='init_aff', n_procs=omp_nthreads, ) @@ -292,31 +293,32 @@ def init_brain_extraction_wf( except ValueError: warn( "antsAI's option --search-grid was added in ANTS 2.3.0 " - f"({init_aff.interface.version} found.)" + f'({init_aff.interface.version} found.)', + stacklevel=1, ) # Set up spatial normalization settings_file = ( - "antsBrainExtraction_%s.json" + 'antsBrainExtraction_%s.json' if use_laplacian - else "antsBrainExtractionNoLaplacian_%s.json" + else 'antsBrainExtractionNoLaplacian_%s.json' ) norm = pe.Node( Registration(from_file=load_data(settings_file % normalization_quality)), - name="norm", + name='norm', n_procs=omp_nthreads, mem_gb=mem_gb, ) norm.inputs.float = use_float - fixed_mask_trait = "fixed_image_mask" + fixed_mask_trait = 'fixed_image_mask' - if norm.interface.version and parseversion(norm.interface.version) >= Version( - "2.2.0" - ): - fixed_mask_trait += "s" + if norm.interface.version and parseversion(norm.interface.version) >= Version('2.2.0'): + fixed_mask_trait += 's' map_brainmask = pe.Node( - ApplyTransforms(interpolation="Gaussian"), name="map_brainmask", mem_gb=1, + ApplyTransforms(interpolation='Gaussian'), + name='map_brainmask', + mem_gb=1, ) map_brainmask.inputs.input_image = str(tpl_mask_path) @@ -329,7 +331,7 @@ def init_brain_extraction_wf( outside_value=0, copy_header=True, ), - name="thr_brainmask", + name='thr_brainmask', ) # Refine INU correction @@ -344,107 +346,105 @@ def init_brain_extraction_wf( bspline_fitting_distance=bspline_fitting_distance, ), n_procs=omp_nthreads, - name="inu_n4_final", - iterfield=["input_image"], + name='inu_n4_final', + iterfield=['input_image'], ) try: inu_n4_final.inputs.rescale_intensities = True except ValueError: warn( "N4BiasFieldCorrection's --rescale-intensities option was added in ANTS 2.1.0 " - f"({inu_n4_final.interface.version} found.) Please consider upgrading.", + f'({inu_n4_final.interface.version} found.) 
Please consider upgrading.', UserWarning, + stacklevel=1, ) # Apply mask - apply_mask = pe.MapNode(ApplyMask(), iterfield=["in_file"], name="apply_mask") + apply_mask = pe.MapNode(ApplyMask(), iterfield=['in_file'], name='apply_mask') # fmt: off wf.connect([ - (inputnode, trunc, [("in_files", "op1")]), - (inputnode, inu_n4_final, [("in_files", "input_image")]), - (inputnode, init_aff, [("in_mask", "fixed_image_mask")]), - (inputnode, norm, [("in_mask", fixed_mask_trait)]), - (inputnode, map_brainmask, [(("in_files", _pop), "reference_image")]), - (trunc, inu_n4, [("output_image", "input_image")]), - (inu_n4, res_target, [(("output_image", _pop), "in_file")]), - (res_tmpl, init_aff, [("out_file", "fixed_image")]), - (res_target, init_aff, [("out_file", "moving_image")]), - (init_aff, norm, [("output_transform", "initial_moving_transform")]), + (inputnode, trunc, [('in_files', 'op1')]), + (inputnode, inu_n4_final, [('in_files', 'input_image')]), + (inputnode, init_aff, [('in_mask', 'fixed_image_mask')]), + (inputnode, norm, [('in_mask', fixed_mask_trait)]), + (inputnode, map_brainmask, [(('in_files', _pop), 'reference_image')]), + (trunc, inu_n4, [('output_image', 'input_image')]), + (inu_n4, res_target, [(('output_image', _pop), 'in_file')]), + (res_tmpl, init_aff, [('out_file', 'fixed_image')]), + (res_target, init_aff, [('out_file', 'moving_image')]), + (init_aff, norm, [('output_transform', 'initial_moving_transform')]), (norm, map_brainmask, [ - ("reverse_transforms", "transforms"), - ("reverse_invert_flags", "invert_transform_flags"), + ('reverse_transforms', 'transforms'), + ('reverse_invert_flags', 'invert_transform_flags'), ]), - (map_brainmask, thr_brainmask, [("output_image", "input_image")]), - (map_brainmask, inu_n4_final, [("output_image", "weight_image")]), - (inu_n4_final, apply_mask, [("output_image", "in_file")]), - (thr_brainmask, apply_mask, [("output_image", "in_mask")]), - (thr_brainmask, outputnode, [("output_image", "out_mask")]), - (inu_n4_final, outputnode, [("output_image", "bias_corrected"), - ("bias_image", "bias_image")]), - (apply_mask, outputnode, [("out_file", "out_file")]), + (map_brainmask, thr_brainmask, [('output_image', 'input_image')]), + (map_brainmask, inu_n4_final, [('output_image', 'weight_image')]), + (inu_n4_final, apply_mask, [('output_image', 'in_file')]), + (thr_brainmask, apply_mask, [('output_image', 'in_mask')]), + (thr_brainmask, outputnode, [('output_image', 'out_mask')]), + (inu_n4_final, outputnode, [('output_image', 'bias_corrected'), + ('bias_image', 'bias_image')]), + (apply_mask, outputnode, [('out_file', 'out_file')]), ]) # fmt: on - wm_tpm = ( - get_template(in_template, label="WM", suffix="probseg", **common_spec) or None - ) + wm_tpm = get_template(in_template, label='WM', suffix='probseg', **common_spec) or None if wm_tpm: map_wmmask = pe.Node( - ApplyTransforms(interpolation="Gaussian"), name="map_wmmask", mem_gb=1, + ApplyTransforms(interpolation='Gaussian'), + name='map_wmmask', + mem_gb=1, ) # Add the brain stem if it is found. 
- bstem_tpm = ( - get_template(in_template, label="BS", suffix="probseg", **common_spec) - or None - ) + bstem_tpm = get_template(in_template, label='BS', suffix='probseg', **common_spec) or None if bstem_tpm: - full_wm = pe.Node(niu.Function(function=_imsum), name="full_wm") + full_wm = pe.Node(niu.Function(function=_imsum), name='full_wm') full_wm.inputs.op1 = str(wm_tpm) full_wm.inputs.op2 = str(bstem_tpm) # fmt: off wf.connect([ - (full_wm, map_wmmask, [("out", "input_image")]) + (full_wm, map_wmmask, [('out', 'input_image')]) ]) # fmt: on else: map_wmmask.inputs.input_image = str(wm_tpm) # fmt: off wf.disconnect([ - (map_brainmask, inu_n4_final, [("output_image", "weight_image")]), + (map_brainmask, inu_n4_final, [('output_image', 'weight_image')]), ]) wf.connect([ - (inputnode, map_wmmask, [(("in_files", _pop), "reference_image")]), + (inputnode, map_wmmask, [(('in_files', _pop), 'reference_image')]), (norm, map_wmmask, [ - ("reverse_transforms", "transforms"), - ("reverse_invert_flags", "invert_transform_flags"), + ('reverse_transforms', 'transforms'), + ('reverse_invert_flags', 'invert_transform_flags'), ]), - (map_wmmask, inu_n4_final, [("output_image", "weight_image")]), + (map_wmmask, inu_n4_final, [('output_image', 'weight_image')]), ]) # fmt: on if use_laplacian: lap_tmpl = pe.Node( - ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True), - name="lap_tmpl", + ImageMath(operation='Laplacian', op2='1.5 1', copy_header=True), + name='lap_tmpl', ) lap_tmpl.inputs.op1 = tpl_target_path lap_target = pe.Node( - ImageMath(operation="Laplacian", op2="1.5 1", copy_header=True), - name="lap_target", + ImageMath(operation='Laplacian', op2='1.5 1', copy_header=True), + name='lap_target', ) - mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl") + mrg_tmpl = pe.Node(niu.Merge(2), name='mrg_tmpl') mrg_tmpl.inputs.in1 = tpl_target_path - mrg_target = pe.Node(niu.Merge(2), name="mrg_target") + mrg_target = pe.Node(niu.Merge(2), name='mrg_target') # fmt: off wf.connect([ - (inu_n4, lap_target, [(("output_image", _pop), "op1")]), - (lap_tmpl, mrg_tmpl, [("output_image", "in2")]), - (inu_n4, mrg_target, [("output_image", "in1")]), - (lap_target, mrg_target, [("output_image", "in2")]), - (mrg_tmpl, norm, [("out", "fixed_image")]), - (mrg_target, norm, [("out", "moving_image")]), + (inu_n4, lap_target, [(('output_image', _pop), 'op1')]), + (lap_tmpl, mrg_tmpl, [('output_image', 'in2')]), + (inu_n4, mrg_target, [('output_image', 'in1')]), + (lap_target, mrg_target, [('output_image', 'in2')]), + (mrg_tmpl, norm, [('out', 'fixed_image')]), + (mrg_target, norm, [('out', 'moving_image')]), ]) # fmt: on @@ -452,7 +452,7 @@ def init_brain_extraction_wf( norm.inputs.fixed_image = tpl_target_path # fmt: off wf.connect([ - (inu_n4, norm, [(("output_image", _pop), "moving_image")]), + (inu_n4, norm, [(('output_image', _pop), 'moving_image')]), ]) # fmt: on @@ -469,41 +469,41 @@ def init_brain_extraction_wf( # fmt: off wf.disconnect([ - (thr_brainmask, outputnode, [("output_image", "out_mask")]), - (inu_n4_final, outputnode, [("output_image", "bias_corrected"), - ("bias_image", "bias_image")]), - (apply_mask, outputnode, [("out_file", "out_file")]), + (thr_brainmask, outputnode, [('output_image', 'out_mask')]), + (inu_n4_final, outputnode, [('output_image', 'bias_corrected'), + ('bias_image', 'bias_image')]), + (apply_mask, outputnode, [('out_file', 'out_file')]), ]) wf.connect([ - (inputnode, atropos_wf, [("in_files", "inputnode.in_files")]), - (inu_n4_final, atropos_wf, [("output_image", 
"inputnode.in_corrected")]), - (thr_brainmask, atropos_wf, [("output_image", "inputnode.in_mask")]), + (inputnode, atropos_wf, [('in_files', 'inputnode.in_files')]), + (inu_n4_final, atropos_wf, [('output_image', 'inputnode.in_corrected')]), + (thr_brainmask, atropos_wf, [('output_image', 'inputnode.in_mask')]), (atropos_wf, outputnode, [ - ("outputnode.out_file", "out_file"), - ("outputnode.bias_corrected", "bias_corrected"), - ("outputnode.bias_image", "bias_image"), - ("outputnode.out_mask", "out_mask"), - ("outputnode.out_segm", "out_segm"), - ("outputnode.out_tpms", "out_tpms"), + ('outputnode.out_file', 'out_file'), + ('outputnode.bias_corrected', 'bias_corrected'), + ('outputnode.bias_image', 'bias_image'), + ('outputnode.out_mask', 'out_mask'), + ('outputnode.out_segm', 'out_segm'), + ('outputnode.out_tpms', 'out_tpms'), ]), ]) # fmt: on if wm_tpm: # fmt: off wf.connect([ - (map_wmmask, atropos_wf, [("output_image", "inputnode.wm_prior")]), + (map_wmmask, atropos_wf, [('output_image', 'inputnode.wm_prior')]), ]) # fmt: on return wf def init_atropos_wf( - name="atropos_wf", + name='atropos_wf', use_random_seed=True, omp_nthreads=None, mem_gb=3.0, padding=10, - in_segmentation_model=tuple(ATROPOS_MODELS["T1w"].values()), + in_segmentation_model=T1W_MODEL, bspline_fitting_distance=200, wm_prior=False, ): @@ -589,30 +589,28 @@ def init_atropos_wf( """ wf = pe.Workflow(name) - out_fields = ["bias_corrected", "bias_image", "out_mask", "out_segm", "out_tpms"] + out_fields = ['bias_corrected', 'bias_image', 'out_mask', 'out_segm', 'out_tpms'] inputnode = pe.Node( - niu.IdentityInterface( - fields=["in_files", "in_corrected", "in_mask", "wm_prior"] - ), - name="inputnode", + niu.IdentityInterface(fields=['in_files', 'in_corrected', 'in_mask', 'wm_prior']), + name='inputnode', ) outputnode = pe.Node( - niu.IdentityInterface(fields=["out_file"] + out_fields), name="outputnode" + niu.IdentityInterface(fields=['out_file'] + out_fields), name='outputnode' ) copy_xform = pe.Node( - CopyXForm(fields=out_fields), name="copy_xform", run_without_submitting=True + CopyXForm(fields=out_fields), name='copy_xform', run_without_submitting=True ) # Morphological dilation, radius=2 dil_brainmask = pe.Node( - ImageMath(operation="MD", op2="2", copy_header=True), name="dil_brainmask" + ImageMath(operation='MD', op2='2', copy_header=True), name='dil_brainmask' ) # Get largest connected component get_brainmask = pe.Node( - ImageMath(operation="GetLargestComponent", copy_header=True), - name="get_brainmask", + ImageMath(operation='GetLargestComponent', copy_header=True), + name='get_brainmask', ) # Run atropos (core node) @@ -620,8 +618,8 @@ def init_atropos_wf( Atropos( convergence_threshold=0.0, dimension=3, - initialization="KMeans", - likelihood_model="Gaussian", + initialization='KMeans', + likelihood_model='Gaussian', mrf_radius=[1, 1, 1], mrf_smoothing_factor=0.1, n_iterations=3, @@ -629,42 +627,40 @@ def init_atropos_wf( save_posteriors=True, use_random_seed=use_random_seed, ), - name="01_atropos", + name='01_atropos', n_procs=omp_nthreads, mem_gb=mem_gb, ) # massage outputs pad_segm = pe.Node( - ImageMath(operation="PadImage", op2=f"{padding}", copy_header=False), - name="02_pad_segm", + ImageMath(operation='PadImage', op2=f'{padding}', copy_header=False), + name='02_pad_segm', ) pad_mask = pe.Node( - ImageMath(operation="PadImage", op2=f"{padding}", copy_header=False), - name="03_pad_mask", + ImageMath(operation='PadImage', op2=f'{padding}', copy_header=False), + name='03_pad_mask', ) # Split 
segmentation in binary masks sel_labels = pe.Node( - niu.Function( - function=_select_labels, output_names=["out_wm", "out_gm", "out_csf"] - ), - name="04_sel_labels", + niu.Function(function=_select_labels, output_names=['out_wm', 'out_gm', 'out_csf']), + name='04_sel_labels', ) sel_labels.inputs.labels = list(reversed(in_segmentation_model[1:])) # Select largest components (GM, WM) # ImageMath ${DIMENSION} ${EXTRACTION_WM} GetLargestComponent ${EXTRACTION_WM} - get_wm = pe.Node(ImageMath(operation="GetLargestComponent"), name="05_get_wm") - get_gm = pe.Node(ImageMath(operation="GetLargestComponent"), name="06_get_gm") + get_wm = pe.Node(ImageMath(operation='GetLargestComponent'), name='05_get_wm') + get_gm = pe.Node(ImageMath(operation='GetLargestComponent'), name='06_get_gm') # Fill holes and calculate intersection # ImageMath ${DIMENSION} ${EXTRACTION_TMP} FillHoles ${EXTRACTION_GM} 2 # MultiplyImages ${DIMENSION} ${EXTRACTION_GM} ${EXTRACTION_TMP} ${EXTRACTION_GM} - fill_gm = pe.Node(ImageMath(operation="FillHoles", op2="2"), name="07_fill_gm") + fill_gm = pe.Node(ImageMath(operation='FillHoles', op2='2'), name='07_fill_gm') mult_gm = pe.Node( - MultiplyImages(dimension=3, output_product_image="08_mult_gm.nii.gz"), - name="08_mult_gm", + MultiplyImages(dimension=3, output_product_image='08_mult_gm.nii.gz'), + name='08_mult_gm', ) # MultiplyImages ${DIMENSION} ${EXTRACTION_WM} ${ATROPOS_WM_CLASS_LABEL} ${EXTRACTION_WM} @@ -673,78 +669,72 @@ def init_atropos_wf( MultiplyImages( dimension=3, second_input=in_segmentation_model[-1], - output_product_image="09_relabel_wm.nii.gz", + output_product_image='09_relabel_wm.nii.gz', ), - name="09_relabel_wm", + name='09_relabel_wm', ) - me_csf = pe.Node(ImageMath(operation="ME", op2="10"), name="10_me_csf") + me_csf = pe.Node(ImageMath(operation='ME', op2='10'), name='10_me_csf') # ImageMath ${DIMENSION} ${EXTRACTION_GM} addtozero ${EXTRACTION_GM} ${EXTRACTION_TMP} # MultiplyImages ${DIMENSION} ${EXTRACTION_GM} ${ATROPOS_GM_CLASS_LABEL} ${EXTRACTION_GM} # ImageMath ${DIMENSION} ${EXTRACTION_SEGMENTATION} addtozero ${EXTRACTION_WM} ${EXTRACTION_GM} - add_gm = pe.Node(ImageMath(operation="addtozero"), name="11_add_gm") + add_gm = pe.Node(ImageMath(operation='addtozero'), name='11_add_gm') relabel_gm = pe.Node( MultiplyImages( dimension=3, second_input=in_segmentation_model[-2], - output_product_image="12_relabel_gm.nii.gz", + output_product_image='12_relabel_gm.nii.gz', ), - name="12_relabel_gm", + name='12_relabel_gm', ) - add_gm_wm = pe.Node(ImageMath(operation="addtozero"), name="13_add_gm_wm") + add_gm_wm = pe.Node(ImageMath(operation='addtozero'), name='13_add_gm_wm') # Superstep 7 # Split segmentation in binary masks sel_labels2 = pe.Node( - niu.Function(function=_select_labels, output_names=["out_gm", "out_wm"]), - name="14_sel_labels2", + niu.Function(function=_select_labels, output_names=['out_gm', 'out_wm']), + name='14_sel_labels2', ) sel_labels2.inputs.labels = in_segmentation_model[2:] # ImageMath ${DIMENSION} ${EXTRACTION_MASK} addtozero ${EXTRACTION_MASK} ${EXTRACTION_TMP} - add_7 = pe.Node(ImageMath(operation="addtozero"), name="15_add_7") + add_7 = pe.Node(ImageMath(operation='addtozero'), name='15_add_7') # ImageMath ${DIMENSION} ${EXTRACTION_MASK} ME ${EXTRACTION_MASK} 2 - me_7 = pe.Node(ImageMath(operation="ME", op2="2"), name="16_me_7") + me_7 = pe.Node(ImageMath(operation='ME', op2='2'), name='16_me_7') # ImageMath ${DIMENSION} ${EXTRACTION_MASK} GetLargestComponent ${EXTRACTION_MASK} - comp_7 = 
pe.Node(ImageMath(operation="GetLargestComponent"), name="17_comp_7") + comp_7 = pe.Node(ImageMath(operation='GetLargestComponent'), name='17_comp_7') # ImageMath ${DIMENSION} ${EXTRACTION_MASK} MD ${EXTRACTION_MASK} 4 - md_7 = pe.Node(ImageMath(operation="MD", op2="4"), name="18_md_7") + md_7 = pe.Node(ImageMath(operation='MD', op2='4'), name='18_md_7') # ImageMath ${DIMENSION} ${EXTRACTION_MASK} FillHoles ${EXTRACTION_MASK} 2 - fill_7 = pe.Node(ImageMath(operation="FillHoles", op2="2"), name="19_fill_7") + fill_7 = pe.Node(ImageMath(operation='FillHoles', op2='2'), name='19_fill_7') # ImageMath ${DIMENSION} ${EXTRACTION_MASK} addtozero ${EXTRACTION_MASK} \ # ${EXTRACTION_MASK_PRIOR_WARPED} - add_7_2 = pe.Node(ImageMath(operation="addtozero"), name="20_add_7_2") + add_7_2 = pe.Node(ImageMath(operation='addtozero'), name='20_add_7_2') # ImageMath ${DIMENSION} ${EXTRACTION_MASK} MD ${EXTRACTION_MASK} 5 - md_7_2 = pe.Node(ImageMath(operation="MD", op2="5"), name="21_md_7_2") + md_7_2 = pe.Node(ImageMath(operation='MD', op2='5'), name='21_md_7_2') # ImageMath ${DIMENSION} ${EXTRACTION_MASK} ME ${EXTRACTION_MASK} 5 - me_7_2 = pe.Node(ImageMath(operation="ME", op2="5"), name="22_me_7_2") + me_7_2 = pe.Node(ImageMath(operation='ME', op2='5'), name='22_me_7_2') # De-pad depad_mask = pe.Node( - ImageMath(operation="PadImage", op2="-%d" % padding), name="23_depad_mask" + ImageMath(operation='PadImage', op2='-%d' % padding), name='23_depad_mask' ) depad_segm = pe.Node( - ImageMath(operation="PadImage", op2="-%d" % padding), name="24_depad_segm" - ) - depad_gm = pe.Node( - ImageMath(operation="PadImage", op2="-%d" % padding), name="25_depad_gm" - ) - depad_wm = pe.Node( - ImageMath(operation="PadImage", op2="-%d" % padding), name="26_depad_wm" - ) - depad_csf = pe.Node( - ImageMath(operation="PadImage", op2="-%d" % padding), name="27_depad_csf" + ImageMath(operation='PadImage', op2='-%d' % padding), name='24_depad_segm' ) + depad_gm = pe.Node(ImageMath(operation='PadImage', op2='-%d' % padding), name='25_depad_gm') + depad_wm = pe.Node(ImageMath(operation='PadImage', op2='-%d' % padding), name='26_depad_wm') + depad_csf = pe.Node(ImageMath(operation='PadImage', op2='-%d' % padding), name='27_depad_csf') - msk_conform = pe.Node(niu.Function(function=_conform_mask), name="msk_conform") - merge_tpms = pe.Node(niu.Merge(in_segmentation_model[0]), name="merge_tpms") + msk_conform = pe.Node(niu.Function(function=_conform_mask), name='msk_conform') + merge_tpms = pe.Node(niu.Merge(in_segmentation_model[0]), name='merge_tpms') - sel_wm = pe.Node(niu.Select(), name="sel_wm", run_without_submitting=True) + sel_wm = pe.Node(niu.Select(), name='sel_wm', run_without_submitting=True) if not wm_prior: sel_wm.inputs.index = in_segmentation_model[-1] - 1 copy_xform_wm = pe.Node( - CopyXForm(fields=["wm_map"]), name="copy_xform_wm", run_without_submitting=True + CopyXForm(fields=['wm_map']), name='copy_xform_wm', run_without_submitting=True ) # Refine INU correction @@ -759,8 +749,8 @@ def init_atropos_wf( bspline_fitting_distance=bspline_fitting_distance, ), n_procs=omp_nthreads, - name="inu_n4_final", - iterfield=["input_image"], + name='inu_n4_final', + iterfield=['input_image'], ) try: @@ -768,74 +758,75 @@ def init_atropos_wf( except ValueError: warn( "N4BiasFieldCorrection's --rescale-intensities option was added in ANTS 2.1.0 " - f"({inu_n4_final.interface.version} found.) Please consider upgrading.", + f'({inu_n4_final.interface.version} found.) 
Please consider upgrading.', UserWarning, + stacklevel=1, ) # Apply mask - apply_mask = pe.MapNode(ApplyMask(), iterfield=["in_file"], name="apply_mask") + apply_mask = pe.MapNode(ApplyMask(), iterfield=['in_file'], name='apply_mask') # fmt: off wf.connect([ - (inputnode, dil_brainmask, [("in_mask", "op1")]), - (inputnode, copy_xform, [(("in_files", _pop), "hdr_file")]), - (inputnode, copy_xform_wm, [(("in_files", _pop), "hdr_file")]), - (inputnode, pad_mask, [("in_mask", "op1")]), - (inputnode, atropos, [("in_corrected", "intensity_images")]), - (inputnode, inu_n4_final, [("in_files", "input_image")]), - (inputnode, msk_conform, [(("in_files", _pop), "in_reference")]), - (dil_brainmask, get_brainmask, [("output_image", "op1")]), - (get_brainmask, atropos, [("output_image", "mask_image")]), - (atropos, pad_segm, [("classified_image", "op1")]), - (pad_segm, sel_labels, [("output_image", "in_segm")]), - (sel_labels, get_wm, [("out_wm", "op1")]), - (sel_labels, get_gm, [("out_gm", "op1")]), - (get_gm, fill_gm, [("output_image", "op1")]), - (get_gm, mult_gm, [("output_image", "first_input")]), - (fill_gm, mult_gm, [("output_image", "second_input")]), - (get_wm, relabel_wm, [("output_image", "first_input")]), - (sel_labels, me_csf, [("out_csf", "op1")]), - (mult_gm, add_gm, [("output_product_image", "op1")]), - (me_csf, add_gm, [("output_image", "op2")]), - (add_gm, relabel_gm, [("output_image", "first_input")]), - (relabel_wm, add_gm_wm, [("output_product_image", "op1")]), - (relabel_gm, add_gm_wm, [("output_product_image", "op2")]), - (add_gm_wm, sel_labels2, [("output_image", "in_segm")]), - (sel_labels2, add_7, [("out_wm", "op1"), ("out_gm", "op2")]), - (add_7, me_7, [("output_image", "op1")]), - (me_7, comp_7, [("output_image", "op1")]), - (comp_7, md_7, [("output_image", "op1")]), - (md_7, fill_7, [("output_image", "op1")]), - (fill_7, add_7_2, [("output_image", "op1")]), - (pad_mask, add_7_2, [("output_image", "op2")]), - (add_7_2, md_7_2, [("output_image", "op1")]), - (md_7_2, me_7_2, [("output_image", "op1")]), - (me_7_2, depad_mask, [("output_image", "op1")]), - (add_gm_wm, depad_segm, [("output_image", "op1")]), - (relabel_wm, depad_wm, [("output_product_image", "op1")]), - (relabel_gm, depad_gm, [("output_product_image", "op1")]), - (sel_labels, depad_csf, [("out_csf", "op1")]), - (depad_csf, merge_tpms, [("output_image", "in1")]), - (depad_gm, merge_tpms, [("output_image", "in2")]), - (depad_wm, merge_tpms, [("output_image", "in3")]), - (depad_mask, msk_conform, [("output_image", "in_mask")]), - (msk_conform, copy_xform, [("out", "out_mask")]), - (depad_segm, copy_xform, [("output_image", "out_segm")]), - (merge_tpms, copy_xform, [("out", "out_tpms")]), - (atropos, sel_wm, [("posteriors", "inlist")]), - (sel_wm, copy_xform_wm, [("out", "wm_map")]), - (copy_xform_wm, inu_n4_final, [("wm_map", "weight_image")]), - (inu_n4_final, copy_xform, [("output_image", "bias_corrected"), - ("bias_image", "bias_image")]), - (copy_xform, apply_mask, [("bias_corrected", "in_file"), - ("out_mask", "in_mask")]), - (apply_mask, outputnode, [("out_file", "out_file")]), + (inputnode, dil_brainmask, [('in_mask', 'op1')]), + (inputnode, copy_xform, [(('in_files', _pop), 'hdr_file')]), + (inputnode, copy_xform_wm, [(('in_files', _pop), 'hdr_file')]), + (inputnode, pad_mask, [('in_mask', 'op1')]), + (inputnode, atropos, [('in_corrected', 'intensity_images')]), + (inputnode, inu_n4_final, [('in_files', 'input_image')]), + (inputnode, msk_conform, [(('in_files', _pop), 'in_reference')]), + (dil_brainmask, 
get_brainmask, [('output_image', 'op1')]), + (get_brainmask, atropos, [('output_image', 'mask_image')]), + (atropos, pad_segm, [('classified_image', 'op1')]), + (pad_segm, sel_labels, [('output_image', 'in_segm')]), + (sel_labels, get_wm, [('out_wm', 'op1')]), + (sel_labels, get_gm, [('out_gm', 'op1')]), + (get_gm, fill_gm, [('output_image', 'op1')]), + (get_gm, mult_gm, [('output_image', 'first_input')]), + (fill_gm, mult_gm, [('output_image', 'second_input')]), + (get_wm, relabel_wm, [('output_image', 'first_input')]), + (sel_labels, me_csf, [('out_csf', 'op1')]), + (mult_gm, add_gm, [('output_product_image', 'op1')]), + (me_csf, add_gm, [('output_image', 'op2')]), + (add_gm, relabel_gm, [('output_image', 'first_input')]), + (relabel_wm, add_gm_wm, [('output_product_image', 'op1')]), + (relabel_gm, add_gm_wm, [('output_product_image', 'op2')]), + (add_gm_wm, sel_labels2, [('output_image', 'in_segm')]), + (sel_labels2, add_7, [('out_wm', 'op1'), ('out_gm', 'op2')]), + (add_7, me_7, [('output_image', 'op1')]), + (me_7, comp_7, [('output_image', 'op1')]), + (comp_7, md_7, [('output_image', 'op1')]), + (md_7, fill_7, [('output_image', 'op1')]), + (fill_7, add_7_2, [('output_image', 'op1')]), + (pad_mask, add_7_2, [('output_image', 'op2')]), + (add_7_2, md_7_2, [('output_image', 'op1')]), + (md_7_2, me_7_2, [('output_image', 'op1')]), + (me_7_2, depad_mask, [('output_image', 'op1')]), + (add_gm_wm, depad_segm, [('output_image', 'op1')]), + (relabel_wm, depad_wm, [('output_product_image', 'op1')]), + (relabel_gm, depad_gm, [('output_product_image', 'op1')]), + (sel_labels, depad_csf, [('out_csf', 'op1')]), + (depad_csf, merge_tpms, [('output_image', 'in1')]), + (depad_gm, merge_tpms, [('output_image', 'in2')]), + (depad_wm, merge_tpms, [('output_image', 'in3')]), + (depad_mask, msk_conform, [('output_image', 'in_mask')]), + (msk_conform, copy_xform, [('out', 'out_mask')]), + (depad_segm, copy_xform, [('output_image', 'out_segm')]), + (merge_tpms, copy_xform, [('out', 'out_tpms')]), + (atropos, sel_wm, [('posteriors', 'inlist')]), + (sel_wm, copy_xform_wm, [('out', 'wm_map')]), + (copy_xform_wm, inu_n4_final, [('wm_map', 'weight_image')]), + (inu_n4_final, copy_xform, [('output_image', 'bias_corrected'), + ('bias_image', 'bias_image')]), + (copy_xform, apply_mask, [('bias_corrected', 'in_file'), + ('out_mask', 'in_mask')]), + (apply_mask, outputnode, [('out_file', 'out_file')]), (copy_xform, outputnode, [ - ("bias_corrected", "bias_corrected"), - ("bias_image", "bias_image"), - ("out_mask", "out_mask"), - ("out_segm", "out_segm"), - ("out_tpms", "out_tpms"), + ('bias_corrected', 'bias_corrected'), + ('bias_image', 'bias_image'), + ('out_mask', 'out_mask'), + ('out_segm', 'out_segm'), + ('out_tpms', 'out_tpms'), ]), ]) # fmt: on @@ -850,27 +841,27 @@ def _argmax(in_dice): match_wm = pe.Node( niu.Function(function=_matchlen), - name="match_wm", + name='match_wm', run_without_submitting=True, ) - overlap = pe.Node(FuzzyOverlap(), name="overlap", run_without_submitting=True) + overlap = pe.Node(FuzzyOverlap(), name='overlap', run_without_submitting=True) - apply_wm_prior = pe.Node(niu.Function(function=_improd), name="apply_wm_prior") + apply_wm_prior = pe.Node(niu.Function(function=_improd), name='apply_wm_prior') # fmt: off wf.disconnect([ - (copy_xform_wm, inu_n4_final, [("wm_map", "weight_image")]), + (copy_xform_wm, inu_n4_final, [('wm_map', 'weight_image')]), ]) wf.connect([ - (inputnode, apply_wm_prior, [("in_mask", "in_mask"), - ("wm_prior", "op2")]), - (inputnode, match_wm, [("wm_prior", 
"value")]), - (atropos, match_wm, [("posteriors", "reference")]), - (atropos, overlap, [("posteriors", "in_ref")]), - (match_wm, overlap, [("out", "in_tst")]), - (overlap, sel_wm, [(("class_fdi", _argmax), "index")]), - (copy_xform_wm, apply_wm_prior, [("wm_map", "op1")]), - (apply_wm_prior, inu_n4_final, [("out", "weight_image")]), + (inputnode, apply_wm_prior, [('in_mask', 'in_mask'), + ('wm_prior', 'op2')]), + (inputnode, match_wm, [('wm_prior', 'value')]), + (atropos, match_wm, [('posteriors', 'reference')]), + (atropos, overlap, [('posteriors', 'in_ref')]), + (match_wm, overlap, [('out', 'in_tst')]), + (overlap, sel_wm, [(('class_fdi', _argmax), 'index')]), + (copy_xform_wm, apply_wm_prior, [('wm_map', 'op1')]), + (apply_wm_prior, inu_n4_final, [('out', 'weight_image')]), ]) # fmt: on return wf @@ -880,9 +871,9 @@ def init_n4_only_wf( atropos_model=None, atropos_refine=True, atropos_use_random_seed=True, - bids_suffix="T1w", + bids_suffix='T1w', mem_gb=3.0, - name="n4_only_wf", + name='n4_only_wf', omp_nthreads=None, ): """ @@ -956,26 +947,24 @@ def init_n4_only_wf( wf = pe.Workflow(name) - inputnode = pe.Node( - niu.IdentityInterface(fields=["in_files", "in_mask"]), name="inputnode" - ) + inputnode = pe.Node(niu.IdentityInterface(fields=['in_files', 'in_mask']), name='inputnode') outputnode = pe.Node( niu.IdentityInterface( fields=[ - "out_file", - "out_mask", - "bias_corrected", - "bias_image", - "out_segm", - "out_tpms", + 'out_file', + 'out_mask', + 'bias_corrected', + 'bias_image', + 'out_segm', + 'out_tpms', ] ), - name="outputnode", + name='outputnode', ) # Create brain mask - thr_brainmask = pe.Node(Binarize(thresh_low=2), name="binarize") + thr_brainmask = pe.Node(Binarize(thresh_low=2), name='binarize') # INU correction inu_n4_final = pe.MapNode( @@ -989,8 +978,8 @@ def init_n4_only_wf( bspline_fitting_distance=200, ), n_procs=omp_nthreads, - name="inu_n4_final", - iterfield=["input_image"], + name='inu_n4_final', + iterfield=['input_image'], ) # Check ANTs version @@ -999,18 +988,19 @@ def init_n4_only_wf( except ValueError: warn( "N4BiasFieldCorrection's --rescale-intensities option was added in ANTS 2.1.0 " - f"({inu_n4_final.interface.version} found.) Please consider upgrading.", + f'({inu_n4_final.interface.version} found.) 
Please consider upgrading.', UserWarning, + stacklevel=1, ) # fmt: off wf.connect([ - (inputnode, inu_n4_final, [("in_files", "input_image")]), - (inputnode, thr_brainmask, [(("in_files", _pop), "in_file")]), - (thr_brainmask, outputnode, [("out_mask", "out_mask")]), - (inu_n4_final, outputnode, [("output_image", "out_file"), - ("output_image", "bias_corrected"), - ("bias_image", "bias_image")]), + (inputnode, inu_n4_final, [('in_files', 'input_image')]), + (inputnode, thr_brainmask, [(('in_files', _pop), 'in_file')]), + (thr_brainmask, outputnode, [('out_mask', 'out_mask')]), + (inu_n4_final, outputnode, [('output_image', 'out_file'), + ('output_image', 'bias_corrected'), + ('bias_image', 'bias_image')]), ]) # fmt: on @@ -1026,20 +1016,20 @@ def init_n4_only_wf( # fmt: off wf.disconnect([ - (inu_n4_final, outputnode, [("output_image", "out_file"), - ("output_image", "bias_corrected"), - ("bias_image", "bias_image")]), + (inu_n4_final, outputnode, [('output_image', 'out_file'), + ('output_image', 'bias_corrected'), + ('bias_image', 'bias_image')]), ]) wf.connect([ - (inputnode, atropos_wf, [("in_files", "inputnode.in_files")]), - (inu_n4_final, atropos_wf, [("output_image", "inputnode.in_corrected")]), - (thr_brainmask, atropos_wf, [("out_mask", "inputnode.in_mask")]), + (inputnode, atropos_wf, [('in_files', 'inputnode.in_files')]), + (inu_n4_final, atropos_wf, [('output_image', 'inputnode.in_corrected')]), + (thr_brainmask, atropos_wf, [('out_mask', 'inputnode.in_mask')]), (atropos_wf, outputnode, [ - ("outputnode.out_file", "out_file"), - ("outputnode.bias_corrected", "bias_corrected"), - ("outputnode.bias_image", "bias_image"), - ("outputnode.out_segm", "out_segm"), - ("outputnode.out_tpms", "out_tpms"), + ('outputnode.out_file', 'out_file'), + ('outputnode.bias_corrected', 'bias_corrected'), + ('outputnode.bias_image', 'bias_image'), + ('outputnode.out_segm', 'out_segm'), + ('outputnode.out_tpms', 'out_tpms'), ]), ]) # fmt: on @@ -1049,19 +1039,20 @@ def init_n4_only_wf( def _select_labels(in_segm, labels): from os import getcwd - import numpy as np + import nibabel as nb + import numpy as np from nipype.utils.filemanip import fname_presuffix out_files = [] cwd = getcwd() nii = nb.load(in_segm) - label_data = np.asanyarray(nii.dataobj).astype("uint8") + label_data = np.asanyarray(nii.dataobj).astype('uint8') for label in labels: newnii = nii.__class__(np.uint8(label_data == label), nii.affine, nii.header) - newnii.set_data_dtype("uint8") - out_file = fname_presuffix(in_segm, suffix="_class-%02d" % label, newpath=cwd) + newnii.set_data_dtype('uint8') + out_file = fname_presuffix(in_segm, suffix='_class-%02d' % label, newpath=cwd) newnii.to_filename(out_file) out_files.append(out_file) return out_files @@ -1070,14 +1061,15 @@ def _select_labels(in_segm, labels): def _conform_mask(in_mask, in_reference): """Ensures the mask headers make sense and match those of the T1w""" from pathlib import Path - import numpy as np + import nibabel as nb + import numpy as np from nipype.utils.filemanip import fname_presuffix ref = nb.load(in_reference) nii = nb.load(in_mask) hdr = nii.header.copy() - hdr.set_data_dtype("int16") + hdr.set_data_dtype('int16') hdr.set_slope_inter(1, 0) qform, qcode = ref.header.get_qform(coded=True) @@ -1088,15 +1080,15 @@ def _conform_mask(in_mask, in_reference): if scode is not None: hdr.set_sform(sform, int(scode)) - if "_maths" in in_mask: # Cut the name at first _maths occurrence - ext = "".join(Path(in_mask).suffixes) + if '_maths' in in_mask: # Cut the name at first 
_maths occurrence + ext = ''.join(Path(in_mask).suffixes) basename = Path(in_mask).name - in_mask = basename.split("_maths")[0] + ext + in_mask = basename.split('_maths')[0] + ext - out_file = fname_presuffix(in_mask, suffix="_mask", newpath=str(Path())) - nii.__class__( - np.asanyarray(nii.dataobj).astype("int16"), ref.affine, hdr - ).to_filename(out_file) + out_file = fname_presuffix(in_mask, suffix='_mask', newpath=str(Path())) + nii.__class__(np.asanyarray(nii.dataobj).astype('int16'), ref.affine, hdr).to_filename( + out_file + ) return out_file @@ -1109,14 +1101,14 @@ def _imsum(op1, op2, out_file=None): im1 = nb.load(op1) - data = im1.get_fdata(dtype="float32") + nb.load(op2).get_fdata(dtype="float32") + data = im1.get_fdata(dtype='float32') + nb.load(op2).get_fdata(dtype='float32') data /= data.max() nii = nb.Nifti1Image(data, im1.affine, im1.header) if out_file is None: from pathlib import Path - out_file = str((Path() / "summap.nii.gz").absolute()) + out_file = str((Path() / 'summap.nii.gz').absolute()) nii.to_filename(out_file) return out_file @@ -1127,7 +1119,7 @@ def _improd(op1, op2, in_mask, out_file=None): im1 = nb.load(op1) - data = im1.get_fdata(dtype="float32") * nb.load(op2).get_fdata(dtype="float32") + data = im1.get_fdata(dtype='float32') * nb.load(op2).get_fdata(dtype='float32') mskdata = nb.load(in_mask).get_fdata() > 0 data[~mskdata] = 0 data[data < 0] = 0 @@ -1138,7 +1130,7 @@ def _improd(op1, op2, in_mask, out_file=None): if out_file is None: from pathlib import Path - out_file = str((Path() / "prodmap.nii.gz").absolute()) + out_file = str((Path() / 'prodmap.nii.gz').absolute()) nii.to_filename(out_file) return out_file diff --git a/niworkflows/anat/coregistration.py b/niworkflows/anat/coregistration.py index 557ec856a11..35aab5d1fb2 100644 --- a/niworkflows/anat/coregistration.py +++ b/niworkflows/anat/coregistration.py @@ -21,20 +21,21 @@ # https://www.nipreps.org/community/licensing/ # """Workflow for the registration of EPI datasets to anatomical space via reconstructed surfaces.""" -from nipype.pipeline import engine as pe -from nipype.interfaces import utility as niu + from nipype import logging +from nipype.interfaces import utility as niu +from nipype.pipeline import engine as pe -LOGGER = logging.getLogger("workflow") +LOGGER = logging.getLogger('workflow') def init_bbreg_wf( *, omp_nthreads, debug=False, - epi2t1w_init="register", + epi2t1w_init='register', epi2t1w_dof=6, - name="bbreg_wf", + name='bbreg_wf', use_bbr=None, ): """ @@ -107,9 +108,13 @@ def init_bbreg_wf( # See https://github.com/nipreps/fmriprep/issues/768 from ..interfaces.freesurfer import ( PatchedBBRegisterRPT as BBRegisterRPT, - PatchedMRICoregRPT as MRICoregRPT, + ) + from ..interfaces.freesurfer import ( PatchedLTAConvert as LTAConvert, ) + from ..interfaces.freesurfer import ( + PatchedMRICoregRPT as MRICoregRPT, + ) from ..interfaces.nitransforms import ConcatenateXFMs workflow = Workflow(name=name) @@ -118,64 +123,60 @@ def init_bbreg_wf( `bbregister` (FreeSurfer) which implements boundary-based registration [@bbr]. Co-registration was configured with {dof} degrees of freedom{reason}. 
""".format( - dof={6: "six", 9: "nine", 12: "twelve"}[epi2t1w_dof], - reason="" + dof={6: 'six', 9: 'nine', 12: 'twelve'}[epi2t1w_dof], + reason='' if epi2t1w_dof == 6 - else "to account for distortions remaining in the EPI reference", + else 'to account for distortions remaining in the EPI reference', ) inputnode = pe.Node( niu.IdentityInterface( [ - "in_file", - "fsnative2t1w_xfm", - "subjects_dir", - "subject_id", # BBRegister - "t1w_dseg", # FLIRT BBR - "t1w_brain", # FLIRT BBR + 'in_file', + 'fsnative2t1w_xfm', + 'subjects_dir', + 'subject_id', # BBRegister + 't1w_dseg', # FLIRT BBR + 't1w_brain', # FLIRT BBR ] ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( - niu.IdentityInterface( - ["itk_epi_to_t1w", "itk_t1w_to_epi", "out_report", "fallback"] - ), - name="outputnode", + niu.IdentityInterface(['itk_epi_to_t1w', 'itk_t1w_to_epi', 'out_report', 'fallback']), + name='outputnode', ) - if epi2t1w_init not in ("register", "header"): - raise ValueError(f"Unknown EPI-T1w initialization option: {epi2t1w_init}") + if epi2t1w_init not in ('register', 'header'): + raise ValueError(f'Unknown EPI-T1w initialization option: {epi2t1w_init}') # For now make BBR unconditional - in the future, we can fall back to identity, # but adding the flexibility without testing seems a bit dangerous - if epi2t1w_init == "header": + if epi2t1w_init == 'header': if use_bbr is False: - raise ValueError("Cannot disable BBR and use header registration") + raise ValueError('Cannot disable BBR and use header registration') if use_bbr is None: - LOGGER.warning("Initializing BBR with header; affine fallback disabled") + LOGGER.warning('Initializing BBR with header; affine fallback disabled') use_bbr = True - merge_ltas = pe.Node(niu.Merge(2), name="merge_ltas", run_without_submitting=True) - concat_xfm = pe.Node(ConcatenateXFMs(inverse=True), name="concat_xfm") + merge_ltas = pe.Node(niu.Merge(2), name='merge_ltas', run_without_submitting=True) + concat_xfm = pe.Node(ConcatenateXFMs(inverse=True), name='concat_xfm') # fmt:off workflow.connect([ # Output ITK transforms - (inputnode, merge_ltas, [("fsnative2t1w_xfm", "in2")]), - (merge_ltas, concat_xfm, [("out", "in_xfms")]), - (concat_xfm, outputnode, [("out_xfm", "itk_epi_to_t1w")]), - (concat_xfm, outputnode, [("out_inv", "itk_t1w_to_epi")]), + (inputnode, merge_ltas, [('fsnative2t1w_xfm', 'in2')]), + (merge_ltas, concat_xfm, [('out', 'in_xfms')]), + (concat_xfm, outputnode, [('out_xfm', 'itk_epi_to_t1w')]), + (concat_xfm, outputnode, [('out_inv', 'itk_t1w_to_epi')]), ]) # fmt:on if debug is True: from ..interfaces.nibabel import RegridToZooms - downsample = pe.Node( - RegridToZooms(zooms=(4.0, 4.0, 4.0), smooth=True), name="downsample" - ) - workflow.connect([(inputnode, downsample, [("in_file", "in_file")])]) + downsample = pe.Node(RegridToZooms(zooms=(4.0, 4.0, 4.0), smooth=True), name='downsample') + workflow.connect([(inputnode, downsample, [('in_file', 'in_file')])]) mri_coreg = pe.Node( MRICoregRPT( @@ -185,31 +186,31 @@ def init_bbreg_wf( linmintol=0.01, generate_report=not use_bbr, ), - name="mri_coreg", + name='mri_coreg', n_procs=omp_nthreads, mem_gb=5, ) # Use mri_coreg - if epi2t1w_init == "register": + if epi2t1w_init == 'register': # fmt:off workflow.connect([ - (inputnode, mri_coreg, [("subjects_dir", "subjects_dir"), - ("subject_id", "subject_id")]), + (inputnode, mri_coreg, [('subjects_dir', 'subjects_dir'), + ('subject_id', 'subject_id')]), ]) # fmt:on if not debug: - workflow.connect(inputnode, "in_file", mri_coreg, "source_file") 
+ workflow.connect(inputnode, 'in_file', mri_coreg, 'source_file') else: - workflow.connect(downsample, "out_file", mri_coreg, "source_file") + workflow.connect(downsample, 'out_file', mri_coreg, 'source_file') # Short-circuit workflow building, use initial registration if use_bbr is False: # fmt:off workflow.connect([ - (mri_coreg, outputnode, [("out_report", "out_report")]), - (mri_coreg, merge_ltas, [("out_lta_file", "in1")]), + (mri_coreg, outputnode, [('out_report', 'out_report')]), + (mri_coreg, merge_ltas, [('out_lta_file', 'in1')]), ]) # fmt:on outputnode.inputs.fallback = True @@ -219,38 +220,38 @@ def init_bbreg_wf( bbregister = pe.Node( BBRegisterRPT( dof=epi2t1w_dof, - contrast_type="t2", + contrast_type='t2', registered_file=True, out_lta_file=True, generate_report=True, ), - name="bbregister", + name='bbregister', mem_gb=12, ) # fmt:off workflow.connect([ - (inputnode, bbregister, [("subjects_dir", "subjects_dir"), - ("subject_id", "subject_id")]), + (inputnode, bbregister, [('subjects_dir', 'subjects_dir'), + ('subject_id', 'subject_id')]), ]) # fmt:on if not debug: - workflow.connect(inputnode, "in_file", bbregister, "source_file") + workflow.connect(inputnode, 'in_file', bbregister, 'source_file') else: - workflow.connect(downsample, "out_file", bbregister, "source_file") + workflow.connect(downsample, 'out_file', bbregister, 'source_file') - if epi2t1w_init == "header": - bbregister.inputs.init = "header" + if epi2t1w_init == 'header': + bbregister.inputs.init = 'header' else: - workflow.connect([(mri_coreg, bbregister, [("out_lta_file", "init_reg_file")])]) + workflow.connect([(mri_coreg, bbregister, [('out_lta_file', 'init_reg_file')])]) # Short-circuit workflow building, use boundary-based registration if use_bbr is True: # fmt:off workflow.connect([ - (bbregister, outputnode, [("out_report", "out_report")]), - (bbregister, merge_ltas, [("out_lta_file", "in1")]), + (bbregister, outputnode, [('out_report', 'out_report')]), + (bbregister, merge_ltas, [('out_lta_file', 'in1')]), ]) # fmt:on @@ -258,41 +259,35 @@ def init_bbreg_wf( return workflow # Only reach this point if epi2t1w_init is "register" and use_bbr is None - transforms = pe.Node(niu.Merge(2), run_without_submitting=True, name="transforms") - reports = pe.Node(niu.Merge(2), run_without_submitting=True, name="reports") + transforms = pe.Node(niu.Merge(2), run_without_submitting=True, name='transforms') + reports = pe.Node(niu.Merge(2), run_without_submitting=True, name='reports') lta_ras2ras = pe.MapNode( - LTAConvert(out_lta=True), iterfield=["in_lta"], name="lta_ras2ras", mem_gb=2 - ) - compare_transforms = pe.Node( - niu.Function(function=compare_xforms), name="compare_transforms" + LTAConvert(out_lta=True), iterfield=['in_lta'], name='lta_ras2ras', mem_gb=2 ) + compare_transforms = pe.Node(niu.Function(function=compare_xforms), name='compare_transforms') - select_transform = pe.Node( - niu.Select(), run_without_submitting=True, name="select_transform" - ) - select_report = pe.Node( - niu.Select(), run_without_submitting=True, name="select_report" - ) + select_transform = pe.Node(niu.Select(), run_without_submitting=True, name='select_transform') + select_report = pe.Node(niu.Select(), run_without_submitting=True, name='select_report') # fmt:off workflow.connect([ - (bbregister, transforms, [("out_lta_file", "in1")]), - (mri_coreg, transforms, [("out_lta_file", "in2")]), + (bbregister, transforms, [('out_lta_file', 'in1')]), + (mri_coreg, transforms, [('out_lta_file', 'in2')]), # Normalize LTA transforms to 
RAS2RAS (inputs are VOX2VOX) and compare - (transforms, lta_ras2ras, [("out", "in_lta")]), - (lta_ras2ras, compare_transforms, [("out_lta", "lta_list")]), - (compare_transforms, outputnode, [("out", "fallback")]), + (transforms, lta_ras2ras, [('out', 'in_lta')]), + (lta_ras2ras, compare_transforms, [('out_lta', 'lta_list')]), + (compare_transforms, outputnode, [('out', 'fallback')]), # Select output transform - (transforms, select_transform, [("out", "inlist")]), - (compare_transforms, select_transform, [("out", "index")]), - (select_transform, merge_ltas, [("out", "in1")]), + (transforms, select_transform, [('out', 'inlist')]), + (compare_transforms, select_transform, [('out', 'index')]), + (select_transform, merge_ltas, [('out', 'in1')]), # Select output report - (bbregister, reports, [("out_report", "in1")]), - (mri_coreg, reports, [("out_report", "in2")]), - (reports, select_report, [("out", "inlist")]), - (compare_transforms, select_report, [("out", "index")]), - (select_report, outputnode, [("out", "out_report")]), + (bbregister, reports, [('out_report', 'in1')]), + (mri_coreg, reports, [('out_report', 'in2')]), + (reports, select_report, [('out', 'inlist')]), + (compare_transforms, select_report, [('out', 'index')]), + (select_report, outputnode, [('out', 'out_report')]), ]) # fmt:on @@ -329,9 +324,10 @@ def compare_xforms(lta_list, norm_threshold=15): second transform relative to the first (default: `15`) """ - from niworkflows.interfaces.surf import load_transform from nipype.algorithms.rapidart import _calc_norm_affine + from niworkflows.interfaces.surf import load_transform + bbr_affine = load_transform(lta_list[0]) fallback_affine = load_transform(lta_list[1]) diff --git a/niworkflows/anat/freesurfer.py b/niworkflows/anat/freesurfer.py index aba48ede7e0..ff46ea7332c 100644 --- a/niworkflows/anat/freesurfer.py +++ b/niworkflows/anat/freesurfer.py @@ -23,21 +23,24 @@ """FreeSurfer-related workflows.""" from os import getenv -from nipype.pipeline import engine as pe -from nipype.interfaces import utility as niu -from nipype.interfaces import io as nio + from nipype.interfaces import freesurfer as fs +from nipype.interfaces import io as nio +from nipype.interfaces import utility as niu +from nipype.pipeline import engine as pe from ..interfaces.freesurfer import ( MakeMidthickness, +) +from ..interfaces.freesurfer import ( PatchedRobustRegister as RobustRegister, ) from ..interfaces.surf import NormalizeSurf +SUBJECTS_DIR = getenv('SUBJECTS_DIR') -def init_gifti_surface_wf( - name="gifti_surface_wf", subjects_dir=getenv("SUBJECTS_DIR", None) -): + +def init_gifti_surface_wf(name='gifti_surface_wf', subjects_dir=SUBJECTS_DIR): """ Build a Nipype workflow to prepare GIFTI surfaces from FreeSurfer. 
@@ -85,71 +88,67 @@ def init_gifti_surface_wf( """ if subjects_dir is None: - raise RuntimeError("``$SUBJECTS_DIR`` must be set") + raise RuntimeError('``$SUBJECTS_DIR`` must be set') workflow = pe.Workflow(name=name) - inputnode = pe.Node( - niu.IdentityInterface(["in_t1w", "subject_id"]), name="inputnode" - ) + inputnode = pe.Node(niu.IdentityInterface(['in_t1w', 'subject_id']), name='inputnode') outputnode = pe.Node( - niu.IdentityInterface(["surfaces", "surf_norm", "fsnative_to_t1w_xfm"]), - name="outputnode", + niu.IdentityInterface(['surfaces', 'surf_norm', 'fsnative_to_t1w_xfm']), + name='outputnode', ) fssource = pe.Node( nio.FreeSurferSource(subjects_dir=subjects_dir), - name="fssource", + name='fssource', run_without_submitting=True, ) fsnative_2_t1_xfm = pe.Node( - RobustRegister(auto_sens=True, est_int_scale=True), name="fsnative_2_t1_xfm" + RobustRegister(auto_sens=True, est_int_scale=True), name='fsnative_2_t1_xfm' ) midthickness = pe.MapNode( - MakeMidthickness(thickness=True, distance=0.5, out_name="midthickness"), - iterfield="in_file", - name="midthickness", + MakeMidthickness(thickness=True, distance=0.5, out_name='midthickness'), + iterfield='in_file', + name='midthickness', ) save_midthickness = pe.Node( nio.DataSink(parameterization=False, base_directory=subjects_dir), - name="save_midthickness", + name='save_midthickness', run_without_submitting=True, ) surface_list = pe.Node( niu.Merge(4, ravel_inputs=True), - name="surface_list", + name='surface_list', run_without_submitting=True, ) - fs_2_gii = pe.MapNode( - fs.MRIsConvert(out_datatype="gii"), iterfield="in_file", name="fs_2_gii" - ) - fix_surfs = pe.MapNode(NormalizeSurf(), iterfield="in_file", name="fix_surfs") + fs_2_gii = pe.MapNode(fs.MRIsConvert(out_datatype='gii'), iterfield='in_file', name='fs_2_gii') + fix_surfs = pe.MapNode(NormalizeSurf(), iterfield='in_file', name='fix_surfs') # fmt: off workflow.connect([ - (inputnode, fssource, [("subject_id", "subject_id")]), - (inputnode, save_midthickness, [("subject_id", "container")]), + (inputnode, fssource, [('subject_id', 'subject_id')]), + (inputnode, save_midthickness, [('subject_id', 'container')]), # Generate fsnative-to-T1w transform - (inputnode, fsnative_2_t1_xfm, [("in_t1w", "target_file")]), - (fssource, fsnative_2_t1_xfm, [("orig", "source_file")]), + (inputnode, fsnative_2_t1_xfm, [('in_t1w', 'target_file')]), + (fssource, fsnative_2_t1_xfm, [('orig', 'source_file')]), # Generate midthickness surfaces and save to FreeSurfer derivatives - (fssource, midthickness, [("white", "in_file"), ("graymid", "graymid")]), - (midthickness, save_midthickness, [("out_file", "surf.@graymid")]), + (fssource, midthickness, [('white', 'in_file'), ('graymid', 'graymid')]), + (midthickness, save_midthickness, [('out_file', 'surf.@graymid')]), # Produce valid GIFTI surface files (dense mesh) (fssource, surface_list, [ - ("white", "in1"), ("pial", "in2"), ("inflated", "in3"), + ('white', 'in1'), ('pial', 'in2'), ('inflated', 'in3'), ]), - (save_midthickness, surface_list, [("out_file", "in4")]), - (surface_list, fs_2_gii, [("out", "in_file")]), - (fs_2_gii, fix_surfs, [("converted", "in_file")]), - (fsnative_2_t1_xfm, fix_surfs, [("out_reg_file", "transform_file")]), - (fsnative_2_t1_xfm, outputnode, [("out_reg_file", "fsnative_to_t1w_xfm")]), - (fix_surfs, outputnode, [("out_file", "surf_norm")]), - (fs_2_gii, outputnode, [("converted", "surfaces")]), + (save_midthickness, surface_list, [('out_file', 'in4')]), + (surface_list, fs_2_gii, [('out', 'in_file')]), + 
(fs_2_gii, fix_surfs, [('converted', 'in_file')]), + (fsnative_2_t1_xfm, fix_surfs, [('out_reg_file', 'transform_file')]), + (fsnative_2_t1_xfm, outputnode, [('out_reg_file', 'fsnative_to_t1w_xfm')]), + (fix_surfs, outputnode, [('out_file', 'surf_norm')]), + (fs_2_gii, outputnode, [('converted', 'surfaces')]), ]) # fmt: on diff --git a/niworkflows/anat/skullstrip.py b/niworkflows/anat/skullstrip.py index 7f2f54f39ea..b18b86ddf5c 100644 --- a/niworkflows/anat/skullstrip.py +++ b/niworkflows/anat/skullstrip.py @@ -21,13 +21,16 @@ # https://www.nipreps.org/community/licensing/ # """Brain extraction workflows.""" -from nipype.interfaces import afni, utility as niu + +from nipype.interfaces import afni +from nipype.interfaces import utility as niu from nipype.pipeline import engine as pe -from ..interfaces.nibabel import Binarize + from ..interfaces.fixes import FixN4BiasFieldCorrection as N4BiasFieldCorrection +from ..interfaces.nibabel import Binarize -def afni_wf(name="AFNISkullStripWorkflow", unifize=False, n4_nthreads=1): +def afni_wf(name='AFNISkullStripWorkflow', unifize=False, n4_nthreads=1): """ Create a skull-stripping workflow based on AFNI's tools. @@ -72,12 +75,10 @@ def afni_wf(name="AFNISkullStripWorkflow", unifize=False, n4_nthreads=1): """ workflow = pe.Workflow(name=name) - inputnode = pe.Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode") + inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']), name='inputnode') outputnode = pe.Node( - niu.IdentityInterface( - fields=["bias_corrected", "out_file", "out_mask", "bias_image"] - ), - name="outputnode", + niu.IdentityInterface(fields=['bias_corrected', 'out_file', 'out_mask', 'bias_image']), + name='outputnode', ) inu_n4 = pe.Node( @@ -89,51 +90,49 @@ def afni_wf(name="AFNISkullStripWorkflow", unifize=False, n4_nthreads=1): copy_header=True, ), n_procs=n4_nthreads, - name="inu_n4", + name='inu_n4', ) - sstrip = pe.Node(afni.SkullStrip(outputtype="NIFTI_GZ"), name="skullstrip") + sstrip = pe.Node(afni.SkullStrip(outputtype='NIFTI_GZ'), name='skullstrip') sstrip_orig_vol = pe.Node( - afni.Calc(expr="a*step(b)", outputtype="NIFTI_GZ"), name="sstrip_orig_vol" + afni.Calc(expr='a*step(b)', outputtype='NIFTI_GZ'), name='sstrip_orig_vol' ) - binarize = pe.Node(Binarize(thresh_low=0.0), name="binarize") + binarize = pe.Node(Binarize(thresh_low=0.0), name='binarize') if unifize: # Add two unifize steps, pre- and post- skullstripping. 
- inu_uni_0 = pe.Node( - afni.Unifize(outputtype="NIFTI_GZ"), name="unifize_pre_skullstrip" - ) + inu_uni_0 = pe.Node(afni.Unifize(outputtype='NIFTI_GZ'), name='unifize_pre_skullstrip') inu_uni_1 = pe.Node( - afni.Unifize(gm=True, outputtype="NIFTI_GZ"), name="unifize_post_skullstrip" + afni.Unifize(gm=True, outputtype='NIFTI_GZ'), name='unifize_post_skullstrip' ) # fmt: off workflow.connect([ - (inu_n4, inu_uni_0, [("output_image", "in_file")]), - (inu_uni_0, sstrip, [("out_file", "in_file")]), - (inu_uni_0, sstrip_orig_vol, [("out_file", "in_file_a")]), - (sstrip_orig_vol, inu_uni_1, [("out_file", "in_file")]), - (inu_uni_1, outputnode, [("out_file", "out_file")]), - (inu_uni_0, outputnode, [("out_file", "bias_corrected")]), + (inu_n4, inu_uni_0, [('output_image', 'in_file')]), + (inu_uni_0, sstrip, [('out_file', 'in_file')]), + (inu_uni_0, sstrip_orig_vol, [('out_file', 'in_file_a')]), + (sstrip_orig_vol, inu_uni_1, [('out_file', 'in_file')]), + (inu_uni_1, outputnode, [('out_file', 'out_file')]), + (inu_uni_0, outputnode, [('out_file', 'bias_corrected')]), ]) # fmt: on else: # fmt: off workflow.connect([ - (inputnode, sstrip_orig_vol, [("in_file", "in_file_a")]), - (inu_n4, sstrip, [("output_image", "in_file")]), - (sstrip_orig_vol, outputnode, [("out_file", "out_file")]), - (inu_n4, outputnode, [("output_image", "bias_corrected")]), + (inputnode, sstrip_orig_vol, [('in_file', 'in_file_a')]), + (inu_n4, sstrip, [('output_image', 'in_file')]), + (sstrip_orig_vol, outputnode, [('out_file', 'out_file')]), + (inu_n4, outputnode, [('output_image', 'bias_corrected')]), ]) # fmt: on # Remaining connections # fmt: off workflow.connect([ - (sstrip, sstrip_orig_vol, [("out_file", "in_file_b")]), - (inputnode, inu_n4, [("in_file", "input_image")]), - (sstrip_orig_vol, binarize, [("out_file", "in_file")]), - (binarize, outputnode, [("out_mask", "out_mask")]), - (inu_n4, outputnode, [("bias_image", "bias_image")]), + (sstrip, sstrip_orig_vol, [('out_file', 'in_file_b')]), + (inputnode, inu_n4, [('in_file', 'input_image')]), + (sstrip_orig_vol, binarize, [('out_file', 'in_file')]), + (binarize, outputnode, [('out_mask', 'out_mask')]), + (inu_n4, outputnode, [('bias_image', 'bias_image')]), ]) # fmt: on return workflow diff --git a/niworkflows/cli/boldref.py b/niworkflows/cli/boldref.py index c329680f00f..5ac0ffba87e 100644 --- a/niworkflows/cli/boldref.py +++ b/niworkflows/cli/boldref.py @@ -21,48 +21,48 @@ # https://www.nipreps.org/community/licensing/ # """Run the BOLD reference+mask workflow""" + import os def get_parser(): """Build parser object.""" - from argparse import ArgumentParser - from argparse import RawTextHelpFormatter, RawDescriptionHelpFormatter + from argparse import ArgumentParser, RawDescriptionHelpFormatter, RawTextHelpFormatter parser = ArgumentParser( description="""NiWorkflows Utilities""", formatter_class=RawTextHelpFormatter ) - subparsers = parser.add_subparsers(dest="command") + subparsers = parser.add_subparsers(dest='command') be_parser = subparsers.add_parser( - "brain-extract", + 'brain-extract', formatter_class=RawDescriptionHelpFormatter, description="""Execute brain extraction and related operations (e.g., \ intensity nonuniformity correction, robust averaging, etc.)""", ) - be_parser.add_argument("input_file", action="store", help="the input file") - be_parser.add_argument("out_path", action="store", help="the output directory") + be_parser.add_argument('input_file', action='store', help='the input file') + be_parser.add_argument('out_path', action='store', 
help='the output directory') be_parser.add_argument( - "--modality", - "-m", - action="store", - choices=("bold", "t1w"), - default="bold", - help="the input file", + '--modality', + '-m', + action='store', + choices=('bold', 't1w'), + default='bold', + help='the input file', ) parser.add_argument( - "--omp-nthreads", - action="store", + '--omp-nthreads', + action='store', type=int, default=os.cpu_count(), - help="Number of CPUs available to individual processes", + help='Number of CPUs available to individual processes', ) parser.add_argument( - "--nprocs", - action="store", + '--nprocs', + action='store', type=int, default=os.cpu_count(), - help="Number of processes that may run in parallel", + help='Number of processes that may run in parallel', ) return parser @@ -71,25 +71,28 @@ def get_parser(): def main(args=None): """Entry point.""" from nipype.utils.filemanip import hash_infile + from ..func.util import init_bold_reference_wf opts = get_parser().parse_args(args=args) wf = init_bold_reference_wf( - opts.omp_nthreads, gen_report=True, name=hash_infile(opts.input_file), + opts.omp_nthreads, + gen_report=True, + name=hash_infile(opts.input_file), ) wf.inputs.inputnode.bold_file = opts.input_file wf.base_dir = os.getcwd() plugin = { - "plugin": "MultiProc", - "plugin_args": {"nprocs": opts.nprocs}, + 'plugin': 'MultiProc', + 'plugin_args': {'nprocs': opts.nprocs}, } if opts.nprocs < 2: - plugin = {"plugin": "Linear"} + plugin = {'plugin': 'Linear'} wf.run(**plugin) -if __name__ == "__main__": +if __name__ == '__main__': from sys import argv main(args=argv[1:]) diff --git a/niworkflows/conftest.py b/niworkflows/conftest.py index 75a19fe26db..0e072421edc 100644 --- a/niworkflows/conftest.py +++ b/niworkflows/conftest.py @@ -21,14 +21,16 @@ # https://www.nipreps.org/community/licensing/ # """py.test configuration""" + import os -from sys import version_info +import tempfile from pathlib import Path -import numpy as np +from sys import version_info + import nibabel as nb +import numpy as np import pandas as pd import pytest -import tempfile from . 
import load_resource @@ -44,47 +46,47 @@ def find_resource_or_skip(resource): pathlike = load_resource(resource) if not pathlike.exists(): - pytest.skip(f"Missing resource {resource}; run this test from a source repository") + pytest.skip(f'Missing resource {resource}; run this test from a source repository') return pathlike -@pytest.fixture(scope="session", autouse=True) +@pytest.fixture(scope='session', autouse=True) def legacy_printoptions(): from packaging.version import Version - if Version(np.__version__) >= Version("1.22"): - np.set_printoptions(legacy="1.21") + if Version(np.__version__) >= Version('1.22'): + np.set_printoptions(legacy='1.21') @pytest.fixture(autouse=True) -def add_np(doctest_namespace): - from .utils.bids import collect_data +def _add_np(doctest_namespace): from .testing import data_dir, data_dir_canary + from .utils.bids import collect_data - doctest_namespace["PY_VERSION"] = version_info - doctest_namespace["np"] = np - doctest_namespace["nb"] = nb - doctest_namespace["pd"] = pd - doctest_namespace["os"] = os - doctest_namespace["pytest"] = pytest - doctest_namespace["importlib_resources"] = importlib_resources - doctest_namespace["find_resource_or_skip"] = find_resource_or_skip - doctest_namespace["Path"] = Path - doctest_namespace["datadir"] = data_dir - doctest_namespace["data_dir_canary"] = data_dir_canary - doctest_namespace["bids_collect_data"] = collect_data - doctest_namespace["test_data"] = load_resource('tests/data') + doctest_namespace['PY_VERSION'] = version_info + doctest_namespace['np'] = np + doctest_namespace['nb'] = nb + doctest_namespace['pd'] = pd + doctest_namespace['os'] = os + doctest_namespace['pytest'] = pytest + doctest_namespace['importlib_resources'] = importlib_resources + doctest_namespace['find_resource_or_skip'] = find_resource_or_skip + doctest_namespace['Path'] = Path + doctest_namespace['datadir'] = data_dir + doctest_namespace['data_dir_canary'] = data_dir_canary + doctest_namespace['bids_collect_data'] = collect_data + doctest_namespace['test_data'] = load_resource('tests/data') tmpdir = tempfile.TemporaryDirectory() - doctest_namespace["tmpdir"] = tmpdir.name + doctest_namespace['tmpdir'] = tmpdir.name - nifti_fname = str(Path(tmpdir.name) / "test.nii.gz") - nii = nb.Nifti1Image(np.random.random((5, 5)).astype("f4"), np.eye(4)) + nifti_fname = str(Path(tmpdir.name) / 'test.nii.gz') + nii = nb.Nifti1Image(np.random.random((5, 5)).astype('f4'), np.eye(4)) nii.header.set_qform(np.diag([1, 1, 1, 1]), code=1) nii.header.set_sform(np.diag([-1, 1, 1, 1]), code=1) nii.to_filename(nifti_fname) - doctest_namespace["nifti_fname"] = nifti_fname + doctest_namespace['nifti_fname'] = nifti_fname cwd = os.getcwd() os.chdir(tmpdir.name) @@ -96,23 +98,27 @@ def add_np(doctest_namespace): @pytest.fixture def testdata_dir(): from .testing import data_dir + return data_dir @pytest.fixture def ds000030_dir(): - from .testing import test_data_env, data_env_canary + from .testing import data_env_canary, test_data_env + data_env_canary() - return Path(test_data_env) / "ds000030" + return Path(test_data_env) / 'ds000030' @pytest.fixture def workdir(): from .testing import test_workdir + return None if test_workdir is None else Path(test_workdir) @pytest.fixture def outdir(): from .testing import test_output_dir + return None if test_output_dir is None else Path(test_output_dir) diff --git a/niworkflows/engine/__init__.py b/niworkflows/engine/__init__.py index c8a53565712..04b57e55181 100644 --- a/niworkflows/engine/__init__.py +++ 
b/niworkflows/engine/__init__.py @@ -1,7 +1,7 @@ -#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The fmriprep reporting engine for visual assessment """ + from .workflows import LiterateWorkflow as Workflow diff --git a/niworkflows/engine/plugin.py b/niworkflows/engine/plugin.py index 14c2547f0c3..ba7e31e6007 100644 --- a/niworkflows/engine/plugin.py +++ b/niworkflows/engine/plugin.py @@ -22,14 +22,14 @@ # """A lightweight NiPype MultiProc execution plugin.""" +import gc +import multiprocessing as mp import os import sys +from concurrent.futures import ProcessPoolExecutor from copy import deepcopy from time import sleep, time -import multiprocessing as mp -from concurrent.futures import ProcessPoolExecutor from traceback import format_exception -import gc from nipype.utils.misc import str2bool @@ -54,14 +54,14 @@ def run_node(node, updatehash, taskid): """ # Init variables - result = dict(result=None, traceback=None, taskid=taskid) + result = {'result': None, 'traceback': None, 'taskid': taskid} # Try and execute the node via node.run() try: - result["result"] = node.run(updatehash=updatehash) + result['result'] = node.run(updatehash=updatehash) except: # noqa: E722, intendedly catch all here - result["traceback"] = format_exception(*sys.exc_info()) - result["result"] = node.result + result['traceback'] = format_exception(*sys.exc_info()) + result['result'] = node.result # Return the result dictionary return result @@ -76,7 +76,7 @@ def __init__(self, plugin_args=None): plugin_args = {} self.plugin_args = plugin_args self._config = None - self._status_callback = plugin_args.get("status_callback") + self._status_callback = plugin_args.get('status_callback') def run(self, graph, config, updatehash=False): """ @@ -143,7 +143,7 @@ def __init__(self, plugin_args=None): self.proc_done = None self.proc_pending = None self.pending_tasks = [] - self.max_jobs = self.plugin_args.get("max_jobs", None) + self.max_jobs = self.plugin_args.get('max_jobs', None) def _prerun_check(self, graph): """Stub method to validate/massage graph and nodes before running.""" @@ -156,7 +156,7 @@ def run(self, graph, config, updatehash=False): import numpy as np self._config = config - poll_sleep_secs = float(config["execution"]["poll_sleep_duration"]) + poll_sleep_secs = float(config['execution']['poll_sleep_duration']) self._prerun_check(graph) # Generate appropriate structures for worker-manager model @@ -180,15 +180,16 @@ def run(self, graph, config, updatehash=False): errors.append(exc) else: if result: - if result["traceback"]: + if result['traceback']: notrun.append(self._clean_queue(jobid, graph, result=result)) - errors.append("".join(result["traceback"])) + errors.append(''.join(result['traceback'])) else: self._task_finished_cb(jobid) self._remove_node_dirs() self._clear_task(taskid) else: - assert self.proc_done[jobid] and self.proc_pending[jobid] + assert self.proc_done[jobid] + assert self.proc_pending[jobid] toappend.insert(0, (taskid, jobid)) if toappend: @@ -214,7 +215,7 @@ def run(self, graph, config, updatehash=False): if len(errors) > 1: error, cause = ( - RuntimeError(f"{len(errors)} raised. Re-raising first."), + RuntimeError(f'{len(errors)} raised. 
Re-raising first.'), error, ) @@ -231,8 +232,8 @@ def _report_crash(self, node, result=None): tb = None if result is not None: - node._result = result["result"] - tb = result["traceback"] + node._result = result['result'] + tb = result['traceback'] node._traceback = tb return report_crash(node, traceback=tb) @@ -241,16 +242,16 @@ def _clear_task(self, taskid): def _clean_queue(self, jobid, graph, result=None): if self._status_callback: - self._status_callback(self.procs[jobid], "exception") + self._status_callback(self.procs[jobid], 'exception') if result is None: result = { - "result": None, - "traceback": "\n".join(format_exception(*sys.exc_info())), + 'result': None, + 'traceback': '\n'.join(format_exception(*sys.exc_info())), } crashfile = self._report_crash(self.procs[jobid], result=result) - if str2bool(self._config["execution"]["stop_on_first_crash"]): - raise RuntimeError("".join(result["traceback"])) + if str2bool(self._config['execution']['stop_on_first_crash']): + raise RuntimeError(''.join(result['traceback'])) if jobid in self.mapnodesubids: # remove current jobid self.proc_pending[jobid] = False @@ -279,11 +280,11 @@ def _submit_mapnode(self, jobid): self.procs.extend(mapnodesubids) self.depidx = ssp.vstack( (self.depidx, ssp.lil_matrix(np.zeros((numnodes, self.depidx.shape[1])))), - "lil", + 'lil', ) self.depidx = ssp.hstack( (self.depidx, ssp.lil_matrix(np.zeros((self.depidx.shape[0], numnodes)))), - "lil", + 'lil', ) self.depidx[-numnodes:, jobid] = 1 self.proc_done = np.concatenate((self.proc_done, np.zeros(numnodes, dtype=bool))) @@ -291,7 +292,7 @@ def _submit_mapnode(self, jobid): return False def _local_hash_check(self, jobid, graph): - if not str2bool(self.procs[jobid].config["execution"]["local_hash_check"]): + if not str2bool(self.procs[jobid].config['execution']['local_hash_check']): return False try: @@ -319,7 +320,7 @@ def _task_finished_cb(self, jobid, cached=False): This is called when a job is completed. 
""" if self._status_callback: - self._status_callback(self.procs[jobid], "end") + self._status_callback(self.procs[jobid], 'end') # Update job and worker queues self.proc_pending[jobid] = False # update the job dependency structure @@ -342,7 +343,7 @@ def _generate_dependency_list(self, graph): from networkx import to_scipy_sparse_matrix as to_scipy_sparse_array self.procs, _ = topological_sort(graph) - self.depidx = to_scipy_sparse_array(graph, nodelist=self.procs, format="lil") + self.depidx = to_scipy_sparse_array(graph, nodelist=self.procs, format='lil') self.refidx = self.depidx.astype(int) self.proc_done = np.zeros(len(self.procs), dtype=bool) self.proc_pending = np.zeros(len(self.procs), dtype=bool) @@ -354,19 +355,20 @@ def _remove_node_deps(self, jobid, crashfile, graph): dfs_preorder = nx.dfs_preorder except AttributeError: dfs_preorder = nx.dfs_preorder_nodes - subnodes = [s for s in dfs_preorder(graph, self.procs[jobid])] + subnodes = list(dfs_preorder(graph, self.procs[jobid])) for node in subnodes: idx = self.procs.index(node) self.proc_done[idx] = True self.proc_pending[idx] = False - return dict(node=self.procs[jobid], dependents=subnodes, crashfile=crashfile) + return {'node': self.procs[jobid], 'dependents': subnodes, 'crashfile': crashfile} def _remove_node_dirs(self): """Remove directories whose outputs have already been used up.""" - import numpy as np from shutil import rmtree - if str2bool(self._config["execution"]["remove_node_directories"]): + import numpy as np + + if str2bool(self._config['execution']['remove_node_directories']): indices = np.nonzero((self.refidx.sum(axis=1) == 0).__array__())[0] for idx in indices: if idx in self.mapnodesubids: @@ -430,9 +432,10 @@ def __init__(self, pool=None, plugin_args=None): # Retrieve a nipreps-style configuration object try: - config = plugin_args["app_config"] + config = plugin_args['app_config'] except (KeyError, TypeError): from types import SimpleNamespace + from nipype.utils.profiler import get_system_total_memory_gb config = SimpleNamespace( @@ -447,15 +450,15 @@ def __init__(self, pool=None, plugin_args=None): ) # Read in options or set defaults. 
- self.processors = self.plugin_args.get("n_procs", mp.cpu_count()) + self.processors = self.plugin_args.get('n_procs', mp.cpu_count()) self.memory_gb = self.plugin_args.get( - "memory_gb", # Allocate 90% of system memory + 'memory_gb', # Allocate 90% of system memory config.environment.total_memory * 0.9, ) - self.raise_insufficient = self.plugin_args.get("raise_insufficient", False) + self.raise_insufficient = self.plugin_args.get('raise_insufficient', False) # Instantiate different thread pools for non-daemon processes - mp_context = mp.get_context(self.plugin_args.get("mp_context")) + mp_context = mp.get_context(self.plugin_args.get('mp_context')) self.pool = pool or ProcessPoolExecutor( max_workers=self.processors, initializer=config._process_initializer, @@ -467,7 +470,7 @@ def __init__(self, pool=None, plugin_args=None): def _async_callback(self, args): result = args.result() - self._taskresult[result["taskid"]] = result + self._taskresult[result['taskid']] = result def _get_result(self, taskid): return self._taskresult.get(taskid) @@ -479,8 +482,8 @@ def _submit_job(self, node, updatehash=False): self._taskid += 1 # Don't allow streaming outputs - if getattr(node.interface, "terminal_output", "") == "stream": - node.interface.terminal_output = "allatonce" + if getattr(node.interface, 'terminal_output', '') == 'stream': + node.interface.terminal_output = 'allatonce' result_future = self.pool.submit(run_node, node, updatehash, self._taskid) result_future.add_done_callback(self._async_callback) @@ -501,7 +504,7 @@ def _prerun_check(self, graph): np.any(np.array(tasks_mem_gb) > self.memory_gb) or np.any(np.array(tasks_num_th) > self.processors) ): - raise RuntimeError("Insufficient resources available for job") + raise RuntimeError('Insufficient resources available for job') def _postrun_check(self): self.pool.shutdown() @@ -545,7 +548,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): if len(jobids) + len(self.pending_tasks) == 0: return - jobids = self._sort_jobs(jobids, scheduler=self.plugin_args.get("scheduler")) + jobids = self._sort_jobs(jobids, scheduler=self.plugin_args.get('scheduler')) # Run garbage collector before potentially submitting jobs gc.collect() @@ -553,13 +556,13 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): # Submit jobs for jobid in jobids: # First expand mapnodes - if self.procs[jobid].__class__.__name__ == "MapNode": + if self.procs[jobid].__class__.__name__ == 'MapNode': try: num_subnodes = self.procs[jobid].num_subnodes() except Exception: traceback = format_exception(*sys.exc_info()) self._clean_queue( - jobid, graph, result={"result": None, "traceback": traceback} + jobid, graph, result={'result': None, 'traceback': traceback} ) self.proc_pending[jobid] = False continue @@ -593,7 +596,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): except Exception: traceback = format_exception(*sys.exc_info()) self._clean_queue( - jobid, graph, result={"result": None, "traceback": traceback} + jobid, graph, result={'result': None, 'traceback': traceback} ) # Release resources @@ -611,7 +614,7 @@ def _send_procs_to_workers(self, updatehash=False, graph=None): # Task should be submitted to workers # Send job to task manager and add to pending tasks if self._status_callback: - self._status_callback(self.procs[jobid], "start") + self._status_callback(self.procs[jobid], 'start') tid = self._submit_job(deepcopy(self.procs[jobid]), updatehash=updatehash) if tid is None: self.proc_done[jobid] = False @@ -621,8 +624,8 @@ 
def _send_procs_to_workers(self, updatehash=False, graph=None): # Display stats next loop self._stats = None - def _sort_jobs(self, jobids, scheduler="tsort"): - if scheduler == "mem_thread": + def _sort_jobs(self, jobids, scheduler='tsort'): + if scheduler == 'mem_thread': return sorted( jobids, key=lambda item: (self.procs[item].mem_gb, self.procs[item].n_procs), diff --git a/niworkflows/engine/tests/test_plugin.py b/niworkflows/engine/tests/test_plugin.py index 6956ba19c6f..4ac852aee2c 100644 --- a/niworkflows/engine/tests/test_plugin.py +++ b/niworkflows/engine/tests/test_plugin.py @@ -2,8 +2,8 @@ from types import SimpleNamespace import pytest -from nipype.pipeline import engine as pe from nipype.interfaces import utility as niu +from nipype.pipeline import engine as pe from ..plugin import MultiProcPlugin @@ -21,36 +21,36 @@ def addall(inlist): @pytest.fixture def workflow(tmp_path): - workflow = pe.Workflow(name="test_wf", base_dir=tmp_path) + workflow = pe.Workflow(name='test_wf', base_dir=tmp_path) - inputnode = pe.Node(niu.IdentityInterface(fields=["x", "y"]), name="inputnode") - outputnode = pe.Node(niu.IdentityInterface(fields=["z"]), name="outputnode") + inputnode = pe.Node(niu.IdentityInterface(fields=['x', 'y']), name='inputnode') + outputnode = pe.Node(niu.IdentityInterface(fields=['z']), name='outputnode') # Generate many nodes and claim a lot of memory add_nd = pe.MapNode( - niu.Function(function=add, input_names=["x", "y"], output_names=["z"]), - name="add", - iterfield=["x"], + niu.Function(function=add, input_names=['x', 'y'], output_names=['z']), + name='add', + iterfield=['x'], mem_gb=0.8, ) # Regular node - sum_nd = pe.Node(niu.Function(function=addall, input_names=["inlist"]), name="sum") + sum_nd = pe.Node(niu.Function(function=addall, input_names=['inlist']), name='sum') # Run without submitting is another code path add_more_nd = pe.Node( - niu.Function(function=add, input_names=["x", "y"], output_names=["z"]), - name="add_more", + niu.Function(function=add, input_names=['x', 'y'], output_names=['z']), + name='add_more', run_without_submitting=True, ) workflow.connect( [ - (inputnode, add_nd, [("x", "x"), ("y", "y")]), - (add_nd, sum_nd, [("z", "inlist")]), - (sum_nd, add_more_nd, [("out", "x")]), - (inputnode, add_more_nd, [("y", "y")]), - (add_more_nd, outputnode, [("z", "z")]), + (inputnode, add_nd, [('x', 'x'), ('y', 'y')]), + (add_nd, sum_nd, [('z', 'inlist')]), + (sum_nd, add_more_nd, [('out', 'x')]), + (inputnode, add_more_nd, [('y', 'y')]), + (add_more_nd, outputnode, [('z', 'z')]), ] ) @@ -58,42 +58,41 @@ def workflow(tmp_path): inputnode.inputs.y = 4 # Avoid unnecessary sleeps - workflow.config["execution"]["poll_sleep_duration"] = 0 + workflow.config['execution']['poll_sleep_duration'] = 0 return workflow def test_plugin_defaults(workflow, caplog): """Test the plugin works without any arguments.""" - caplog.set_level(logging.CRITICAL, logger="nipype.workflow") + caplog.set_level(logging.CRITICAL, logger='nipype.workflow') workflow.run(plugin=MultiProcPlugin()) def test_plugin_args_noconfig(workflow, caplog): """Test the plugin works with typical nipype arguments.""" - caplog.set_level(logging.CRITICAL, logger="nipype.workflow") - workflow.run(plugin=MultiProcPlugin(plugin_args={"n_procs": 2, "memory_gb": 0.1})) + caplog.set_level(logging.CRITICAL, logger='nipype.workflow') + workflow.run(plugin=MultiProcPlugin(plugin_args={'n_procs': 2, 'memory_gb': 0.1})) def touch_file(file_path: str) -> None: """Module-level functions play more nicely with 
multiprocessing.""" - with open(file_path, "w") as f: - f.write("flag") + with open(file_path, 'w') as f: + f.write('flag') def test_plugin_app_config(tmp_path, workflow, caplog): """Test the plugin works with a nipreps-style configuration.""" - init_flag = tmp_path / "init_flag.txt" + init_flag = tmp_path / 'init_flag.txt' app_config = SimpleNamespace( environment=SimpleNamespace(total_memory=1), _process_initializer=touch_file, file_path=str(init_flag), ) - caplog.set_level(logging.INFO, logger="nipype.workflow") - workflow.run( - plugin=MultiProcPlugin(plugin_args={"n_procs": 2, "app_config": app_config}) - ) + caplog.set_level(logging.INFO, logger='nipype.workflow') + workflow.run(plugin=MultiProcPlugin(plugin_args={'n_procs': 2, 'app_config': app_config})) - assert init_flag.exists() and init_flag.read_text() == "flag" + assert init_flag.exists() + assert init_flag.read_text() == 'flag' diff --git a/niworkflows/engine/tests/test_workflows.py b/niworkflows/engine/tests/test_workflows.py index 5d6ff8404d3..b5cdc05f83a 100644 --- a/niworkflows/engine/tests/test_workflows.py +++ b/niworkflows/engine/tests/test_workflows.py @@ -21,26 +21,27 @@ # https://www.nipreps.org/community/licensing/ # """Test the LiterateWorkflow.""" + +from nipype.interfaces import afni +from nipype.interfaces import utility as niu from nipype.pipeline.engine import Node -from nipype.interfaces import afni, utility as niu + from ..workflows import LiterateWorkflow as Workflow -def _reorient_wf(name="ReorientWorkflow"): +def _reorient_wf(name='ReorientWorkflow'): """A workflow to reorient images to 'RPI' orientation.""" workflow = Workflow(name=name) - workflow.__desc__ = "Inner workflow. " - inputnode = Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode") - outputnode = Node(niu.IdentityInterface(fields=["out_file"]), name="outputnode") - deoblique = Node(afni.Refit(deoblique=True), name="deoblique") - reorient = Node( - afni.Resample(orientation="RPI", outputtype="NIFTI_GZ"), name="reorient" - ) + workflow.__desc__ = 'Inner workflow. ' + inputnode = Node(niu.IdentityInterface(fields=['in_file']), name='inputnode') + outputnode = Node(niu.IdentityInterface(fields=['out_file']), name='outputnode') + deoblique = Node(afni.Refit(deoblique=True), name='deoblique') + reorient = Node(afni.Resample(orientation='RPI', outputtype='NIFTI_GZ'), name='reorient') workflow.connect( [ - (inputnode, deoblique, [("in_file", "in_file")]), - (deoblique, reorient, [("out_file", "in_file")]), - (reorient, outputnode, [("out_file", "out_file")]), + (inputnode, deoblique, [('in_file', 'in_file')]), + (deoblique, reorient, [('out_file', 'in_file')]), + (reorient, outputnode, [('out_file', 'out_file')]), ] ) return workflow @@ -48,20 +49,17 @@ def _reorient_wf(name="ReorientWorkflow"): def test_boilerplate(): """Check the boilerplate is generated.""" - workflow = Workflow(name="test") - workflow.__desc__ = "Outer workflow. " - workflow.__postdesc__ = "Outer workflow (postdesc)." + workflow = Workflow(name='test') + workflow.__desc__ = 'Outer workflow. ' + workflow.__postdesc__ = 'Outer workflow (postdesc).' - inputnode = Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode") + inputnode = Node(niu.IdentityInterface(fields=['in_file']), name='inputnode') inner = _reorient_wf() # fmt: off workflow.connect([ - (inputnode, inner, [("in_file", "inputnode.in_file")]), + (inputnode, inner, [('in_file', 'inputnode.in_file')]), ]) # fmt: on - assert ( - workflow.visit_desc() - == "Outer workflow. Inner workflow. 
Outer workflow (postdesc)." - ) + assert workflow.visit_desc() == 'Outer workflow. Inner workflow. Outer workflow (postdesc).' diff --git a/niworkflows/engine/workflows.py b/niworkflows/engine/workflows.py index 9f02f3674d8..dda14ba8f6c 100644 --- a/niworkflows/engine/workflows.py +++ b/niworkflows/engine/workflows.py @@ -25,6 +25,7 @@ Add special features to the Nipype's vanilla workflows """ + from nipype.pipeline import engine as pe @@ -63,4 +64,4 @@ def visit_desc(self): if self.__postdesc__: desc += [self.__postdesc__] - return "".join(desc) + return ''.join(desc) diff --git a/niworkflows/func/tests/test_util.py b/niworkflows/func/tests/test_util.py old mode 100755 new mode 100644 index 76fd652a3a1..2242ccc9b99 --- a/niworkflows/func/tests/test_util.py +++ b/niworkflows/func/tests/test_util.py @@ -21,47 +21,41 @@ # https://www.nipreps.org/community/licensing/ # """Testing module for fmriprep.workflows.bold.util.""" -import pytest + import os from pathlib import Path from shutil import which import numpy as np -from nipype.pipeline import engine as pe -from nipype.utils.filemanip import fname_presuffix, copyfile +import pytest from nilearn.image import load_img +from nipype.pipeline import engine as pe +from nipype.utils.filemanip import copyfile, fname_presuffix from ...interfaces.reportlets.masks import ROIsPlot from ...workflows.epi.refmap import init_epi_reference_wf - from ..util import init_enhance_and_skullstrip_bold_wf -datapath = os.getenv("FMRIPREP_REGRESSION_SOURCE") +datapath = os.getenv('FMRIPREP_REGRESSION_SOURCE') parameters = [] if datapath: datapath = Path(datapath) bold_datasets = [] - for ds in datapath.glob("ds*/"): - paths = [p for p in ds.glob("*_bold.nii.gz") if p.exists()] - subjects = set([p.name.replace("sub-", "").split("_")[0] for p in paths]) + for ds in datapath.glob('ds*/'): + paths = [p for p in ds.glob('*_bold.nii.gz') if p.exists()] + subjects = {p.name.replace('sub-', '').split('_')[0] for p in paths} for sub in subjects: - subject_data = [p for p in paths if p.name.startswith(f"sub-{sub}")] + subject_data = [p for p in paths if p.name.startswith(f'sub-{sub}')] se_epi = sorted( - [ - str(p.relative_to(datapath)) - for p in subject_data - if "echo-" not in p.name - ] + [str(p.relative_to(datapath)) for p in subject_data if 'echo-' not in p.name] ) if se_epi: bold_datasets.append(se_epi) - meecho = sorted( - [str(p.relative_to(datapath)) for p in paths if "echo-" in p.name] - ) + meecho = sorted([str(p.relative_to(datapath)) for p in paths if 'echo-' in p.name]) if meecho: bold_datasets.append([meecho[0]]) @@ -72,23 +66,21 @@ str( ( datapath - / "derivatives" - / path.replace("_echo-1", "").replace("_bold.nii", "_bold_mask.nii") + / 'derivatives' + / path.replace('_echo-1', '').replace('_bold.nii', '_bold_mask.nii') ).absolute() ) ) - bold_datasets = [ - [str((datapath / p).absolute()) for p in ds] for ds in bold_datasets - ] + bold_datasets = [[str((datapath / p).absolute()) for p in ds] for ds in bold_datasets] parameters = zip(bold_datasets, exp_masks) if not bold_datasets: raise RuntimeError( - f"Data folder <{datapath}> was provided, but no images were found. " - "Folder contents:\n{}".format( - "\n".join([str(p) for p in datapath.glob("ds*/*.nii.gz")]) + f'Data folder <{datapath}> was provided, but no images were found. 
' + 'Folder contents:\n{}'.format( + '\n'.join([str(p) for p in datapath.glob('ds*/*.nii.gz')]) ) ) @@ -105,10 +97,10 @@ def symmetric_overlap(img1, img2): @pytest.mark.skipif( not datapath, - reason="FMRIPREP_REGRESSION_SOURCE env var not set, or no data is available", + reason='FMRIPREP_REGRESSION_SOURCE env var not set, or no data is available', ) -@pytest.mark.skipif(not which("antsAI"), reason="antsAI executable not found") -@pytest.mark.parametrize("input_fname,expected_fname", parameters) +@pytest.mark.skipif(not which('antsAI'), reason='antsAI executable not found') +@pytest.mark.parametrize(('input_fname', 'expected_fname'), parameters) def test_masking(input_fname, expected_fname): """Check for regressions in masking.""" from nipype import config as ncfg @@ -117,40 +109,36 @@ def test_masking(input_fname, expected_fname): dsname = Path(expected_fname).parent.name # Reconstruct base_fname from above - reports_dir = Path(os.getenv("FMRIPREP_REGRESSION_REPORTS", "")) + reports_dir = Path(os.getenv('FMRIPREP_REGRESSION_REPORTS', '')) newpath = reports_dir / dsname newpath.mkdir(parents=True, exist_ok=True) # Nipype config (logs and execution) ncfg.update_config( { - "execution": { - "crashdump_dir": str(newpath), + 'execution': { + 'crashdump_dir': str(newpath), } } ) - wf = pe.Workflow(name=basename.replace("_bold.nii.gz", "").replace("-", "_")) - base_dir = os.getenv("CACHED_WORK_DIRECTORY") + wf = pe.Workflow(name=basename.replace('_bold.nii.gz', '').replace('-', '_')) + base_dir = os.getenv('CACHED_WORK_DIRECTORY') if base_dir: base_dir = Path(base_dir) / dsname base_dir.mkdir(parents=True, exist_ok=True) wf.base_dir = str(base_dir) - epi_reference_wf = init_epi_reference_wf( - omp_nthreads=os.cpu_count(), auto_bold_nss=True - ) + epi_reference_wf = init_epi_reference_wf(omp_nthreads=os.cpu_count(), auto_bold_nss=True) epi_reference_wf.inputs.inputnode.in_files = input_fname enhance_and_skullstrip_bold_wf = init_enhance_and_skullstrip_bold_wf() out_fname = fname_presuffix( - Path(expected_fname).name, suffix=".svg", use_ext=False, newpath=str(newpath) + Path(expected_fname).name, suffix='.svg', use_ext=False, newpath=str(newpath) ) - mask_diff_plot = pe.Node( - ROIsPlot(colors=["limegreen"], levels=[0.5]), name="mask_diff_plot" - ) + mask_diff_plot = pe.Node(ROIsPlot(colors=['limegreen'], levels=[0.5]), name='mask_diff_plot') mask_diff_plot.always_run = True mask_diff_plot.inputs.in_mask = expected_fname mask_diff_plot.inputs.out_report = out_fname @@ -158,22 +146,22 @@ def test_masking(input_fname, expected_fname): # fmt:off wf.connect([ (epi_reference_wf, enhance_and_skullstrip_bold_wf, [ - ("outputnode.epi_ref_file", "inputnode.in_file") + ('outputnode.epi_ref_file', 'inputnode.in_file') ]), (enhance_and_skullstrip_bold_wf, mask_diff_plot, [ - ("outputnode.bias_corrected_file", "in_file"), - ("outputnode.mask_file", "in_rois"), + ('outputnode.bias_corrected_file', 'in_file'), + ('outputnode.mask_file', 'in_rois'), ]), ]) - res = wf.run(plugin="MultiProc") + res = wf.run(plugin='MultiProc') - combine_masks = [node for node in res.nodes if node.name.endswith("combine_masks")][ + combine_masks = [node for node in res.nodes if node.name.endswith('combine_masks')][ 0 ] overlap = symmetric_overlap(expected_fname, combine_masks.result.outputs.out_file) - mask_dir = reports_dir / "fmriprep_bold_mask" / dsname + mask_dir = reports_dir / 'fmriprep_bold_mask' / dsname mask_dir.mkdir(parents=True, exist_ok=True) copyfile( combine_masks.result.outputs.out_file, diff --git 
a/niworkflows/func/util.py b/niworkflows/func/util.py index 19321b87704..c4924f5af31 100644 --- a/niworkflows/func/util.py +++ b/niworkflows/func/util.py @@ -21,18 +21,23 @@ # https://www.nipreps.org/community/licensing/ # """Utility workflows.""" -from packaging.version import parse as parseversion, Version +from nipype.interfaces import afni, fsl +from nipype.interfaces import utility as niu from nipype.pipeline import engine as pe -from nipype.interfaces import utility as niu, fsl, afni - +from packaging.version import Version +from packaging.version import parse as parseversion from templateflow.api import get as get_template from .. import data from ..engine.workflows import LiterateWorkflow as Workflow from ..interfaces.fixes import ( - FixHeaderRegistration as Registration, FixHeaderApplyTransforms as ApplyTransforms, +) +from ..interfaces.fixes import ( + FixHeaderRegistration as Registration, +) +from ..interfaces.fixes import ( FixN4BiasFieldCorrection as N4BiasFieldCorrection, ) from ..interfaces.header import CopyHeader, CopyXForm, ValidateImage @@ -40,7 +45,6 @@ from ..utils.connections import listify from ..utils.misc import pass_dummy_scans as _pass_dummy_scans - DEFAULT_MEMORY_MIN_GB = 0.01 @@ -51,7 +55,7 @@ def init_bold_reference_wf( brainmask_thresh=0.85, pre_mask=False, multiecho=False, - name="bold_reference_wf", + name='bold_reference_wf', gen_report=False, ): """ @@ -140,9 +144,9 @@ def init_bold_reference_wf( * :py:func:`~niworkflows.func.util.init_enhance_and_skullstrip_wf` """ - from ..utils.connections import pop_file as _pop from ..interfaces.bold import NonsteadyStatesDetector from ..interfaces.images import RobustAverage + from ..utils.connections import pop_file as _pop workflow = Workflow(name=name) workflow.__desc__ = f"""\ @@ -152,27 +156,25 @@ def init_bold_reference_wf( """ inputnode = pe.Node( - niu.IdentityInterface( - fields=["bold_file", "bold_mask", "dummy_scans", "sbref_file"] - ), - name="inputnode", + niu.IdentityInterface(fields=['bold_file', 'bold_mask', 'dummy_scans', 'sbref_file']), + name='inputnode', ) outputnode = pe.Node( niu.IdentityInterface( fields=[ - "bold_file", - "all_bold_files", - "raw_ref_image", - "skip_vols", - "algo_dummy_scans", - "ref_image", - "ref_image_brain", - "bold_mask", - "validation_report", - "mask_report", + 'bold_file', + 'all_bold_files', + 'raw_ref_image', + 'skip_vols', + 'algo_dummy_scans', + 'ref_image', + 'ref_image_brain', + 'bold_mask', + 'validation_report', + 'mask_report', ] ), - name="outputnode", + name='outputnode', ) # Simplify manually setting input image @@ -181,13 +183,13 @@ def init_bold_reference_wf( val_bold = pe.MapNode( ValidateImage(), - name="val_bold", + name='val_bold', mem_gb=DEFAULT_MEMORY_MIN_GB, - iterfield=["in_file"], + iterfield=['in_file'], ) - get_dummy = pe.Node(NonsteadyStatesDetector(), name="get_dummy") - gen_avg = pe.Node(RobustAverage(), name="gen_avg", mem_gb=1) + get_dummy = pe.Node(NonsteadyStatesDetector(), name='get_dummy') + gen_avg = pe.Node(RobustAverage(), name='gen_avg', mem_gb=1) enhance_and_skullstrip_bold_wf = init_enhance_and_skullstrip_bold_wf( brainmask_thresh=brainmask_thresh, @@ -196,41 +198,41 @@ def init_bold_reference_wf( ) calc_dummy_scans = pe.Node( - niu.Function(function=_pass_dummy_scans, output_names=["skip_vols_num"]), - name="calc_dummy_scans", + niu.Function(function=_pass_dummy_scans, output_names=['skip_vols_num']), + name='calc_dummy_scans', run_without_submitting=True, mem_gb=DEFAULT_MEMORY_MIN_GB, ) # fmt: off workflow.connect([ - 
(inputnode, val_bold, [(("bold_file", listify), "in_file")]), - (inputnode, get_dummy, [(("bold_file", _pop), "in_file")]), - (inputnode, enhance_and_skullstrip_bold_wf, [("bold_mask", "inputnode.pre_mask")]), - (inputnode, calc_dummy_scans, [("dummy_scans", "dummy_scans")]), - (gen_avg, enhance_and_skullstrip_bold_wf, [("out_file", "inputnode.in_file")]), - (get_dummy, calc_dummy_scans, [("n_dummy", "algo_dummy_scans")]), - (calc_dummy_scans, outputnode, [("skip_vols_num", "skip_vols")]), - (gen_avg, outputnode, [("out_file", "raw_ref_image")]), - (get_dummy, outputnode, [("n_dummy", "algo_dummy_scans")]), - (val_bold, outputnode, [(("out_file", _pop), "bold_file"), - ("out_file", "all_bold_files"), - (("out_report", _pop), "validation_report")]), + (inputnode, val_bold, [(('bold_file', listify), 'in_file')]), + (inputnode, get_dummy, [(('bold_file', _pop), 'in_file')]), + (inputnode, enhance_and_skullstrip_bold_wf, [('bold_mask', 'inputnode.pre_mask')]), + (inputnode, calc_dummy_scans, [('dummy_scans', 'dummy_scans')]), + (gen_avg, enhance_and_skullstrip_bold_wf, [('out_file', 'inputnode.in_file')]), + (get_dummy, calc_dummy_scans, [('n_dummy', 'algo_dummy_scans')]), + (calc_dummy_scans, outputnode, [('skip_vols_num', 'skip_vols')]), + (gen_avg, outputnode, [('out_file', 'raw_ref_image')]), + (get_dummy, outputnode, [('n_dummy', 'algo_dummy_scans')]), + (val_bold, outputnode, [(('out_file', _pop), 'bold_file'), + ('out_file', 'all_bold_files'), + (('out_report', _pop), 'validation_report')]), (enhance_and_skullstrip_bold_wf, outputnode, [ - ("outputnode.bias_corrected_file", "ref_image"), - ("outputnode.mask_file", "bold_mask"), - ("outputnode.skull_stripped_file", "ref_image_brain"), + ('outputnode.bias_corrected_file', 'ref_image'), + ('outputnode.mask_file', 'bold_mask'), + ('outputnode.skull_stripped_file', 'ref_image_brain'), ]), ]) # fmt: on if gen_report: - mask_reportlet = pe.Node(SimpleShowMaskRPT(), name="mask_reportlet") + mask_reportlet = pe.Node(SimpleShowMaskRPT(), name='mask_reportlet') # fmt: off workflow.connect([ (enhance_and_skullstrip_bold_wf, mask_reportlet, [ - ("outputnode.bias_corrected_file", "background_file"), - ("outputnode.mask_file", "mask_file"), + ('outputnode.bias_corrected_file', 'background_file'), + ('outputnode.mask_file', 'mask_file'), ]), ]) # fmt: on @@ -238,8 +240,8 @@ def init_bold_reference_wf( if not sbref_files: # fmt: off workflow.connect([ - (val_bold, gen_avg, [(("out_file", _pop), "in_file")]), # pop first echo of ME-EPI - (get_dummy, gen_avg, [("t_mask", "t_mask")]), + (val_bold, gen_avg, [(('out_file', _pop), 'in_file')]), # pop first echo of ME-EPI + (get_dummy, gen_avg, [('t_mask', 't_mask')]), ]) # fmt: on return workflow @@ -254,17 +256,17 @@ def init_bold_reference_wf( val_sbref = pe.MapNode( ValidateImage(), - name="val_sbref", + name='val_sbref', mem_gb=DEFAULT_MEMORY_MIN_GB, - iterfield=["in_file"], + iterfield=['in_file'], ) - merge_sbrefs = pe.Node(MergeSeries(), name="merge_sbrefs") + merge_sbrefs = pe.Node(MergeSeries(), name='merge_sbrefs') # fmt: off workflow.connect([ - (inputnode, val_sbref, [(("sbref_file", listify), "in_file")]), - (val_sbref, merge_sbrefs, [("out_file", "in_files")]), - (merge_sbrefs, gen_avg, [("out_file", "in_file")]), + (inputnode, val_sbref, [(('sbref_file', listify), 'in_file')]), + (val_sbref, merge_sbrefs, [('out_file', 'in_files')]), + (merge_sbrefs, gen_avg, [('out_file', 'in_file')]), ]) # fmt: on @@ -279,7 +281,7 @@ def init_bold_reference_wf( def init_enhance_and_skullstrip_bold_wf( 
brainmask_thresh=0.5, - name="enhance_and_skullstrip_bold_wf", + name='enhance_and_skullstrip_bold_wf', omp_nthreads=1, pre_mask=False, ): @@ -366,164 +368,154 @@ def init_enhance_and_skullstrip_bold_wf( from niworkflows.interfaces.nibabel import ApplyMask, BinaryDilation workflow = Workflow(name=name) - inputnode = pe.Node( - niu.IdentityInterface(fields=["in_file", "pre_mask"]), name="inputnode" - ) + inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'pre_mask']), name='inputnode') outputnode = pe.Node( - niu.IdentityInterface( - fields=["mask_file", "skull_stripped_file", "bias_corrected_file"] - ), - name="outputnode", + niu.IdentityInterface(fields=['mask_file', 'skull_stripped_file', 'bias_corrected_file']), + name='outputnode', ) # Run N4 normally, force num_threads=1 for stability (images are small, no need for >1) n4_correct = pe.Node( - N4BiasFieldCorrection( - dimension=3, copy_header=True, bspline_fitting_distance=200 - ), + N4BiasFieldCorrection(dimension=3, copy_header=True, bspline_fitting_distance=200), shrink_factor=2, - name="n4_correct", + name='n4_correct', n_procs=1, ) n4_correct.inputs.rescale_intensities = True # Create a generous BET mask out of the bias-corrected EPI - skullstrip_first_pass = pe.Node( - fsl.BET(frac=0.2, mask=True), name="skullstrip_first_pass" - ) - first_dilate = pe.Node(BinaryDilation(radius=6), name="first_dilate") - first_mask = pe.Node(ApplyMask(), name="first_mask") + skullstrip_first_pass = pe.Node(fsl.BET(frac=0.2, mask=True), name='skullstrip_first_pass') + first_dilate = pe.Node(BinaryDilation(radius=6), name='first_dilate') + first_mask = pe.Node(ApplyMask(), name='first_mask') # Use AFNI's unifize for T2 contrast & fix header unifize = pe.Node( afni.Unifize( t2=True, - outputtype="NIFTI_GZ", + outputtype='NIFTI_GZ', # Default -clfrac is 0.1, 0.4 was too conservative # -rbt because I'm a Jedi AFNI Master (see 3dUnifize's documentation) - args="-clfrac 0.2 -rbt 18.3 65.0 90.0", - out_file="uni.nii.gz", + args='-clfrac 0.2 -rbt 18.3 65.0 90.0', + out_file='uni.nii.gz', ), - name="unifize", + name='unifize', ) - fixhdr_unifize = pe.Node(CopyXForm(), name="fixhdr_unifize", mem_gb=0.1) + fixhdr_unifize = pe.Node(CopyXForm(), name='fixhdr_unifize', mem_gb=0.1) # Run ANFI's 3dAutomask to extract a refined brain mask skullstrip_second_pass = pe.Node( - afni.Automask(dilate=1, outputtype="NIFTI_GZ"), name="skullstrip_second_pass" + afni.Automask(dilate=1, outputtype='NIFTI_GZ'), name='skullstrip_second_pass' ) - fixhdr_skullstrip2 = pe.Node(CopyXForm(), name="fixhdr_skullstrip2", mem_gb=0.1) + fixhdr_skullstrip2 = pe.Node(CopyXForm(), name='fixhdr_skullstrip2', mem_gb=0.1) # Take intersection of both masks - combine_masks = pe.Node(fsl.BinaryMaths(operation="mul"), name="combine_masks") + combine_masks = pe.Node(fsl.BinaryMaths(operation='mul'), name='combine_masks') # Compute masked brain - apply_mask = pe.Node(ApplyMask(), name="apply_mask") + apply_mask = pe.Node(ApplyMask(), name='apply_mask') if not pre_mask: from nipype.interfaces.ants.utils import AI bold_template = get_template( - "MNI152NLin2009cAsym", resolution=2, desc="fMRIPrep", suffix="boldref" - ) - brain_mask = get_template( - "MNI152NLin2009cAsym", resolution=2, desc="brain", suffix="mask" + 'MNI152NLin2009cAsym', resolution=2, desc='fMRIPrep', suffix='boldref' ) + brain_mask = get_template('MNI152NLin2009cAsym', resolution=2, desc='brain', suffix='mask') # Initialize transforms with antsAI init_aff = pe.Node( AI( fixed_image=str(bold_template), 
fixed_image_mask=str(brain_mask), - metric=("Mattes", 32, "Regular", 0.2), - transform=("Affine", 0.1), + metric=('Mattes', 32, 'Regular', 0.2), + transform=('Affine', 0.1), search_factor=(20, 0.12), principal_axes=False, convergence=(10, 1e-6, 10), verbose=True, ), - name="init_aff", + name='init_aff', n_procs=omp_nthreads, ) # Registration().version may be None - if parseversion(Registration().version or "0.0.0") > Version("2.2.0"): + if parseversion(Registration().version or '0.0.0') > Version('2.2.0'): init_aff.inputs.search_grid = (40, (0, 40, 40)) # Set up spatial normalization norm = pe.Node( - Registration(from_file=data.load("epi_atlasbased_brainmask.json")), - name="norm", + Registration(from_file=data.load('epi_atlasbased_brainmask.json')), + name='norm', n_procs=omp_nthreads, ) norm.inputs.fixed_image = str(bold_template) map_brainmask = pe.Node( ApplyTransforms( - interpolation="Linear", + interpolation='Linear', # Use the higher resolution and probseg for numerical stability in rounding input_image=str( get_template( - "MNI152NLin2009cAsym", + 'MNI152NLin2009cAsym', resolution=1, - label="brain", - suffix="probseg", + label='brain', + suffix='probseg', ) ), ), - name="map_brainmask", + name='map_brainmask', ) # Ensure mask's header matches reference's - fix_header = pe.Node(CopyHeader(), name="fix_header", run_without_submitting=True) + fix_header = pe.Node(CopyHeader(), name='fix_header', run_without_submitting=True) # fmt: off workflow.connect([ - (inputnode, fix_header, [("in_file", "hdr_file")]), - (inputnode, init_aff, [("in_file", "moving_image")]), - (inputnode, map_brainmask, [("in_file", "reference_image")]), - (inputnode, norm, [("in_file", "moving_image")]), - (init_aff, norm, [("output_transform", "initial_moving_transform")]), + (inputnode, fix_header, [('in_file', 'hdr_file')]), + (inputnode, init_aff, [('in_file', 'moving_image')]), + (inputnode, map_brainmask, [('in_file', 'reference_image')]), + (inputnode, norm, [('in_file', 'moving_image')]), + (init_aff, norm, [('output_transform', 'initial_moving_transform')]), (norm, map_brainmask, [ - ("reverse_invert_flags", "invert_transform_flags"), - ("reverse_transforms", "transforms"), + ('reverse_invert_flags', 'invert_transform_flags'), + ('reverse_transforms', 'transforms'), ]), - (map_brainmask, fix_header, [("output_image", "in_file")]), - (fix_header, n4_correct, [("out_file", "weight_image")]), + (map_brainmask, fix_header, [('output_image', 'in_file')]), + (fix_header, n4_correct, [('out_file', 'weight_image')]), ]) # fmt: on else: # fmt: off workflow.connect([ - (inputnode, n4_correct, [("pre_mask", "weight_image")]), + (inputnode, n4_correct, [('pre_mask', 'weight_image')]), ]) # fmt: on # fmt: off workflow.connect([ - (inputnode, n4_correct, [("in_file", "input_image")]), - (inputnode, fixhdr_unifize, [("in_file", "hdr_file")]), - (inputnode, fixhdr_skullstrip2, [("in_file", "hdr_file")]), - (n4_correct, skullstrip_first_pass, [("output_image", "in_file")]), - (skullstrip_first_pass, first_dilate, [("mask_file", "in_file")]), - (first_dilate, first_mask, [("out_file", "in_mask")]), - (skullstrip_first_pass, first_mask, [("out_file", "in_file")]), - (first_mask, unifize, [("out_file", "in_file")]), - (unifize, fixhdr_unifize, [("out_file", "in_file")]), - (fixhdr_unifize, skullstrip_second_pass, [("out_file", "in_file")]), - (skullstrip_first_pass, combine_masks, [("mask_file", "in_file")]), - (skullstrip_second_pass, fixhdr_skullstrip2, [("out_file", "in_file")]), - (fixhdr_skullstrip2, combine_masks, 
[("out_file", "operand_file")]), - (fixhdr_unifize, apply_mask, [("out_file", "in_file")]), - (combine_masks, apply_mask, [("out_file", "in_mask")]), - (combine_masks, outputnode, [("out_file", "mask_file")]), - (apply_mask, outputnode, [("out_file", "skull_stripped_file")]), - (n4_correct, outputnode, [("output_image", "bias_corrected_file")]), + (inputnode, n4_correct, [('in_file', 'input_image')]), + (inputnode, fixhdr_unifize, [('in_file', 'hdr_file')]), + (inputnode, fixhdr_skullstrip2, [('in_file', 'hdr_file')]), + (n4_correct, skullstrip_first_pass, [('output_image', 'in_file')]), + (skullstrip_first_pass, first_dilate, [('mask_file', 'in_file')]), + (first_dilate, first_mask, [('out_file', 'in_mask')]), + (skullstrip_first_pass, first_mask, [('out_file', 'in_file')]), + (first_mask, unifize, [('out_file', 'in_file')]), + (unifize, fixhdr_unifize, [('out_file', 'in_file')]), + (fixhdr_unifize, skullstrip_second_pass, [('out_file', 'in_file')]), + (skullstrip_first_pass, combine_masks, [('mask_file', 'in_file')]), + (skullstrip_second_pass, fixhdr_skullstrip2, [('out_file', 'in_file')]), + (fixhdr_skullstrip2, combine_masks, [('out_file', 'operand_file')]), + (fixhdr_unifize, apply_mask, [('out_file', 'in_file')]), + (combine_masks, apply_mask, [('out_file', 'in_mask')]), + (combine_masks, outputnode, [('out_file', 'mask_file')]), + (apply_mask, outputnode, [('out_file', 'skull_stripped_file')]), + (n4_correct, outputnode, [('output_image', 'bias_corrected_file')]), ]) # fmt: on return workflow -def init_skullstrip_bold_wf(name="skullstrip_bold_wf"): +def init_skullstrip_bold_wf(name='skullstrip_bold_wf'): """ Apply skull-stripping to a BOLD image. @@ -558,38 +550,34 @@ def init_skullstrip_bold_wf(name="skullstrip_bold_wf"): from niworkflows.interfaces.nibabel import ApplyMask workflow = Workflow(name=name) - inputnode = pe.Node(niu.IdentityInterface(fields=["in_file"]), name="inputnode") + inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']), name='inputnode') outputnode = pe.Node( - niu.IdentityInterface( - fields=["mask_file", "skull_stripped_file", "out_report"] - ), - name="outputnode", - ) - skullstrip_first_pass = pe.Node( - fsl.BET(frac=0.2, mask=True), name="skullstrip_first_pass" + niu.IdentityInterface(fields=['mask_file', 'skull_stripped_file', 'out_report']), + name='outputnode', ) + skullstrip_first_pass = pe.Node(fsl.BET(frac=0.2, mask=True), name='skullstrip_first_pass') skullstrip_second_pass = pe.Node( - afni.Automask(dilate=1, outputtype="NIFTI_GZ"), name="skullstrip_second_pass" + afni.Automask(dilate=1, outputtype='NIFTI_GZ'), name='skullstrip_second_pass' ) - combine_masks = pe.Node(fsl.BinaryMaths(operation="mul"), name="combine_masks") - apply_mask = pe.Node(ApplyMask(), name="apply_mask") - mask_reportlet = pe.Node(SimpleShowMaskRPT(), name="mask_reportlet") + combine_masks = pe.Node(fsl.BinaryMaths(operation='mul'), name='combine_masks') + apply_mask = pe.Node(ApplyMask(), name='apply_mask') + mask_reportlet = pe.Node(SimpleShowMaskRPT(), name='mask_reportlet') # fmt: off workflow.connect([ - (inputnode, skullstrip_first_pass, [("in_file", "in_file")]), - (skullstrip_first_pass, skullstrip_second_pass, [("out_file", "in_file")]), - (skullstrip_first_pass, combine_masks, [("mask_file", "in_file")]), - (skullstrip_second_pass, combine_masks, [("out_file", "operand_file")]), - (combine_masks, outputnode, [("out_file", "mask_file")]), + (inputnode, skullstrip_first_pass, [('in_file', 'in_file')]), + (skullstrip_first_pass, skullstrip_second_pass, 
[('out_file', 'in_file')]), + (skullstrip_first_pass, combine_masks, [('mask_file', 'in_file')]), + (skullstrip_second_pass, combine_masks, [('out_file', 'operand_file')]), + (combine_masks, outputnode, [('out_file', 'mask_file')]), # Masked file - (inputnode, apply_mask, [("in_file", "in_file")]), - (combine_masks, apply_mask, [("out_file", "in_mask")]), - (apply_mask, outputnode, [("out_file", "skull_stripped_file")]), + (inputnode, apply_mask, [('in_file', 'in_file')]), + (combine_masks, apply_mask, [('out_file', 'in_mask')]), + (apply_mask, outputnode, [('out_file', 'skull_stripped_file')]), # Reportlet - (inputnode, mask_reportlet, [("in_file", "background_file")]), - (combine_masks, mask_reportlet, [("out_file", "mask_file")]), - (mask_reportlet, outputnode, [("out_report", "out_report")]), + (inputnode, mask_reportlet, [('in_file', 'background_file')]), + (combine_masks, mask_reportlet, [('out_file', 'mask_file')]), + (mask_reportlet, outputnode, [('out_report', 'out_report')]), ]) # fmt: on diff --git a/niworkflows/interfaces/bids.py b/niworkflows/interfaces/bids.py index 6a50417d400..cc72789b94b 100644 --- a/niworkflows/interfaces/bids.py +++ b/niworkflows/interfaces/bids.py @@ -21,56 +21,57 @@ # https://www.nipreps.org/community/licensing/ # """Interfaces for handling BIDS-like neuroimaging structures.""" + +import os +import re +import shutil +import sys from collections import defaultdict from contextlib import suppress from json import dumps, loads from pathlib import Path -import shutil -import os -import re -import sys import nibabel as nb import numpy as np - +import templateflow as tf from nipype import logging from nipype.interfaces.base import ( - traits, - isdefined, - Undefined, - TraitedSpec, BaseInterfaceInputSpec, + Directory, DynamicTraitedSpec, File, - Directory, InputMultiObject, OutputMultiObject, - Str, SimpleInterface, + Str, + TraitedSpec, + Undefined, + isdefined, + traits, ) from nipype.interfaces.io import add_traits from nipype.utils.filemanip import hash_infile -import templateflow as tf + from .. 
import data from ..utils.bids import _init_layout, relative_to_root from ..utils.images import set_consumables, unsafe_write_nifti_header_and_data from ..utils.misc import _copy_any, unlink -regz = re.compile(r"\.gz$") -_pybids_spec = loads(data.load.readable("nipreps.json").read_text()) -BIDS_DERIV_ENTITIES = _pybids_spec["entities"] -BIDS_DERIV_PATTERNS = tuple(_pybids_spec["default_path_patterns"]) +regz = re.compile(r'\.gz$') +_pybids_spec = loads(data.load.readable('nipreps.json').read_text()) +BIDS_DERIV_ENTITIES = _pybids_spec['entities'] +BIDS_DERIV_PATTERNS = tuple(_pybids_spec['default_path_patterns']) STANDARD_SPACES = tf.api.templates() -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') if sys.version_info < (3, 10): # PY39 builtin_zip = zip - def zip(*args, strict=False): + def zip(*args, strict=False): # noqa: A001 if strict and any(len(args[0]) != len(arg) for arg in args): - raise ValueError("strict_zip() requires all arguments to have the same length") + raise ValueError('strict_zip() requires all arguments to have the same length') return builtin_zip(*args) @@ -82,24 +83,24 @@ def _none(): DEFAULT_DTYPES = defaultdict( _none, ( - ("mask", "uint8"), - ("dseg", "int16"), - ("probseg", "float32"), - ("boldref", "float32"), + ('mask', 'uint8'), + ('dseg', 'int16'), + ('probseg', 'float32'), + ('boldref', 'float32'), ), ) class _BIDSBaseInputSpec(BaseInterfaceInputSpec): bids_dir = traits.Either( - (None, Directory(exists=True)), usedefault=True, desc="optional bids directory" + (None, Directory(exists=True)), usedefault=True, desc='optional bids directory' ) - bids_validate = traits.Bool(True, usedefault=True, desc="enable BIDS validator") - index_db = Directory(exists=True, desc="a PyBIDS layout cache directory") + bids_validate = traits.Bool(True, usedefault=True, desc='enable BIDS validator') + index_db = Directory(exists=True, desc='a PyBIDS layout cache directory') class _BIDSInfoInputSpec(_BIDSBaseInputSpec): - in_file = File(mandatory=True, desc="input file, part of a BIDS tree") + in_file = File(mandatory=True, desc='input file, part of a BIDS tree') class _BIDSInfoOutputSpec(DynamicTraitedSpec): @@ -216,8 +217,7 @@ def _run_interface(self, runtime): pass params = parse_file_entities(in_file) self._results = { - key: params.get(key, Undefined) - for key in _BIDSInfoOutputSpec().get().keys() + key: params.get(key, Undefined) for key in _BIDSInfoOutputSpec().get().keys() } return runtime @@ -228,17 +228,17 @@ class _BIDSDataGrabberInputSpec(BaseInterfaceInputSpec): class _BIDSDataGrabberOutputSpec(TraitedSpec): - out_dict = traits.Dict(desc="output data structure") - fmap = OutputMultiObject(desc="output fieldmaps") - bold = OutputMultiObject(desc="output functional images") - sbref = OutputMultiObject(desc="output sbrefs") - t1w = OutputMultiObject(desc="output T1w images") - roi = OutputMultiObject(desc="output ROI images") - t2w = OutputMultiObject(desc="output T2w images") - flair = OutputMultiObject(desc="output FLAIR images") - pet = OutputMultiObject(desc="output PET images") - dwi = OutputMultiObject(desc="output DWI images") - asl = OutputMultiObject(desc="output ASL images") + out_dict = traits.Dict(desc='output data structure') + fmap = OutputMultiObject(desc='output fieldmaps') + bold = OutputMultiObject(desc='output functional images') + sbref = OutputMultiObject(desc='output sbrefs') + t1w = OutputMultiObject(desc='output T1w images') + roi = OutputMultiObject(desc='output ROI images') + t2w = 
OutputMultiObject(desc='output T2w images') + flair = OutputMultiObject(desc='output FLAIR images') + pet = OutputMultiObject(desc='output PET images') + dwi = OutputMultiObject(desc='output DWI images') + asl = OutputMultiObject(desc='output ASL images') class BIDSDataGrabber(SimpleInterface): @@ -265,8 +265,8 @@ class BIDSDataGrabber(SimpleInterface): _require_funcs = True def __init__(self, *args, **kwargs): - anat_only = kwargs.pop("anat_only") - anat_derivatives = kwargs.pop("anat_derivatives", None) + anat_only = kwargs.pop('anat_only') + anat_derivatives = kwargs.pop('anat_derivatives', None) super().__init__(*args, **kwargs) if anat_only is not None: self._require_funcs = not anat_only @@ -275,59 +275,54 @@ def __init__(self, *args, **kwargs): def _run_interface(self, runtime): bids_dict = self.inputs.subject_data - self._results["out_dict"] = bids_dict + self._results['out_dict'] = bids_dict self._results.update(bids_dict) if self._require_t1w and not bids_dict['t1w']: raise FileNotFoundError( - "No T1w images found for subject sub-{}".format(self.inputs.subject_id) + f'No T1w images found for subject sub-{self.inputs.subject_id}' ) - if self._require_funcs and not bids_dict["bold"]: + if self._require_funcs and not bids_dict['bold']: raise FileNotFoundError( - "No functional images found for subject sub-{}".format( - self.inputs.subject_id - ) + f'No functional images found for subject sub-{self.inputs.subject_id}' ) - for imtype in ["bold", "t2w", "flair", "fmap", "sbref", "roi", "pet", "asl"]: + for imtype in ['bold', 't2w', 'flair', 'fmap', 'sbref', 'roi', 'pet', 'asl']: if not bids_dict[imtype]: - LOGGER.info( - 'No "%s" images found for sub-%s', imtype, self.inputs.subject_id - ) + LOGGER.info('No "%s" images found for sub-%s', imtype, self.inputs.subject_id) return runtime class _PrepareDerivativeInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - check_hdr = traits.Bool(True, usedefault=True, desc="fix headers of NIfTI outputs") + check_hdr = traits.Bool(True, usedefault=True, desc='fix headers of NIfTI outputs') compress = InputMultiObject( traits.Either(None, traits.Bool), usedefault=True, - desc="whether ``in_file`` should be compressed (True), uncompressed (False) " - "or left unmodified (None, default).", + desc='whether ``in_file`` should be compressed (True), uncompressed (False) ' + 'or left unmodified (None, default).', ) data_dtype = Str( - desc="NumPy datatype to coerce NIfTI data to, or `source` to match the input file dtype" + desc='NumPy datatype to coerce NIfTI data to, or `source` to match the input file dtype' ) dismiss_entities = InputMultiObject( traits.Either(None, Str), usedefault=True, - desc="a list entities that will not be propagated from the source file", + desc='a list of entities that will not be propagated from the source file', ) - in_file = InputMultiObject( - File(exists=True), mandatory=True, desc="the object to be saved" - ) - meta_dict = traits.DictStrAny(desc="an input dictionary containing metadata") + in_file = InputMultiObject(File(exists=True), mandatory=True, desc='the object to be saved') + meta_dict = traits.DictStrAny(desc='an input dictionary containing metadata') source_file = InputMultiObject( - File(exists=False), mandatory=True, desc="the source file(s) to extract entities from") + File(exists=False), mandatory=True, desc='the source file(s) to extract entities from' + ) class _PrepareDerivativeOutputSpec(TraitedSpec): - out_file = OutputMultiObject(File(exists=True), desc="derivative file path") - out_meta =
traits.DictStrAny(desc="derivative metadata") - out_path = OutputMultiObject(Str, desc="relative path in target directory") - fixed_hdr = traits.List(traits.Bool, desc="whether derivative header was fixed") + out_file = OutputMultiObject(File(exists=True), desc='derivative file path') + out_meta = traits.DictStrAny(desc='derivative metadata') + out_path = OutputMultiObject(Str, desc='relative path in target directory') + fixed_hdr = traits.List(traits.Bool, desc='whether derivative header was fixed') class PrepareDerivative(SimpleInterface): @@ -495,7 +490,7 @@ class PrepareDerivative(SimpleInterface): input_spec = _PrepareDerivativeInputSpec output_spec = _PrepareDerivativeOutputSpec - _config_entities = frozenset({e["name"] for e in BIDS_DERIV_ENTITIES}) + _config_entities = frozenset({e['name'] for e in BIDS_DERIV_ENTITIES}) _config_entities_dict = BIDS_DERIV_ENTITIES _standard_spaces = STANDARD_SPACES _file_patterns = BIDS_DERIV_PATTERNS @@ -503,9 +498,7 @@ class PrepareDerivative(SimpleInterface): def __init__(self, allowed_entities=None, **inputs): """Initialize the SimpleInterface and extend inputs with custom entities.""" - self._allowed_entities = set(allowed_entities or []).union( - set(self._config_entities) - ) + self._allowed_entities = set(allowed_entities or []).union(set(self._config_entities)) self._metadata = {} self._static_traits = self.input_spec.class_editable_traits() + sorted( @@ -522,7 +515,7 @@ def __init__(self, allowed_entities=None, **inputs): setattr(self.inputs, k, inputs[k]) def _run_interface(self, runtime): - from bids.layout import parse_file_entities, Config + from bids.layout import Config, parse_file_entities from bids.layout.writing import build_path from bids.utils import listify @@ -533,47 +526,52 @@ def _run_interface(self, runtime): # Middle precedence: metadata passed to constructor **self._metadata, # Highest precedence: metadata set as inputs - **({ - k: getattr(self.inputs, k) - for k in self.inputs.copyable_trait_names() - if k not in self._static_traits - }) + **( + { + k: getattr(self.inputs, k) + for k in self.inputs.copyable_trait_names() + if k not in self._static_traits + } + ), } in_file = listify(self.inputs.in_file) # Initialize entities with those from the source file. 
custom_config = Config( - name="custom", + name='custom', entities=self._config_entities_dict, default_path_patterns=self._file_patterns, ) in_entities = [ parse_file_entities( str(relative_to_root(source_file)), - config=["bids", "derivatives", custom_config], + config=['bids', 'derivatives', custom_config], ) for source_file in self.inputs.source_file ] - out_entities = {k: v for k, v in in_entities[0].items() - if all(ent.get(k) == v for ent in in_entities[1:])} + out_entities = { + k: v + for k, v in in_entities[0].items() + if all(ent.get(k) == v for ent in in_entities[1:]) + } for drop_entity in listify(self.inputs.dismiss_entities or []): out_entities.pop(drop_entity, None) # Override extension with that of the input file(s) - out_entities["extension"] = [ + out_entities['extension'] = [ # _splitext does not accept .surf.gii (for instance) - "".join(Path(orig_file).suffixes).lstrip(".") + ''.join(Path(orig_file).suffixes).lstrip('.') for orig_file in in_file ] compress = listify(self.inputs.compress) or [None] if len(compress) == 1: compress = compress * len(in_file) - for i, ext in enumerate(out_entities["extension"]): + for i, ext in enumerate(out_entities['extension']): if compress[i] is not None: - ext = regz.sub("", ext) - out_entities["extension"][i] = f"{ext}.gz" if compress[i] else ext + ext = regz.sub('', ext) + out_entities['extension'][i] = f'{ext}.gz' if compress[i] else ext # Override entities with those set as inputs for key in self._allowed_entities: @@ -582,52 +580,51 @@ def _run_interface(self, runtime): out_entities[key] = value # Clean up native resolution with space - if out_entities.get("resolution") == "native" and out_entities.get("space"): - out_entities.pop("resolution", None) + if out_entities.get('resolution') == 'native' and out_entities.get('space'): + out_entities.pop('resolution', None) # Expand templateflow resolutions - resolution = out_entities.get("resolution") - space = out_entities.get("space") + resolution = out_entities.get('resolution') + space = out_entities.get('space') if resolution: # Standard spaces if space in self._standard_spaces: res = _get_tf_resolution(space, resolution) else: # TODO: Nonstandard? - res = "Unknown" + res = 'Unknown' metadata['Resolution'] = res - if len(set(out_entities["extension"])) == 1: - out_entities["extension"] = out_entities["extension"][0] + if len(set(out_entities['extension'])) == 1: + out_entities['extension'] = out_entities['extension'][0] # Insert custom (non-BIDS) entities from allowed_entities. custom_entities = set(out_entities) - set(self._config_entities) patterns = self._file_patterns if custom_entities: # Example: f"{key}-{{{key}}}" -> "task-{task}" - custom_pat = "_".join(f"{key}-{{{key}}}" for key in sorted(custom_entities)) + custom_pat = '_'.join(f'{key}-{{{key}}}' for key in sorted(custom_entities)) patterns = [ - pat.replace("_{suffix", "_".join(("", custom_pat, "{suffix"))) - for pat in patterns + pat.replace('_{suffix', '_'.join(('', custom_pat, '{suffix'))) for pat in patterns ] # Build the output path(s) dest_files = build_path(out_entities, path_patterns=patterns) if not dest_files: - raise ValueError(f"Could not build path with entities {out_entities}.") + raise ValueError(f'Could not build path with entities {out_entities}.') # Make sure the interpolated values is embedded in a list, and check dest_files = listify(dest_files) if len(in_file) != len(dest_files): raise ValueError( - f"Input files ({len(in_file)}) not matched " - f"by interpolated patterns ({len(dest_files)})." 
+ f'Input files ({len(in_file)}) not matched ' + f'by interpolated patterns ({len(dest_files)}).' ) # Prepare SimpleInterface outputs object - self._results["out_file"] = [] - self._results["fixed_hdr"] = [False] * len(in_file) - self._results["out_path"] = dest_files - self._results["out_meta"] = metadata + self._results['out_file'] = [] + self._results['fixed_hdr'] = [False] * len(in_file) + self._results['out_path'] = dest_files + self._results['out_meta'] = metadata for i, (orig_file, dest_file) in enumerate(zip(in_file, dest_files)): # Set data and header iff changes need to be made. If these are @@ -640,9 +637,9 @@ def _run_interface(self, runtime): new_compression = False if is_nifti: - new_compression = ( - os.fspath(orig_file).endswith(".gz") ^ os.fspath(dest_file).endswith(".gz") - ) + new_compression = os.fspath(orig_file).endswith('.gz') ^ os.fspath( + dest_file + ).endswith('.gz') data_dtype = self.inputs.data_dtype or self._default_dtypes[self.inputs.suffix] if is_nifti and any((self.inputs.check_hdr, data_dtype)): @@ -651,39 +648,37 @@ def _run_interface(self, runtime): if self.inputs.check_hdr: hdr = nii.header curr_units = tuple( - [None if u == "unknown" else u for u in hdr.get_xyzt_units()] + [None if u == 'unknown' else u for u in hdr.get_xyzt_units()] ) - curr_codes = (int(hdr["qform_code"]), int(hdr["sform_code"])) + curr_codes = (int(hdr['qform_code']), int(hdr['sform_code'])) # Default to mm, use sec if data type is bold units = ( - curr_units[0] or "mm", - "sec" if out_entities["suffix"] == "bold" else None, + curr_units[0] or 'mm', + 'sec' if out_entities['suffix'] == 'bold' else None, ) xcodes = (1, 1) # Derivative in its original scanner space if self.inputs.space: - xcodes = ( - (4, 4) if self.inputs.space in self._standard_spaces else (2, 2) - ) + xcodes = (4, 4) if self.inputs.space in self._standard_spaces else (2, 2) curr_zooms = zooms = hdr.get_zooms() - if "RepetitionTime" in self.inputs.get(): + if 'RepetitionTime' in self.inputs.get(): zooms = curr_zooms[:3] + (self.inputs.RepetitionTime,) if (curr_codes, curr_units, curr_zooms) != (xcodes, units, zooms): - self._results["fixed_hdr"][i] = True + self._results['fixed_hdr'][i] = True new_header = hdr.copy() new_header.set_qform(nii.affine, xcodes[0]) new_header.set_sform(nii.affine, xcodes[1]) new_header.set_xyzt_units(*units) new_header.set_zooms(zooms) - if data_dtype == "source": # match source dtype + if data_dtype == 'source': # match source dtype try: data_dtype = nb.load(self.inputs.source_file[0]).get_data_dtype() - except Exception: + except Exception: # noqa: BLE001 LOGGER.warning( - f"Could not get data type of file {self.inputs.source_file[0]}" + f'Could not get data type of file {self.inputs.source_file[0]}' ) data_dtype = None @@ -692,8 +687,8 @@ def _run_interface(self, runtime): orig_dtype = nii.get_data_dtype() if orig_dtype != data_dtype: LOGGER.warning( - f"Changing {Path(dest_file).name} dtype " - f"from {orig_dtype} to {data_dtype}" + f'Changing {Path(dest_file).name} dtype ' + f'from {orig_dtype} to {data_dtype}' ) # coerce dataobj to new data dtype if np.issubdtype(data_dtype, np.integer): @@ -722,35 +717,31 @@ def _run_interface(self, runtime): else: # Without this, we would be writing nans # This is our punishment for hacking around nibabel defaults - new_header.set_slope_inter(slope=1., inter=0.) 
+ new_header.set_slope_inter(slope=1.0, inter=0.0) unsafe_write_nifti_header_and_data( - fname=out_file, - header=new_header, - data=new_data + fname=out_file, header=new_header, data=new_data ) del orig_img - self._results["out_file"].append(str(out_file)) + self._results['out_file'].append(str(out_file)) return runtime class _SaveDerivativeInputSpec(TraitedSpec): base_directory = Directory( - exists=True, mandatory=True, desc="Path to the base directory for storing data." - ) - in_file = InputMultiObject( - File(exists=True), mandatory=True, desc="the object to be saved" + exists=True, mandatory=True, desc='Path to the base directory for storing data.' ) - metadata = traits.DictStrAny(desc="metadata to be saved alongside the file") + in_file = InputMultiObject(File(exists=True), mandatory=True, desc='the object to be saved') + metadata = traits.DictStrAny(desc='metadata to be saved alongside the file') relative_path = InputMultiObject( - traits.Str, desc="path to the file relative to the base directory" + traits.Str, desc='path to the file relative to the base directory' ) class _SaveDerivativeOutputSpec(TraitedSpec): - out_file = OutputMultiObject(File, desc="written file path") - out_meta = OutputMultiObject(File, desc="written JSON sidecar path") + out_file = OutputMultiObject(File, desc='written file path') + out_meta = OutputMultiObject(File, desc='written JSON sidecar path') class SaveDerivative(SimpleInterface): @@ -763,16 +754,18 @@ class SaveDerivative(SimpleInterface): This ensures that changes to the output directory metadata (e.g., mtime) do not trigger unnecessary recomputations in the workflow. """ + input_spec = _SaveDerivativeInputSpec output_spec = _SaveDerivativeOutputSpec _always_run = True def _run_interface(self, runtime): - self._results["out_file"] = [] - self._results["out_meta"] = [] + self._results['out_file'] = [] + self._results['out_meta'] = [] for in_file, relative_path in zip( - self.inputs.in_file, self.inputs.relative_path, + self.inputs.in_file, + self.inputs.relative_path, strict=True, ): out_file = Path(self.inputs.base_directory) / relative_path @@ -785,49 +778,45 @@ def _run_interface(self, runtime): sidecar = out_file.parent / f"{out_file.name.split('.', 1)[0]}.json" sidecar.unlink(missing_ok=True) sidecar.write_text(dumps(self.inputs.metadata, indent=2)) - self._results["out_meta"].append(str(sidecar)) - self._results["out_file"].append(str(out_file)) + self._results['out_meta'].append(str(sidecar)) + self._results['out_file'].append(str(out_file)) return runtime class _DerivativesDataSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - base_directory = traits.Directory( - desc="Path to the base directory for storing data." 
- ) - check_hdr = traits.Bool(True, usedefault=True, desc="fix headers of NIfTI outputs") + base_directory = traits.Directory(desc='Path to the base directory for storing data.') + check_hdr = traits.Bool(True, usedefault=True, desc='fix headers of NIfTI outputs') compress = InputMultiObject( traits.Either(None, traits.Bool), usedefault=True, - desc="whether ``in_file`` should be compressed (True), uncompressed (False) " - "or left unmodified (None, default).", + desc='whether ``in_file`` should be compressed (True), uncompressed (False) ' + 'or left unmodified (None, default).', ) data_dtype = Str( - desc="NumPy datatype to coerce NIfTI data to, or `source` to" - "match the input file dtype" + desc='NumPy datatype to coerce NIfTI data to, or `source` to match the input file dtype' ) dismiss_entities = InputMultiObject( traits.Either(None, Str), usedefault=True, - desc="a list entities that will not be propagated from the source file", + desc='a list of entities that will not be propagated from the source file', ) - in_file = InputMultiObject( - File(exists=True), mandatory=True, desc="the object to be saved" - ) - meta_dict = traits.DictStrAny(desc="an input dictionary containing metadata") + in_file = InputMultiObject(File(exists=True), mandatory=True, desc='the object to be saved') + meta_dict = traits.DictStrAny(desc='an input dictionary containing metadata') source_file = InputMultiObject( - File(exists=False), mandatory=True, desc="the source file(s) to extract entities from") + File(exists=False), mandatory=True, desc='the source file(s) to extract entities from' + ) class _DerivativesDataSinkOutputSpec(TraitedSpec): - out_file = OutputMultiObject(File(exists=True, desc="written file path")) - out_meta = OutputMultiObject(File(exists=True, desc="written JSON sidecar path")) + out_file = OutputMultiObject(File(exists=True, desc='written file path')) + out_meta = OutputMultiObject(File(exists=True, desc='written JSON sidecar path')) compression = OutputMultiObject( traits.Either(None, traits.Bool), - desc="whether ``in_file`` should be compressed (True), uncompressed (False) " - "or left unmodified (None).", + desc='whether ``in_file`` should be compressed (True), uncompressed (False) ' + 'or left unmodified (None).', ) - fixed_hdr = traits.List(traits.Bool, desc="whether derivative header was fixed") + fixed_hdr = traits.List(traits.Bool, desc='whether derivative header was fixed') class DerivativesDataSink(SimpleInterface): @@ -1004,9 +993,9 @@ class DerivativesDataSink(SimpleInterface): input_spec = _DerivativesDataSinkInputSpec output_spec = _DerivativesDataSinkOutputSpec - out_path_base = "niworkflows" + out_path_base = 'niworkflows' _always_run = True - _config_entities = frozenset({e["name"] for e in BIDS_DERIV_ENTITIES}) + _config_entities = frozenset({e['name'] for e in BIDS_DERIV_ENTITIES}) _config_entities_dict = BIDS_DERIV_ENTITIES _standard_spaces = STANDARD_SPACES _file_patterns = BIDS_DERIV_PATTERNS @@ -1014,9 +1003,7 @@ class DerivativesDataSink(SimpleInterface): def __init__(self, allowed_entities=None, out_path_base=None, **inputs): """Initialize the SimpleInterface and extend inputs with custom entities.""" - self._allowed_entities = set(allowed_entities or []).union( - set(self._config_entities) - ) + self._allowed_entities = set(allowed_entities or []).union(set(self._config_entities)) if out_path_base: self.out_path_base = out_path_base @@ -1035,7 +1022,7 @@ def __init__(self, allowed_entities=None, out_path_base=None, **inputs): setattr(self.inputs, k, inputs[k])
def _run_interface(self, runtime): - from bids.layout import parse_file_entities, Config + from bids.layout import Config, parse_file_entities from bids.layout.writing import build_path from bids.utils import listify @@ -1059,36 +1046,39 @@ def _run_interface(self, runtime): # Initialize entities with those from the source file. custom_config = Config( - name="custom", + name='custom', entities=self._config_entities_dict, default_path_patterns=self._file_patterns, ) in_entities = [ parse_file_entities( str(relative_to_root(source_file)), - config=["bids", "derivatives", custom_config], + config=['bids', 'derivatives', custom_config], ) for source_file in self.inputs.source_file ] - out_entities = {k: v for k, v in in_entities[0].items() - if all(ent.get(k) == v for ent in in_entities[1:])} + out_entities = { + k: v + for k, v in in_entities[0].items() + if all(ent.get(k) == v for ent in in_entities[1:]) + } for drop_entity in listify(self.inputs.dismiss_entities or []): out_entities.pop(drop_entity, None) # Override extension with that of the input file(s) - out_entities["extension"] = [ + out_entities['extension'] = [ # _splitext does not accept .surf.gii (for instance) - "".join(Path(orig_file).suffixes).lstrip(".") + ''.join(Path(orig_file).suffixes).lstrip('.') for orig_file in in_file ] compress = listify(self.inputs.compress) or [None] if len(compress) == 1: compress = compress * len(in_file) - for i, ext in enumerate(out_entities["extension"]): + for i, ext in enumerate(out_entities['extension']): if compress[i] is not None: - ext = regz.sub("", ext) - out_entities["extension"][i] = f"{ext}.gz" if compress[i] else ext + ext = regz.sub('', ext) + out_entities['extension'][i] = f'{ext}.gz' if compress[i] else ext # Override entities with those set as inputs for key in self._allowed_entities: @@ -1097,56 +1087,55 @@ def _run_interface(self, runtime): out_entities[key] = value # Clean up native resolution with space - if out_entities.get("resolution") == "native" and out_entities.get("space"): - out_entities.pop("resolution", None) + if out_entities.get('resolution') == 'native' and out_entities.get('space'): + out_entities.pop('resolution', None) # Expand templateflow resolutions - resolution = out_entities.get("resolution") - space = out_entities.get("space") + resolution = out_entities.get('resolution') + space = out_entities.get('space') if resolution: # Standard spaces if space in self._standard_spaces: res = _get_tf_resolution(space, resolution) else: # TODO: Nonstandard? - res = "Unknown" + res = 'Unknown' self._metadata['Resolution'] = res - if len(set(out_entities["extension"])) == 1: - out_entities["extension"] = out_entities["extension"][0] + if len(set(out_entities['extension'])) == 1: + out_entities['extension'] = out_entities['extension'][0] # Insert custom (non-BIDS) entities from allowed_entities. 
custom_entities = set(out_entities) - set(self._config_entities) patterns = self._file_patterns if custom_entities: # Example: f"{key}-{{{key}}}" -> "task-{task}" - custom_pat = "_".join(f"{key}-{{{key}}}" for key in sorted(custom_entities)) + custom_pat = '_'.join(f'{key}-{{{key}}}' for key in sorted(custom_entities)) patterns = [ - pat.replace("_{suffix", "_".join(("", custom_pat, "{suffix"))) - for pat in patterns + pat.replace('_{suffix', '_'.join(('', custom_pat, '{suffix'))) for pat in patterns ] # Prepare SimpleInterface outputs object - self._results["out_file"] = [] - self._results["compression"] = [] - self._results["fixed_hdr"] = [False] * len(in_file) + self._results['out_file'] = [] + self._results['compression'] = [] + self._results['fixed_hdr'] = [False] * len(in_file) dest_files = build_path(out_entities, path_patterns=patterns) if not dest_files: - raise ValueError(f"Could not build path with entities {out_entities}.") + raise ValueError(f'Could not build path with entities {out_entities}.') # Make sure the interpolated values is embedded in a list, and check dest_files = listify(dest_files) if len(in_file) != len(dest_files): raise ValueError( - f"Input files ({len(in_file)}) not matched " - f"by interpolated patterns ({len(dest_files)})." + f'Input files ({len(in_file)}) not matched ' + f'by interpolated patterns ({len(dest_files)}).' ) for i, (orig_file, dest_file) in enumerate(zip(in_file, dest_files)): out_file = out_path / dest_file out_file.parent.mkdir(exist_ok=True, parents=True) - self._results["out_file"].append(str(out_file)) - self._results["compression"].append(str(dest_file).endswith(".gz")) + self._results['out_file'].append(str(out_file)) + self._results['compression'].append(str(dest_file).endswith('.gz')) # An odd but possible case is that an input file is in the location of # the output and we have made no changes to it. 
@@ -1176,39 +1165,37 @@ def _run_interface(self, runtime): if self.inputs.check_hdr: hdr = nii.header curr_units = tuple( - [None if u == "unknown" else u for u in hdr.get_xyzt_units()] + [None if u == 'unknown' else u for u in hdr.get_xyzt_units()] ) - curr_codes = (int(hdr["qform_code"]), int(hdr["sform_code"])) + curr_codes = (int(hdr['qform_code']), int(hdr['sform_code'])) # Default to mm, use sec if data type is bold units = ( - curr_units[0] or "mm", - "sec" if out_entities["suffix"] == "bold" else None, + curr_units[0] or 'mm', + 'sec' if out_entities['suffix'] == 'bold' else None, ) xcodes = (1, 1) # Derivative in its original scanner space if self.inputs.space: - xcodes = ( - (4, 4) if self.inputs.space in self._standard_spaces else (2, 2) - ) + xcodes = (4, 4) if self.inputs.space in self._standard_spaces else (2, 2) curr_zooms = zooms = hdr.get_zooms() - if "RepetitionTime" in self.inputs.get(): + if 'RepetitionTime' in self.inputs.get(): zooms = curr_zooms[:3] + (self.inputs.RepetitionTime,) if (curr_codes, curr_units, curr_zooms) != (xcodes, units, zooms): - self._results["fixed_hdr"][i] = True + self._results['fixed_hdr'][i] = True new_header = hdr.copy() new_header.set_qform(nii.affine, xcodes[0]) new_header.set_sform(nii.affine, xcodes[1]) new_header.set_xyzt_units(*units) new_header.set_zooms(zooms) - if data_dtype == "source": # match source dtype + if data_dtype == 'source': # match source dtype try: data_dtype = nb.load(self.inputs.source_file[0]).get_data_dtype() - except Exception: + except Exception: # noqa: BLE001 LOGGER.warning( - f"Could not get data type of file {self.inputs.source_file[0]}" + f'Could not get data type of file {self.inputs.source_file[0]}' ) data_dtype = None @@ -1217,7 +1204,7 @@ def _run_interface(self, runtime): orig_dtype = nii.get_data_dtype() if orig_dtype != data_dtype: LOGGER.warning( - f"Changing {out_file} dtype from {orig_dtype} to {data_dtype}" + f'Changing {out_file} dtype from {orig_dtype} to {data_dtype}' ) # coerce dataobj to new data dtype if np.issubdtype(data_dtype, np.integer): @@ -1241,33 +1228,27 @@ def _run_interface(self, runtime): else: # Without this, we would be writing nans # This is our punishment for hacking around nibabel defaults - new_header.set_slope_inter(slope=1., inter=0.) 
+ new_header.set_slope_inter(slope=1.0, inter=0.0) unsafe_write_nifti_header_and_data( - fname=out_file, - header=new_header, - data=new_data + fname=out_file, header=new_header, data=new_data ) del orig_img - if len(self._results["out_file"]) == 1: + if len(self._results['out_file']) == 1: meta_fields = self.inputs.copyable_trait_names() self._metadata.update( - { - k: getattr(self.inputs, k) - for k in meta_fields - if k not in self._static_traits - } + {k: getattr(self.inputs, k) for k in meta_fields if k not in self._static_traits} ) if self._metadata: sidecar = out_file.parent / f"{out_file.name.split('.', 1)[0]}.json" unlink(sidecar, missing_ok=True) sidecar.write_text(dumps(self._metadata, sort_keys=True, indent=2)) - self._results["out_meta"] = str(sidecar) + self._results['out_meta'] = str(sidecar) return runtime class _ReadSidecarJSONInputSpec(_BIDSBaseInputSpec): - in_file = File(exists=True, mandatory=True, desc="the input nifti file") + in_file = File(exists=True, mandatory=True, desc='the input nifti file') class _ReadSidecarJSONOutputSpec(_BIDSInfoOutputSpec): @@ -1342,57 +1323,49 @@ def _run_interface(self, runtime): self.inputs.in_file, self.layout, self.inputs.bids_validate, - database_path=( - self.inputs.index_db if isdefined(self.inputs.index_db) - else None - ) + database_path=(self.inputs.index_db if isdefined(self.inputs.index_db) else None), ) # Fill in BIDS entities of the output ("*_id") output_keys = list(_BIDSInfoOutputSpec().get().keys()) params = self.layout.parse_file_entities(self.inputs.in_file) - self._results = { - key: params.get(key.split("_")[0], Undefined) for key in output_keys - } + self._results = {key: params.get(key.split('_')[0], Undefined) for key in output_keys} # Fill in metadata metadata = self.layout.get_metadata(self.inputs.in_file) - self._results["out_dict"] = metadata + self._results['out_dict'] = metadata # Set dynamic outputs if fields input is present for fname in self._fields: if not self._undef_fields and fname not in metadata: raise KeyError( - 'Metadata field "%s" not found for file %s' - % (fname, self.inputs.in_file) + f'Metadata field "{fname}" not found for file {self.inputs.in_file}' ) self._results[fname] = metadata.get(fname, Undefined) return runtime class _BIDSFreeSurferDirInputSpec(BaseInterfaceInputSpec): - derivatives = Directory( - exists=True, mandatory=True, desc="BIDS derivatives directory" - ) + derivatives = Directory(exists=True, mandatory=True, desc='BIDS derivatives directory') freesurfer_home = Directory( - exists=True, mandatory=True, desc="FreeSurfer installation directory" + exists=True, mandatory=True, desc='FreeSurfer installation directory' ) subjects_dir = traits.Either( traits.Str(), Directory(), - default="freesurfer", + default='freesurfer', usedefault=True, - desc="Name of FreeSurfer subjects directory", + desc='Name of FreeSurfer subjects directory', ) - spaces = traits.List(traits.Str, desc="Set of output spaces to prepare") + spaces = traits.List(traits.Str, desc='Set of output spaces to prepare') overwrite_fsaverage = traits.Bool( - False, usedefault=True, desc="Overwrite fsaverage directories, if present" + False, usedefault=True, desc='Overwrite fsaverage directories, if present' ) - minimum_fs_version = traits.Enum("7.0.0", desc="Minimum FreeSurfer version for compatibility") + minimum_fs_version = traits.Enum('7.0.0', desc='Minimum FreeSurfer version for compatibility') class _BIDSFreeSurferDirOutputSpec(TraitedSpec): - subjects_dir = traits.Directory(exists=True, desc="FreeSurfer 
subjects directory") + subjects_dir = traits.Directory(exists=True, desc='FreeSurfer subjects directory') class BIDSFreeSurferDir(SimpleInterface): @@ -1425,9 +1398,9 @@ def _run_interface(self, runtime): if not subjects_dir.is_absolute(): subjects_dir = Path(self.inputs.derivatives) / subjects_dir subjects_dir.mkdir(parents=True, exist_ok=True) - self._results["subjects_dir"] = str(subjects_dir) + self._results['subjects_dir'] = str(subjects_dir) - orig_subjects_dir = Path(self.inputs.freesurfer_home) / "subjects" + orig_subjects_dir = Path(self.inputs.freesurfer_home) / 'subjects' # Source is target, so just quit if subjects_dir == orig_subjects_dir: @@ -1435,12 +1408,12 @@ def _run_interface(self, runtime): spaces = list(self.inputs.spaces) # Always copy fsaverage, for proper recon-all functionality - if "fsaverage" not in spaces: - spaces.append("fsaverage") + if 'fsaverage' not in spaces: + spaces.append('fsaverage') for space in spaces: # Skip non-freesurfer spaces and fsnative - if not space.startswith("fsaverage"): + if not space.startswith('fsaverage'): continue source = orig_subjects_dir / space dest = subjects_dir / space @@ -1450,12 +1423,12 @@ def _run_interface(self, runtime): if dest.exists(): continue else: - raise FileNotFoundError("Expected to find '%s' to copy" % source) + raise FileNotFoundError(f"Expected to find '{source}' to copy") if ( space == 'fsaverage' and dest.exists() - and self.inputs.minimum_fs_version == "7.0.0" + and self.inputs.minimum_fs_version == '7.0.0' ): label = dest / 'label' / 'rh.FG1.mpm.vpnl.label' # new in FS7 if not label.exists(): @@ -1475,8 +1448,8 @@ def _run_interface(self, runtime): shutil.copytree(source, dest, copy_function=shutil.copy) except FileExistsError: LOGGER.warning( - "%s exists; if multiple jobs are running in parallel" - ", this can be safely ignored", + '%s exists; if multiple jobs are running in parallel' + ', this can be safely ignored', dest, ) @@ -1506,11 +1479,11 @@ def _get_tf_resolution(space: str, resolution: str) -> str: if r in resolutions: res_meta = resolutions[r] if res_meta is None: - return "Unknown" + return 'Unknown' def _fmt_xyz(coords: list) -> str: - xyz = "x".join([str(c) for c in coords]) - return f"{xyz} mm^3" + xyz = 'x'.join([str(c) for c in coords]) + return f'{xyz} mm^3' return ( f"Template {space} ({_fmt_xyz(res_meta['zooms'])})," diff --git a/niworkflows/interfaces/bold.py b/niworkflows/interfaces/bold.py index ce373115c5a..dc2c17fd1c6 100644 --- a/niworkflows/interfaces/bold.py +++ b/niworkflows/interfaces/bold.py @@ -21,45 +21,47 @@ # https://www.nipreps.org/community/licensing/ # """Utilities for BOLD fMRI imaging.""" -import numpy as np + import nibabel as nb +import numpy as np from nipype import logging from nipype.interfaces.base import ( - traits, - TraitedSpec, BaseInterfaceInputSpec, - SimpleInterface, File, + SimpleInterface, + TraitedSpec, + traits, ) -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') class _NonsteadyStatesDetectorInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="BOLD fMRI timeseries") - nonnegative = traits.Bool(True, usedefault=True, - desc="whether image voxels must be nonnegative") + in_file = File(exists=True, mandatory=True, desc='BOLD fMRI timeseries') + nonnegative = traits.Bool( + True, usedefault=True, desc='whether image voxels must be nonnegative' + ) n_volumes = traits.Range( value=40, low=10, high=200, usedefault=True, - desc="drop volumes in 4D image beyond this 
timepoint", + desc='drop volumes in 4D image beyond this timepoint', ) zero_dummy_masked = traits.Range( value=20, low=2, high=40, usedefault=True, - desc="number of timepoints to average when the number of dummies is zero" + desc='number of timepoints to average when the number of dummies is zero', ) class _NonsteadyStatesDetectorOutputSpec(TraitedSpec): t_mask = traits.List( - traits.Bool, desc="list of nonsteady-states (True) and stable (False) volumes" + traits.Bool, desc='list of nonsteady-states (True) and stable (False) volumes' ) - n_dummy = traits.Int(desc="number of volumes identified as nonsteady states") + n_dummy = traits.Int(desc='number of volumes identified as nonsteady states') class NonsteadyStatesDetector(SimpleInterface): @@ -75,28 +77,28 @@ def _run_interface(self, runtime): t_mask = np.zeros((ntotal,), dtype=bool) if ntotal == 1: - self._results["t_mask"] = [True] - self._results["n_dummy"] = 1 + self._results['t_mask'] = [True] + self._results['n_dummy'] = 1 return runtime from nipype.algorithms.confounds import is_outlier - data = img.get_fdata(dtype="float32")[..., :self.inputs.n_volumes] + data = img.get_fdata(dtype='float32')[..., : self.inputs.n_volumes] # Data can come with outliers showing very high numbers - preemptively prune data = np.clip( data, a_min=0.0 if self.inputs.nonnegative else np.percentile(data, 0.2), a_max=np.percentile(data, 99.8), ) - self._results["n_dummy"] = is_outlier(np.mean(data, axis=(0, 1, 2))) + self._results['n_dummy'] = is_outlier(np.mean(data, axis=(0, 1, 2))) start = 0 - stop = self._results["n_dummy"] + stop = self._results['n_dummy'] if stop < 2: stop = min(ntotal, self.inputs.n_volumes) start = max(0, stop - self.inputs.zero_dummy_masked) t_mask[start:stop] = True - self._results["t_mask"] = t_mask.tolist() + self._results['t_mask'] = t_mask.tolist() return runtime diff --git a/niworkflows/interfaces/cifti.py b/niworkflows/interfaces/cifti.py index f7928029ccf..c61b4c45b4f 100644 --- a/niworkflows/interfaces/cifti.py +++ b/niworkflows/interfaces/cifti.py @@ -21,94 +21,93 @@ # https://www.nipreps.org/community/licensing/ # """Handling connectivity: combines FreeSurfer surfaces with subcortical volumes.""" -from pathlib import Path + +from __future__ import annotations + import json -import typing import warnings +from pathlib import Path import nibabel as nb -from nibabel import cifti2 as ci import numpy as np +import templateflow.api as tf +from nibabel import cifti2 as ci from nilearn.image import resample_to_img -from nipype.utils.filemanip import split_filename from nipype.interfaces.base import ( BaseInterfaceInputSpec, - TraitedSpec, File, - traits, SimpleInterface, + TraitedSpec, + traits, ) -import templateflow.api as tf +from nipype.utils.filemanip import split_filename from niworkflows.interfaces.nibabel import reorient_image - CIFTI_STRUCT_WITH_LABELS = { # CITFI structures with corresponding labels # SURFACES - "CIFTI_STRUCTURE_CORTEX_LEFT": None, - "CIFTI_STRUCTURE_CORTEX_RIGHT": None, + 'CIFTI_STRUCTURE_CORTEX_LEFT': None, + 'CIFTI_STRUCTURE_CORTEX_RIGHT': None, # SUBCORTICAL - "CIFTI_STRUCTURE_ACCUMBENS_LEFT": (26,), - "CIFTI_STRUCTURE_ACCUMBENS_RIGHT": (58,), - "CIFTI_STRUCTURE_AMYGDALA_LEFT": (18,), - "CIFTI_STRUCTURE_AMYGDALA_RIGHT": (54,), - "CIFTI_STRUCTURE_BRAIN_STEM": (16,), - "CIFTI_STRUCTURE_CAUDATE_LEFT": (11,), - "CIFTI_STRUCTURE_CAUDATE_RIGHT": (50,), - "CIFTI_STRUCTURE_CEREBELLUM_LEFT": (8,), # HCP MNI152 - "CIFTI_STRUCTURE_CEREBELLUM_RIGHT": (47,), # HCP MNI152 - 
"CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_LEFT": (28,), - "CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_RIGHT": (60,), - "CIFTI_STRUCTURE_HIPPOCAMPUS_LEFT": (17,), - "CIFTI_STRUCTURE_HIPPOCAMPUS_RIGHT": (53,), - "CIFTI_STRUCTURE_PALLIDUM_LEFT": (13,), - "CIFTI_STRUCTURE_PALLIDUM_RIGHT": (52,), - "CIFTI_STRUCTURE_PUTAMEN_LEFT": (12,), - "CIFTI_STRUCTURE_PUTAMEN_RIGHT": (51,), - "CIFTI_STRUCTURE_THALAMUS_LEFT": (10,), - "CIFTI_STRUCTURE_THALAMUS_RIGHT": (49,), + 'CIFTI_STRUCTURE_ACCUMBENS_LEFT': (26,), + 'CIFTI_STRUCTURE_ACCUMBENS_RIGHT': (58,), + 'CIFTI_STRUCTURE_AMYGDALA_LEFT': (18,), + 'CIFTI_STRUCTURE_AMYGDALA_RIGHT': (54,), + 'CIFTI_STRUCTURE_BRAIN_STEM': (16,), + 'CIFTI_STRUCTURE_CAUDATE_LEFT': (11,), + 'CIFTI_STRUCTURE_CAUDATE_RIGHT': (50,), + 'CIFTI_STRUCTURE_CEREBELLUM_LEFT': (8,), # HCP MNI152 + 'CIFTI_STRUCTURE_CEREBELLUM_RIGHT': (47,), # HCP MNI152 + 'CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_LEFT': (28,), + 'CIFTI_STRUCTURE_DIENCEPHALON_VENTRAL_RIGHT': (60,), + 'CIFTI_STRUCTURE_HIPPOCAMPUS_LEFT': (17,), + 'CIFTI_STRUCTURE_HIPPOCAMPUS_RIGHT': (53,), + 'CIFTI_STRUCTURE_PALLIDUM_LEFT': (13,), + 'CIFTI_STRUCTURE_PALLIDUM_RIGHT': (52,), + 'CIFTI_STRUCTURE_PUTAMEN_LEFT': (12,), + 'CIFTI_STRUCTURE_PUTAMEN_RIGHT': (51,), + 'CIFTI_STRUCTURE_THALAMUS_LEFT': (10,), + 'CIFTI_STRUCTURE_THALAMUS_RIGHT': (49,), } class _GenerateCiftiInputSpec(BaseInterfaceInputSpec): - bold_file = File(mandatory=True, exists=True, desc="input BOLD file") + bold_file = File(mandatory=True, exists=True, desc='input BOLD file') volume_target = traits.Enum( - "MNI152NLin6Asym", + 'MNI152NLin6Asym', usedefault=True, - desc="CIFTI volumetric output space", + desc='CIFTI volumetric output space', ) surface_target = traits.Enum( - "fsLR", + 'fsLR', usedefault=True, - desc="CIFTI surface target space", - ) - grayordinates = traits.Enum( - "91k", "170k", usedefault=True, desc="Final CIFTI grayordinates" + desc='CIFTI surface target space', ) - TR = traits.Float(mandatory=True, desc="Repetition time") + grayordinates = traits.Enum('91k', '170k', usedefault=True, desc='Final CIFTI grayordinates') + TR = traits.Float(mandatory=True, desc='Repetition time') surface_bolds = traits.List( File(exists=True), mandatory=True, - desc="list of surface BOLD GIFTI files (length 2 with order [L,R])", + desc='list of surface BOLD GIFTI files (length 2 with order [L,R])', ) class _GenerateCiftiOutputSpec(TraitedSpec): - out_file = File(desc="generated CIFTI file") - out_metadata = File(desc="CIFTI metadata JSON") + out_file = File(desc='generated CIFTI file') + out_metadata = File(desc='CIFTI metadata JSON') class GenerateCifti(SimpleInterface): """ Generate a HCP-style CIFTI image from BOLD file in target spaces. 
""" + input_spec = _GenerateCiftiInputSpec output_spec = _GenerateCiftiOutputSpec def _run_interface(self, runtime): - surface_labels, volume_labels, metadata = _prepare_cifti(self.inputs.grayordinates) - self._results["out_file"] = _create_cifti_image( + self._results['out_file'] = _create_cifti_image( self.inputs.bold_file, volume_labels, self.inputs.surface_bolds, @@ -116,22 +115,22 @@ def _run_interface(self, runtime): self.inputs.TR, metadata, ) - metadata_file = Path("bold.dtseries.json").absolute() + metadata_file = Path('bold.dtseries.json').absolute() metadata_file.write_text(json.dumps(metadata, indent=2)) - self._results["out_metadata"] = str(metadata_file) + self._results['out_metadata'] = str(metadata_file) return runtime class _CiftiNameSourceInputSpec(BaseInterfaceInputSpec): space = traits.Str( mandatory=True, - desc="the space identifier", + desc='the space identifier', ) - density = traits.Str(desc="density label") + density = traits.Str(desc='density label') class _CiftiNameSourceOutputSpec(TraitedSpec): - out_name = traits.Str(desc="(partial) filename formatted according to template") + out_name = traits.Str(desc='(partial) filename formatted according to template') class CiftiNameSource(SimpleInterface): @@ -162,11 +161,11 @@ def _run_interface(self, runtime): entities.append(('den', self.inputs.density)) out_name = '_'.join([f'{k}-{v}' for k, v in entities] + ['bold.dtseries']) - self._results["out_name"] = out_name + self._results['out_name'] = out_name return runtime -def _prepare_cifti(grayordinates: str) -> typing.Tuple[list, str, dict]: +def _prepare_cifti(grayordinates: str) -> tuple[list, str, dict]: """ Fetch the required templates needed for CIFTI-2 generation, based on input surface density. @@ -199,21 +198,11 @@ def _prepare_cifti(grayordinates: str) -> typing.Tuple[list, str, dict]: """ grayord_key = { - "91k": { - "surface-den": "32k", - "tf-res": "02", - "grayords": "91,282", - "res-mm": "2mm" - }, - "170k": { - "surface-den": "59k", - "tf-res": "06", - "grayords": "170,494", - "res-mm": "1.6mm" - } + '91k': {'surface-den': '32k', 'tf-res': '02', 'grayords': '91,282', 'res-mm': '2mm'}, + '170k': {'surface-den': '59k', 'tf-res': '06', 'grayords': '170,494', 'res-mm': '1.6mm'}, } if grayordinates not in grayord_key: - raise NotImplementedError("Grayordinates {grayordinates} is not supported.") + raise NotImplementedError(f'Grayordinates {grayordinates} is not supported.') tf_vol_res = grayord_key[grayordinates]['tf-res'] total_grayords = grayord_key[grayordinates]['grayords'] @@ -223,41 +212,37 @@ def _prepare_cifti(grayordinates: str) -> typing.Tuple[list, str, dict]: surface_labels = [ str( tf.get( - "fsLR", + 'fsLR', density=surface_density, hemi=hemi, - desc="nomedialwall", - suffix="dparc", + desc='nomedialwall', + suffix='dparc', raise_empty=True, ) ) - for hemi in ("L", "R") + for hemi in ('L', 'R') ] volume_label = str( tf.get( - "MNI152NLin6Asym", - suffix="dseg", - atlas="HCP", - resolution=tf_vol_res, - raise_empty=True + 'MNI152NLin6Asym', suffix='dseg', atlas='HCP', resolution=tf_vol_res, raise_empty=True ) ) - tf_url = "https://templateflow.s3.amazonaws.com" - volume_url = f"{tf_url}/tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-{tf_vol_res}_T1w.nii.gz" + tf_url = 'https://templateflow.s3.amazonaws.com' + volume_url = f'{tf_url}/tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-{tf_vol_res}_T1w.nii.gz' surfaces_url = ( # midthickness is the default, but varying levels of inflation are all valid -
f"{tf_url}/tpl-fsLR/tpl-fsLR_den-{surface_density}_hemi-%s_midthickness.surf.gii" + f'{tf_url}/tpl-fsLR/tpl-fsLR_den-{surface_density}_hemi-%s_midthickness.surf.gii' ) metadata = { - "Density": ( - f"{total_grayords} grayordinates corresponding to all of the grey matter sampled at a " - f"{res_mm} average vertex spacing on the surface and as {res_mm} voxels subcortically" + 'Density': ( + f'{total_grayords} grayordinates corresponding to all of the grey matter sampled at a ' + f'{res_mm} average vertex spacing on the surface and as {res_mm} voxels subcortically' ), - "SpatialReference": { - "VolumeReference": volume_url, - "CIFTI_STRUCTURE_LEFT_CORTEX": surfaces_url % "L", - "CIFTI_STRUCTURE_RIGHT_CORTEX": surfaces_url % "R", - } + 'SpatialReference': { + 'VolumeReference': volume_url, + 'CIFTI_STRUCTURE_LEFT_CORTEX': surfaces_url % 'L', + 'CIFTI_STRUCTURE_RIGHT_CORTEX': surfaces_url % 'R', + }, } return surface_labels, volume_label, metadata @@ -265,10 +250,10 @@ def _prepare_cifti(grayordinates: str) -> typing.Tuple[list, str, dict]: def _create_cifti_image( bold_file: str, volume_label: str, - bold_surfs: typing.Tuple[str, str], - surface_labels: typing.Tuple[str, str], + bold_surfs: tuple[str, str], + surface_labels: tuple[str, str], tr: float, - metadata: typing.Optional[dict] = None, + metadata: dict | None = None, ): """ Generate CIFTI image in target space. @@ -296,31 +281,31 @@ def _create_cifti_image( bold_img = nb.load(bold_file) label_img = nb.load(volume_label) if label_img.shape != bold_img.shape[:3]: - warnings.warn("Resampling bold volume to match label dimensions") + warnings.warn('Resampling bold volume to match label dimensions', stacklevel=1) bold_img = resample_to_img(bold_img, label_img) # ensure images match HCP orientation (LAS) - bold_img = reorient_image(bold_img, target_ornt="LAS") - label_img = reorient_image(label_img, target_ornt="LAS") + bold_img = reorient_image(bold_img, target_ornt='LAS') + label_img = reorient_image(label_img, target_ornt='LAS') - bold_data = bold_img.get_fdata(dtype="float32") + bold_data = bold_img.get_fdata(dtype='float32') timepoints = bold_img.shape[3] - label_data = np.asanyarray(label_img.dataobj).astype("int16") + label_data = np.asanyarray(label_img.dataobj).astype('int16') # Create brain models idx_offset = 0 brainmodels = [] - bm_ts = np.empty((timepoints, 0), dtype="float32") + bm_ts = np.empty((timepoints, 0), dtype='float32') for structure, labels in CIFTI_STRUCT_WITH_LABELS.items(): if labels is None: # surface model - model_type = "CIFTI_MODEL_TYPE_SURFACE" + model_type = 'CIFTI_MODEL_TYPE_SURFACE' # use the corresponding annotation - hemi = structure.split("_")[-1] + hemi = structure.split('_')[-1] # currently only supports L/R cortex - surf_ts = nb.load(bold_surfs[hemi == "RIGHT"]) + surf_ts = nb.load(bold_surfs[hemi == 'RIGHT']) surf_verts = len(surf_ts.darrays[0].data) - labels = nb.load(surface_labels[hemi == "RIGHT"]) + labels = nb.load(surface_labels[hemi == 'RIGHT']) medial = np.nonzero(labels.darrays[0].data)[0] # extract values across volumes ts = np.array([tsarr.data[medial] for tsarr in surf_ts.darrays]) @@ -337,7 +322,7 @@ def _create_cifti_image( idx_offset += len(vert_idx) bm_ts = np.column_stack((bm_ts, ts)) else: - model_type = "CIFTI_MODEL_TYPE_VOXELS" + model_type = 'CIFTI_MODEL_TYPE_VOXELS' vox = [] ts = [] for label in labels: @@ -374,21 +359,21 @@ def _create_cifti_image( # generate Matrix information series_map = ci.Cifti2MatrixIndicesMap( (0,), - "CIFTI_INDEX_TYPE_SERIES", + 
'CIFTI_INDEX_TYPE_SERIES', number_of_series_points=timepoints, series_exponent=0, series_start=0.0, series_step=tr, - series_unit="SECOND", + series_unit='SECOND', ) geometry_map = ci.Cifti2MatrixIndicesMap( - (1,), "CIFTI_INDEX_TYPE_BRAIN_MODELS", maps=brainmodels + (1,), 'CIFTI_INDEX_TYPE_BRAIN_MODELS', maps=brainmodels ) # provide some metadata to CIFTI matrix if not metadata: metadata = { - "surface": "fsLR", - "volume": "MNI152NLin6Asym", + 'surface': 'fsLR', + 'volume': 'MNI152NLin6Asym', } # generate and save CIFTI image matrix = ci.Cifti2Matrix() @@ -398,8 +383,8 @@ def _create_cifti_image( hdr = ci.Cifti2Header(matrix) img = ci.Cifti2Image(dataobj=bm_ts, header=hdr) img.set_data_dtype(bold_img.get_data_dtype()) - img.nifti_header.set_intent("NIFTI_INTENT_CONNECTIVITY_DENSE_SERIES") + img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_DENSE_SERIES') - out_file = "{}.dtseries.nii".format(split_filename(bold_file)[1]) + out_file = f'{split_filename(bold_file)[1]}.dtseries.nii' ci.save(img, out_file) return Path.cwd() / out_file diff --git a/niworkflows/interfaces/confounds.py b/niworkflows/interfaces/confounds.py index 0a7e7c1ac7b..1f7d026b2ef 100644 --- a/niworkflows/interfaces/confounds.py +++ b/niworkflows/interfaces/confounds.py @@ -21,34 +21,34 @@ # https://www.nipreps.org/community/licensing/ # """Select terms for a confound model, and compute any requisite expansions.""" + +import operator import os import re +from collections import OrderedDict, deque +from functools import reduce + import numpy as np -import operator import pandas as pd -from functools import reduce -from collections import deque, OrderedDict -from nipype.utils.filemanip import fname_presuffix -from nipype.utils.misc import normalize_mc_params from nipype.interfaces.base import ( - traits, - TraitedSpec, BaseInterfaceInputSpec, File, - isdefined, SimpleInterface, + TraitedSpec, + isdefined, + traits, ) +from nipype.utils.filemanip import fname_presuffix +from nipype.utils.misc import normalize_mc_params class _NormalizeMotionParamsInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="the input parameters file") - format = traits.Enum( - "FSL", "AFNI", "FSFAST", "NIPY", usedefault=True, desc="output format" - ) + in_file = File(exists=True, mandatory=True, desc='the input parameters file') + format = traits.Enum('FSL', 'AFNI', 'FSFAST', 'NIPY', usedefault=True, desc='output format') class _NormalizeMotionParamsOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="written file path") + out_file = File(exists=True, desc='written file path') class NormalizeMotionParams(SimpleInterface): @@ -62,8 +62,8 @@ def _run_interface(self, runtime): mpars = np.apply_along_axis( func1d=normalize_mc_params, axis=1, arr=mpars, source=self.inputs.format ) - self._results["out_file"] = os.path.join(runtime.cwd, "motion_params.txt") - np.savetxt(self._results["out_file"], mpars) + self._results['out_file'] = os.path.join(runtime.cwd, 'motion_params.txt') + np.savetxt(self._results['out_file'], mpars) return runtime @@ -71,11 +71,11 @@ class _ExpandModelInputSpec(BaseInterfaceInputSpec): confounds_file = File( exists=True, mandatory=True, - desc="TSV containing confound time series for " - "expansion according to the specified formula.", + desc='TSV containing confound time series for ' + 'expansion according to the specified formula.', ) model_formula = traits.Str( - "(dd1(rps + wm + csf + gsr))^^2 + others", + '(dd1(rps + wm + csf + gsr))^^2 + others', usedefault=True, desc="""\ 
Formula for generating model expansions. By default, the @@ -104,11 +104,11 @@ class _ExpandModelInputSpec(BaseInterfaceInputSpec): """, ) - output_file = File(desc="Output path") + output_file = File(desc='Output path') class _ExpandModelOutputSpec(TraitedSpec): - confounds_file = File(exists=True, desc="Output confounds file") + confounds_file = File(exists=True, desc='Output confounds file') class ExpandModel(SimpleInterface): @@ -123,19 +123,19 @@ def _run_interface(self, runtime): else: out_file = fname_presuffix( self.inputs.confounds_file, - suffix="_expansion.tsv", + suffix='_expansion.tsv', newpath=runtime.cwd, use_ext=False, ) - confounds_data = pd.read_csv(self.inputs.confounds_file, sep="\t") + confounds_data = pd.read_csv(self.inputs.confounds_file, sep='\t') _, confounds_data = parse_formula( model_formula=self.inputs.model_formula, parent_data=confounds_data, unscramble=True, ) - confounds_data.to_csv(out_file, sep="\t", index=False, na_rep="n/a") - self._results["confounds_file"] = out_file + confounds_data.to_csv(out_file, sep='\t', index=False, na_rep='n/a') + self._results['confounds_file'] = out_file return runtime @@ -143,52 +143,50 @@ class _SpikeRegressorsInputSpec(BaseInterfaceInputSpec): confounds_file = File( exists=True, mandatory=True, - desc="TSV containing criterion time series (e.g., framewise " - "displacement, DVARS) to be used for creating spike regressors.", + desc='TSV containing criterion time series (e.g., framewise ' + 'displacement, DVARS) to be used for creating spike regressors.', ) fd_thresh = traits.Float( 0.5, usedefault=True, - desc="Minimum framewise displacement threshold for flagging a frame " - "as a spike.", + desc='Minimum framewise displacement threshold for flagging a frame as a spike.', ) dvars_thresh = traits.Float( 1.5, usedefault=True, - desc="Minimum standardised DVARS threshold for flagging a frame as a spike.", + desc='Minimum standardised DVARS threshold for flagging a frame as a spike.', ) header_prefix = traits.Str( - "motion_outlier", + 'motion_outlier', usedefault=True, - desc="Prefix for spikes in the output TSV header", + desc='Prefix for spikes in the output TSV header', ) lags = traits.List( traits.Int, value=[0], usedefault=True, - desc="Relative indices of lagging frames to flag for each flagged frame", + desc='Relative indices of lagging frames to flag for each flagged frame', ) minimum_contiguous = traits.Either( None, traits.Int, usedefault=True, - desc="Minimum number of contiguous volumes required to avoid " - "flagging as a spike", + desc='Minimum number of contiguous volumes required to avoid flagging as a spike', ) concatenate = traits.Bool( True, usedefault=True, - desc="Indicates whether to concatenate spikes to existing confounds " - "or return spikes only", + desc='Indicates whether to concatenate spikes to existing confounds ' + 'or return spikes only', ) output_format = traits.Enum( - "spikes", "mask", usedefault=True, desc="Format of output (spikes or mask)" + 'spikes', 'mask', usedefault=True, desc='Format of output (spikes or mask)' ) - output_file = File(desc="Output path") + output_file = File(desc='Output path') class _SpikeRegressorsOutputSpec(TraitedSpec): - confounds_file = File(exists=True, desc="Output confounds file") + confounds_file = File(exists=True, desc='Output confounds file') class SpikeRegressors(SimpleInterface): @@ -203,17 +201,17 @@ def _run_interface(self, runtime): else: out_file = fname_presuffix( self.inputs.confounds_file, - suffix="_desc-motion_outliers.tsv", + 
suffix='_desc-motion_outliers.tsv', newpath=runtime.cwd, use_ext=False, ) spike_criteria = { - "framewise_displacement": (">", self.inputs.fd_thresh), - "std_dvars": (">", self.inputs.dvars_thresh), + 'framewise_displacement': ('>', self.inputs.fd_thresh), + 'std_dvars': ('>', self.inputs.dvars_thresh), } - confounds_data = pd.read_csv(self.inputs.confounds_file, sep="\t") + confounds_data = pd.read_csv(self.inputs.confounds_file, sep='\t') confounds_data = spike_regressors( data=confounds_data, criteria=spike_criteria, @@ -223,19 +221,19 @@ def _run_interface(self, runtime): concatenate=self.inputs.concatenate, output=self.inputs.output_format, ) - confounds_data.to_csv(out_file, sep="\t", index=False, na_rep="n/a") - self._results["confounds_file"] = out_file + confounds_data.to_csv(out_file, sep='\t', index=False, na_rep='n/a') + self._results['confounds_file'] = out_file return runtime def spike_regressors( data, criteria=None, - header_prefix="motion_outlier", + header_prefix='motion_outlier', lags=None, minimum_contiguous=None, concatenate=True, - output="spikes", + output='spikes', ): """ Add spike regressors to a confound/nuisance matrix. @@ -288,30 +286,30 @@ def spike_regressors( indices = range(data.shape[0]) lags = lags or [0] criteria = criteria or { - "framewise_displacement": (">", 0.5), - "std_dvars": (">", 1.5), + 'framewise_displacement': ('>', 0.5), + 'std_dvars': ('>', 1.5), } for metric, (criterion, threshold) in criteria.items(): - if criterion == "<": + if criterion == '<': mask[metric] = set(np.where(data[metric] < threshold)[0]) - elif criterion == ">": + elif criterion == '>': mask[metric] = set(np.where(data[metric] > threshold)[0]) mask = reduce(operator.or_, mask.values()) for lag in lags: - mask = set([m + lag for m in mask]) | mask + mask = {m + lag for m in mask} | mask mask = mask.intersection(indices) if minimum_contiguous is not None: post_final = data.shape[0] + 1 - epoch_length = np.diff(sorted(mask | set([-1, post_final]))) - 1 - epoch_end = sorted(mask | set([post_final])) + epoch_length = np.diff(sorted(mask | {-1, post_final})) - 1 + epoch_end = sorted(mask | {post_final}) for end, length in zip(epoch_end, epoch_length): if length < minimum_contiguous: mask = mask | set(range(end - length, end)) mask = mask.intersection(indices) - if output == "mask": + if output == 'mask': spikes = np.zeros(data.shape[0]) spikes[list(mask)] = 1 spikes = pd.DataFrame(data=spikes, columns=[header_prefix]) @@ -319,7 +317,7 @@ def spike_regressors( spikes = np.zeros((max(indices) + 1, len(mask))) for i, m in enumerate(sorted(mask)): spikes[m, i] = 1 - header = ["{:s}{:02d}".format(header_prefix, vol) for vol in range(len(mask))] + header = [f'{header_prefix:s}{vol:02d}' for vol in range(len(mask))] spikes = pd.DataFrame(data=spikes, columns=header) if concatenate: return pd.concat((data, spikes), axis=1) @@ -358,9 +356,9 @@ def temporal_derivatives(order, variables, data): if 0 in order: data_deriv[0] = data[variables] variables_deriv[0] = variables - order = set(order) - set([0]) + order = set(order) - {0} for o in order: - variables_deriv[o] = ["{}_derivative{}".format(v, o) for v in variables] + variables_deriv[o] = [f'{v}_derivative{o}' for v in variables] data_deriv[o] = np.tile(np.nan, data[variables].shape) data_deriv[o][o:, :] = np.diff(data[variables], n=o, axis=0) variables_deriv = reduce(operator.add, variables_deriv.values()) @@ -401,9 +399,9 @@ def exponential_terms(order, variables, data): if 1 in order: data_exp[1] = data[variables] variables_exp[1] = 
variables - order = set(order) - set([1]) + order = set(order) - {1} for o in order: - variables_exp[o] = ["{}_power{}".format(v, o) for v in variables] + variables_exp[o] = [f'{v}_power{o}' for v in variables] data_exp[o] = data[variables] ** o variables_exp = reduce(operator.add, variables_exp.values()) data_exp = pd.DataFrame( @@ -416,7 +414,7 @@ def _order_as_range(order): """Convert a hyphenated string representing order for derivative or exponential terms into a range object that can be passed as input to the appropriate expansion function.""" - order = order.split("-") + order = order.split('-') order = [int(o) for o in order] if len(order) > 1: order = range(order[0], (order[-1] + 1)) @@ -427,12 +425,12 @@ def _check_and_expand_exponential(expr, variables, data): """Check if the current operation specifies exponential expansion. ^^6 specifies all powers up to the 6th, ^5-6 the 5th and 6th powers, ^6 the 6th only.""" - if re.search(r"\^\^[0-9]+$", expr): - order = re.compile(r"\^\^([0-9]+)$").findall(expr) + if re.search(r'\^\^[0-9]+$', expr): + order = re.compile(r'\^\^([0-9]+)$').findall(expr) order = range(1, int(*order) + 1) variables, data = exponential_terms(order, variables, data) - elif re.search(r"\^[0-9]+[\-]?[0-9]*$", expr): - order = re.compile(r"\^([0-9]+[\-]?[0-9]*)").findall(expr) + elif re.search(r'\^[0-9]+[\-]?[0-9]*$', expr): + order = re.compile(r'\^([0-9]+[\-]?[0-9]*)').findall(expr) order = _order_as_range(*order) variables, data = exponential_terms(order, variables, data) return variables, data @@ -442,12 +440,12 @@ def _check_and_expand_derivative(expr, variables, data): """Check if the current operation specifies a temporal derivative. dd6x specifies all derivatives up to the 6th, d5-6x the 5th and 6th, d6x the 6th only.""" - if re.search(r"^dd[0-9]+", expr): - order = re.compile(r"^dd([0-9]+)").findall(expr) + if re.search(r'^dd[0-9]+', expr): + order = re.compile(r'^dd([0-9]+)').findall(expr) order = range(0, int(*order) + 1) (variables, data) = temporal_derivatives(order, variables, data) - elif re.search(r"^d[0-9]+[\-]?[0-9]*", expr): - order = re.compile(r"^d([0-9]+[\-]?[0-9]*)").findall(expr) + elif re.search(r'^d[0-9]+[\-]?[0-9]*', expr): + order = re.compile(r'^d([0-9]+[\-]?[0-9]*)').findall(expr) order = _order_as_range(*order) (variables, data) = temporal_derivatives(order, variables, data) return variables, data @@ -458,11 +456,11 @@ def _check_and_expand_subformula(expression, parent_data, variables, data): where appropriate.""" grouping_depth = 0 for i, char in enumerate(expression): - if char == "(": + if char == '(': if grouping_depth == 0: formula_delimiter = i + 1 grouping_depth += 1 - elif char == ")": + elif char == ')': grouping_depth -= 1 if grouping_depth == 0: expr = expression[formula_delimiter:i].strip() @@ -492,9 +490,7 @@ def parse_expression(expression, parent_data): """ variables = None data = None - variables, data = _check_and_expand_subformula( - expression, parent_data, variables, data - ) + variables, data = _check_and_expand_subformula(expression, parent_data, variables, data) variables, data = _check_and_expand_exponential(expression, variables, data) variables, data = _check_and_expand_derivative(expression, variables, data) if variables is None: @@ -506,53 +502,53 @@ def parse_expression(expression, parent_data): def _get_matches_from_data(regex, variables): matches = re.compile(regex) - matches = " + ".join([v for v in variables if matches.match(v)]) + matches = ' + '.join([v for v in variables if matches.match(v)]) 
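For illustration, spike_regressors() above can also be driven directly on a confounds table, outside the SpikeRegressors interface; a minimal sketch, assuming a DataFrame that already contains framewise_displacement and std_dvars columns (the file name is hypothetical, the thresholds are the defaults shown above):

import pandas as pd

from niworkflows.interfaces.confounds import spike_regressors

confounds = pd.read_csv('sub-01_desc-confounds_timeseries.tsv', sep='\t', na_values='n/a')
with_spikes = spike_regressors(
    confounds,
    criteria={'framewise_displacement': ('>', 0.5), 'std_dvars': ('>', 1.5)},
    lags=[0, 1],        # also flag the volume following each suprathreshold frame
    concatenate=True,   # append one motion_outlierNN column per flagged volume
)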
return matches def _get_variables_from_formula(model_formula): symbols_to_clear = [ - " ", - r"\(", - r"\)", - "dd[0-9]+", - r"d[0-9]+[\-]?[0-9]*", - r"\^\^[0-9]+", - r"\^[0-9]+[\-]?[0-9]*", + ' ', + r'\(', + r'\)', + 'dd[0-9]+', + r'd[0-9]+[\-]?[0-9]*', + r'\^\^[0-9]+', + r'\^[0-9]+[\-]?[0-9]*', ] for symbol in symbols_to_clear: - model_formula = re.sub(symbol, "", model_formula) - variables = model_formula.split("+") + model_formula = re.sub(symbol, '', model_formula) + variables = model_formula.split('+') return variables def _expand_shorthand(model_formula, variables): """Expand shorthand terms in the model formula.""" - wm = "white_matter" - gsr = "global_signal" - rps = "trans_x + trans_y + trans_z + rot_x + rot_y + rot_z" - fd = "framewise_displacement" - acc = _get_matches_from_data("a_comp_cor_[0-9]+", variables) - tcc = _get_matches_from_data("t_comp_cor_[0-9]+", variables) - dv = _get_matches_from_data("^std_dvars$", variables) - dvall = _get_matches_from_data(".*dvars", variables) - nss = _get_matches_from_data("non_steady_state_outlier[0-9]+", variables) - spikes = _get_matches_from_data("motion_outlier[0-9]+", variables) - - model_formula = re.sub("wm", wm, model_formula) - model_formula = re.sub("gsr", gsr, model_formula) - model_formula = re.sub("rps", rps, model_formula) - model_formula = re.sub("fd", fd, model_formula) - model_formula = re.sub("acc", acc, model_formula) - model_formula = re.sub("tcc", tcc, model_formula) - model_formula = re.sub("dv", dv, model_formula) - model_formula = re.sub("dvall", dvall, model_formula) - model_formula = re.sub("nss", nss, model_formula) - model_formula = re.sub("spikes", spikes, model_formula) + wm = 'white_matter' + gsr = 'global_signal' + rps = 'trans_x + trans_y + trans_z + rot_x + rot_y + rot_z' + fd = 'framewise_displacement' + acc = _get_matches_from_data('a_comp_cor_[0-9]+', variables) + tcc = _get_matches_from_data('t_comp_cor_[0-9]+', variables) + dv = _get_matches_from_data('^std_dvars$', variables) + dvall = _get_matches_from_data('.*dvars', variables) + nss = _get_matches_from_data('non_steady_state_outlier[0-9]+', variables) + spikes = _get_matches_from_data('motion_outlier[0-9]+', variables) + + model_formula = re.sub('wm', wm, model_formula) + model_formula = re.sub('gsr', gsr, model_formula) + model_formula = re.sub('rps', rps, model_formula) + model_formula = re.sub('fd', fd, model_formula) + model_formula = re.sub('acc', acc, model_formula) + model_formula = re.sub('tcc', tcc, model_formula) + model_formula = re.sub('dv', dv, model_formula) + model_formula = re.sub('dvall', dvall, model_formula) + model_formula = re.sub('nss', nss, model_formula) + model_formula = re.sub('spikes', spikes, model_formula) formula_variables = _get_variables_from_formula(model_formula) - others = " + ".join(set(variables) - set(formula_variables)) - model_formula = re.sub("others", others, model_formula) + others = ' + '.join(set(variables) - set(formula_variables)) + model_formula = re.sub('others', others, model_formula) return model_formula @@ -561,12 +557,12 @@ def _unscramble_regressor_columns(parent_data, data): the same order as the input data with any expansion columns inserted immediately after the originals. 
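Likewise, the shorthand tokens expanded above compose with the derivative (d/dd) and power (^/^^) operators when passed to parse_formula; a minimal sketch, assuming a confounds table that provides the six motion parameters and framewise_displacement (file names are hypothetical):

import pandas as pd

from niworkflows.interfaces.confounds import parse_formula

confounds = pd.read_csv('sub-01_desc-confounds_timeseries.tsv', sep='\t', na_values='n/a')
# rps -> the six rigid-body motion parameters; dd1 -> also add their first temporal derivatives;
# ^^2 -> add quadratic terms of every column inside the parentheses; fd -> framewise_displacement.
variables, expanded = parse_formula('(dd1(rps))^^2 + fd', confounds)
expanded.to_csv('confounds_expansion.tsv', sep='\t', index=False, na_rep='n/a')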
""" - matches = ["_power[0-9]+", "_derivative[0-9]+"] + matches = ['_power[0-9]+', '_derivative[0-9]+'] var = OrderedDict((c, deque()) for c in parent_data.columns) for c in data.columns: col = c for m in matches: - col = re.sub(m, "", col) + col = re.sub(m, '', col) if col == c: var[col].appendleft(c) else: @@ -629,11 +625,11 @@ def parse_formula(model_formula, parent_data, unscramble=False): grouping_depth = 0 model_formula = _expand_shorthand(model_formula, parent_data.columns) for i, char in enumerate(model_formula): - if char == "(": + if char == '(': grouping_depth += 1 - elif char == ")": + elif char == ')': grouping_depth -= 1 - elif grouping_depth == 0 and char == "+": + elif grouping_depth == 0 and char == '+': expression = model_formula[expr_delimiter:i].strip() variables[expression] = None data[expression] = None @@ -642,14 +638,12 @@ def parse_formula(model_formula, parent_data, unscramble=False): variables[expression] = None data[expression] = None for expression in list(variables): - if expression[0] == "(" and expression[-1] == ")": + if expression[0] == '(' and expression[-1] == ')': (variables[expression], data[expression]) = parse_formula( expression[1:-1], parent_data ) else: - (variables[expression], data[expression]) = parse_expression( - expression, parent_data - ) + (variables[expression], data[expression]) = parse_expression(expression, parent_data) variables = list(set(reduce(operator.add, variables.values()))) data = pd.concat((data.values()), axis=1) diff --git a/niworkflows/interfaces/conftest.py b/niworkflows/interfaces/conftest.py index 9b42b4002a4..a8511d04bb2 100644 --- a/niworkflows/interfaces/conftest.py +++ b/niworkflows/interfaces/conftest.py @@ -19,17 +19,17 @@ def _chdir(path): os.chdir(cwd) -@pytest.fixture(scope="module") +@pytest.fixture(scope='module') def data_dir(): - return Path(__file__).parent / "tests" / "data" + return Path(__file__).parent / 'tests' / 'data' @pytest.fixture(autouse=True) def _docdir(request, tmp_path): # Trigger ONLY for the doctests. - doctest_plugin = request.config.pluginmanager.getplugin("doctest") + doctest_plugin = request.config.pluginmanager.getplugin('doctest') if isinstance(request.node, doctest_plugin.DoctestItem): - copytree(Path(__file__).parent / "tests" / "data", tmp_path, dirs_exist_ok=True) + copytree(Path(__file__).parent / 'tests' / 'data', tmp_path, dirs_exist_ok=True) # Chdir only for the duration of the test. with _chdir(tmp_path): diff --git a/niworkflows/interfaces/fixes.py b/niworkflows/interfaces/fixes.py index f9aef937281..d308539b2f7 100644 --- a/niworkflows/interfaces/fixes.py +++ b/niworkflows/interfaces/fixes.py @@ -23,18 +23,21 @@ import os import nibabel as nb - -from nipype.interfaces.base import traits, InputMultiObject, File -from nipype.utils.filemanip import fname_presuffix -from nipype.interfaces.ants.resampling import ApplyTransforms, ApplyTransformsInputSpec from nipype.interfaces.ants.registration import ( Registration, +) +from nipype.interfaces.ants.registration import ( RegistrationInputSpec as _RegistrationInputSpec, ) +from nipype.interfaces.ants.resampling import ApplyTransforms, ApplyTransformsInputSpec from nipype.interfaces.ants.segmentation import ( N4BiasFieldCorrection as VanillaN4, +) +from nipype.interfaces.ants.segmentation import ( N4BiasFieldCorrectionOutputSpec as VanillaN4OutputSpec, ) +from nipype.interfaces.base import File, InputMultiObject, traits +from nipype.utils.filemanip import fname_presuffix from .. 
import __version__ from ..utils.images import _copyxform @@ -43,10 +46,10 @@ class _FixTraitApplyTransformsInputSpec(ApplyTransformsInputSpec): transforms = InputMultiObject( traits.Either(File(exists=True), 'identity'), - argstr="%s", + argstr='%s', mandatory=True, - desc="transform files: will be applied in reverse order. For " - "example, the last specified transform will be applied first.", + desc='transform files: will be applied in reverse order. For ' + 'example, the last specified transform will be applied first.', ) @@ -61,14 +64,12 @@ class FixHeaderApplyTransforms(ApplyTransforms): def _run_interface(self, runtime, correct_return_codes=(0,)): # Run normally - runtime = super()._run_interface( - runtime, correct_return_codes - ) + runtime = super()._run_interface(runtime, correct_return_codes) _copyxform( self.inputs.reference_image, - os.path.abspath(self._gen_filename("output_image")), - message="%s (niworkflows v%s)" % (self.__class__.__name__, __version__), + os.path.abspath(self._gen_filename('output_image')), + message=f'{self.__class__.__name__} (niworkflows v{__version__})', ) return runtime @@ -77,14 +78,14 @@ class _FixHeaderRegistrationInputSpec(_RegistrationInputSpec): restrict_deformation = traits.List( traits.List(traits.Range(low=0.0, high=1.0)), desc=( - "This option allows the user to restrict the optimization of " - "the displacement field, translation, rigid or affine transform " - "on a per-component basis. For example, if one wants to limit " - "the deformation or rotation of 3-D volume to the first two " - "dimensions, this is possible by specifying a weight vector of " + 'This option allows the user to restrict the optimization of ' + 'the displacement field, translation, rigid or affine transform ' + 'on a per-component basis. For example, if one wants to limit ' + 'the deformation or rotation of 3-D volume to the first two ' + 'dimensions, this is possible by specifying a weight vector of ' "'1x1x0' for a deformation field or '1x1x0x1x1x0' for a rigid " - "transformation. Low-dimensional restriction only works if " - "there are no preceding transformations." + 'transformation. Low-dimensional restriction only works if ' + 'there are no preceding transformations.' 
), ) @@ -100,9 +101,7 @@ class FixHeaderRegistration(Registration): def _run_interface(self, runtime, correct_return_codes=(0,)): # Run normally - runtime = super()._run_interface( - runtime, correct_return_codes - ) + runtime = super()._run_interface(runtime, correct_return_codes) # Forward transform out_file = self._get_outputfilenames(inverse=False) @@ -110,7 +109,7 @@ def _run_interface(self, runtime, correct_return_codes=(0,)): _copyxform( self.inputs.fixed_image[0], os.path.abspath(out_file), - message="%s (niworkflows v%s)" % (self.__class__.__name__, __version__), + message=f'{self.__class__.__name__} (niworkflows v{__version__})', ) # Inverse transform @@ -119,7 +118,7 @@ def _run_interface(self, runtime, correct_return_codes=(0,)): _copyxform( self.inputs.moving_image[0], os.path.abspath(out_file), - message="%s (niworkflows v%s)" % (self.__class__.__name__, __version__), + message=f'{self.__class__.__name__} (niworkflows v{__version__})', ) return runtime @@ -129,8 +128,8 @@ class _FixN4BiasFieldCorrectionOutputSpec(VanillaN4OutputSpec): negative_values = traits.Bool( False, usedefault=True, - desc="Indicates whether the input was corrected for " - "nonpositive values by adding a constant offset.", + desc='Indicates whether the input was corrected for ' + 'nonpositive values by adding a constant offset.', ) @@ -146,11 +145,9 @@ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def _format_arg(self, name, trait_spec, value): - if name == "input_image": + if name == 'input_image': return trait_spec.argstr % self._input_image - return super()._format_arg( - name, trait_spec, value - ) + return super()._format_arg(name, trait_spec, value) def _parse_inputs(self, skip=None): self._input_image = self.inputs.input_image @@ -159,7 +156,7 @@ def _parse_inputs(self, skip=None): datamin = input_nii.get_fdata().min() if datamin < 0: self._input_image = fname_presuffix( - self.inputs.input_image, suffix="_scaled", newpath=os.getcwd() + self.inputs.input_image, suffix='_scaled', newpath=os.getcwd() ) data = input_nii.get_fdata() - datamin newnii = input_nii.__class__(data, input_nii.affine, input_nii.header) @@ -170,5 +167,5 @@ def _parse_inputs(self, skip=None): def _list_outputs(self): outputs = super()._list_outputs() - outputs["negative_values"] = self._negative_values + outputs['negative_values'] = self._negative_values return outputs diff --git a/niworkflows/interfaces/freesurfer.py b/niworkflows/interfaces/freesurfer.py index 5b31360387f..84901aea6a8 100644 --- a/niworkflows/interfaces/freesurfer.py +++ b/niworkflows/interfaces/freesurfer.py @@ -24,23 +24,24 @@ import os.path as op from pathlib import Path + import nibabel as nb import numpy as np - -from nipype.utils.filemanip import copyfile, filename_to_list, fname_presuffix +from nipype.interfaces import freesurfer as fs from nipype.interfaces.base import ( - isdefined, - InputMultiPath, BaseInterfaceInputSpec, - TraitedSpec, + Directory, File, + InputMultiPath, + SimpleInterface, + TraitedSpec, + isdefined, traits, - Directory, ) -from nipype.interfaces import freesurfer as fs -from nipype.interfaces.base import SimpleInterface from nipype.interfaces.freesurfer.preprocess import ConcatenateLTA, RobustRegister from nipype.interfaces.freesurfer.utils import LTAConvert +from nipype.utils.filemanip import copyfile, filename_to_list, fname_presuffix + from .reportlets.registration import BBRegisterRPT, MRICoregRPT @@ -79,18 +80,18 @@ def _num_vols(self): @property def cmdline(self): if self._num_vols() == 1: - 
return "echo Only one time point!" + return 'echo Only one time point!' return super().cmdline def _list_outputs(self): outputs = super()._list_outputs() if self._num_vols() == 1: in_file = self.inputs.in_files[0] - outputs["out_file"] = in_file - if isdefined(outputs["transform_outputs"]): - transform_file = outputs["transform_outputs"][0] + outputs['out_file'] = in_file + if isdefined(outputs['transform_outputs']): + transform_file = outputs['transform_outputs'][0] fs.utils.LTAConvert( - in_lta="identity.nofile", + in_lta='identity.nofile', source_file=in_file, target_file=in_file, out_lta=transform_file, @@ -99,7 +100,7 @@ def _list_outputs(self): class _MakeMidthicknessInputSpec(fs.utils.MRIsExpandInputSpec): - graymid = InputMultiPath(desc="Existing graymid/midthickness file") + graymid = InputMultiPath(desc='Existing graymid/midthickness file') class MakeMidthickness(fs.MRIsExpand): @@ -126,8 +127,8 @@ def cmdline(self): # as input source = None in_base = Path(self.inputs.in_file).name - mt = self._associated_file(in_base, "midthickness") - gm = self._associated_file(in_base, "graymid") + mt = self._associated_file(in_base, 'midthickness') + gm = self._associated_file(in_base, 'graymid') for surf in self.inputs.graymid: if Path(surf).name == mt: @@ -139,18 +140,18 @@ def cmdline(self): if source is None: return cmd - return "cp {} {}".format(source, self._list_outputs()["out_file"]) + return 'cp {} {}'.format(source, self._list_outputs()['out_file']) class _FSInjectBrainExtractedInputSpec(BaseInterfaceInputSpec): - subjects_dir = Directory(mandatory=True, desc="FreeSurfer SUBJECTS_DIR") - subject_id = traits.Str(mandatory=True, desc="Subject ID") - in_brain = File(mandatory=True, exists=True, desc="input file, part of a BIDS tree") + subjects_dir = Directory(mandatory=True, desc='FreeSurfer SUBJECTS_DIR') + subject_id = traits.Str(mandatory=True, desc='Subject ID') + in_brain = File(mandatory=True, exists=True, desc='input file, part of a BIDS tree') class _FSInjectBrainExtractedOutputSpec(TraitedSpec): - subjects_dir = Directory(desc="FreeSurfer SUBJECTS_DIR") - subject_id = traits.Str(desc="Subject ID") + subjects_dir = Directory(desc='FreeSurfer SUBJECTS_DIR') + subject_id = traits.Str(desc='Subject ID') class FSInjectBrainExtracted(SimpleInterface): @@ -162,31 +163,27 @@ def _run_interface(self, runtime): subjects_dir, subject_id = inject_skullstripped( self.inputs.subjects_dir, self.inputs.subject_id, self.inputs.in_brain ) - self._results["subjects_dir"] = subjects_dir - self._results["subject_id"] = subject_id + self._results['subjects_dir'] = subjects_dir + self._results['subject_id'] = subject_id return runtime class _FSDetectInputsInputSpec(BaseInterfaceInputSpec): t1w_list = InputMultiPath( - File(exists=True), mandatory=True, desc="input file, part of a BIDS tree" - ) - t2w_list = InputMultiPath(File(exists=True), desc="input file, part of a BIDS tree") - flair_list = InputMultiPath( - File(exists=True), desc="input file, part of a BIDS tree" - ) - hires_enabled = traits.Bool( - True, usedefault=True, desc="enable hi-resolution processing" + File(exists=True), mandatory=True, desc='input file, part of a BIDS tree' ) + t2w_list = InputMultiPath(File(exists=True), desc='input file, part of a BIDS tree') + flair_list = InputMultiPath(File(exists=True), desc='input file, part of a BIDS tree') + hires_enabled = traits.Bool(True, usedefault=True, desc='enable hi-resolution processing') class _FSDetectInputsOutputSpec(TraitedSpec): - t2w = File(desc="reference T2w image") - 
use_t2w = traits.Bool(desc="enable use of T2w downstream computation") - flair = File(desc="reference FLAIR image") - use_flair = traits.Bool(desc="enable use of FLAIR downstream computation") - hires = traits.Bool(desc="enable hi-res processing") - mris_inflate = traits.Str(desc="mris_inflate argument") + t2w = File(desc='reference T2w image') + use_t2w = traits.Bool(desc='enable use of T2w downstream computation') + flair = File(desc='reference FLAIR image') + use_flair = traits.Bool(desc='enable use of FLAIR downstream computation') + hires = traits.Bool(desc='enable hi-res processing') + mris_inflate = traits.Str(desc='mris_inflate argument') class FSDetectInputs(SimpleInterface): @@ -194,25 +191,23 @@ class FSDetectInputs(SimpleInterface): output_spec = _FSDetectInputsOutputSpec def _run_interface(self, runtime): - t2w, flair, self._results["hires"], mris_inflate = detect_inputs( + t2w, flair, self._results['hires'], mris_inflate = detect_inputs( self.inputs.t1w_list, t2w_list=self.inputs.t2w_list if isdefined(self.inputs.t2w_list) else None, - flair_list=self.inputs.flair_list - if isdefined(self.inputs.flair_list) - else None, + flair_list=self.inputs.flair_list if isdefined(self.inputs.flair_list) else None, hires_enabled=self.inputs.hires_enabled, ) - self._results["use_t2w"] = t2w is not None - if self._results["use_t2w"]: - self._results["t2w"] = t2w + self._results['use_t2w'] = t2w is not None + if self._results['use_t2w']: + self._results['t2w'] = t2w - self._results["use_flair"] = flair is not None - if self._results["use_flair"]: - self._results["flair"] = flair + self._results['use_flair'] = flair is not None + if self._results['use_flair']: + self._results['flair'] = flair - if self._results["hires"]: - self._results["mris_inflate"] = mris_inflate + if self._results['hires']: + self._results['mris_inflate'] = mris_inflate return runtime @@ -234,10 +229,9 @@ class TruncateLTA: """ # Use a tuple in case some object produces multiple transforms - lta_outputs = ("out_lta_file",) + lta_outputs = ('out_lta_file',) def _post_run_hook(self, runtime): - outputs = self._list_outputs() for lta_name in self.lta_outputs: @@ -264,7 +258,7 @@ class PatchedConcatenateLTA(TruncateLTA, ConcatenateLTA): the fix is now done through mixin with TruncateLTA """ - lta_outputs = ["out_file"] + lta_outputs = ['out_file'] class PatchedLTAConvert(TruncateLTA, LTAConvert): @@ -273,7 +267,7 @@ class PatchedLTAConvert(TruncateLTA, LTAConvert): truncate filename through mixin TruncateLTA """ - lta_outputs = ("out_lta",) + lta_outputs = ('out_lta',) class PatchedBBRegisterRPT(TruncateLTA, BBRegisterRPT): @@ -285,25 +279,21 @@ class PatchedMRICoregRPT(TruncateLTA, MRICoregRPT): class PatchedRobustRegister(TruncateLTA, RobustRegister): - lta_outputs = ("out_reg_file", "half_source_xfm", "half_targ_xfm") + lta_outputs = ('out_reg_file', 'half_source_xfm', 'half_targ_xfm') class _RefineBrainMaskInputSpec(BaseInterfaceInputSpec): - in_anat = File( - exists=True, mandatory=True, desc="input anatomical reference (INU corrected)" - ) - in_aseg = File( - exists=True, mandatory=True, desc="input ``aseg`` file, in NifTi format." 
- ) + in_anat = File(exists=True, mandatory=True, desc='input anatomical reference (INU corrected)') + in_aseg = File(exists=True, mandatory=True, desc='input ``aseg`` file, in NifTi format.') in_ants = File( exists=True, mandatory=True, - desc="brain tissue segmentation generated with antsBrainExtraction.sh", + desc='brain tissue segmentation generated with antsBrainExtraction.sh', ) class _RefineBrainMaskOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="new mask") + out_file = File(exists=True, desc='new mask') class RefineBrainMask(SimpleInterface): @@ -317,35 +307,34 @@ class RefineBrainMask(SimpleInterface): output_spec = _RefineBrainMaskOutputSpec def _run_interface(self, runtime): - - self._results["out_file"] = fname_presuffix( - self.inputs.in_anat, suffix="_rbrainmask", newpath=runtime.cwd + self._results['out_file'] = fname_presuffix( + self.inputs.in_anat, suffix='_rbrainmask', newpath=runtime.cwd ) anatnii = nb.load(self.inputs.in_anat) msknii = nb.Nifti1Image( grow_mask( - anatnii.get_fdata(dtype="float32"), - np.asanyarray(nb.load(self.inputs.in_aseg).dataobj).astype("int16"), - np.asanyarray(nb.load(self.inputs.in_ants).dataobj).astype("int16"), + anatnii.get_fdata(dtype='float32'), + np.asanyarray(nb.load(self.inputs.in_aseg).dataobj).astype('int16'), + np.asanyarray(nb.load(self.inputs.in_ants).dataobj).astype('int16'), ), anatnii.affine, anatnii.header, ) msknii.set_data_dtype(np.uint8) - msknii.to_filename(self._results["out_file"]) + msknii.to_filename(self._results['out_file']) return runtime class _MedialNaNsInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="input surface file") - subjects_dir = Directory(mandatory=True, desc="FreeSurfer SUBJECTS_DIR") - density = traits.Enum("32k", "59k", "164k", desc="Input file density (fsLR only)") + in_file = File(exists=True, mandatory=True, desc='input surface file') + subjects_dir = Directory(mandatory=True, desc='FreeSurfer SUBJECTS_DIR') + density = traits.Enum('32k', '59k', '164k', desc='Input file density (fsLR only)') class _MedialNaNsOutputSpec(TraitedSpec): - out_file = File(desc="the output surface file") + out_file = File(desc='the output surface file') class MedialNaNs(SimpleInterface): @@ -355,7 +344,7 @@ class MedialNaNs(SimpleInterface): output_spec = _MedialNaNsOutputSpec def _run_interface(self, runtime): - self._results["out_file"] = medial_wall_to_nan( + self._results['out_file'] = medial_wall_to_nan( self.inputs.in_file, self.inputs.subjects_dir, self.inputs.density, @@ -401,33 +390,31 @@ def fix_lta_length(lta_file): fixed = False newfile = [] for line in lines: - if line.startswith("filename = ") and len(line.strip("\n")) >= 255: + if line.startswith('filename = ') and len(line.strip('\n')) >= 255: fixed = True - newfile.append("filename = path_too_long\n") + newfile.append('filename = path_too_long\n') else: newfile.append(line) if fixed: - Path(lta_file).write_text("".join(newfile)) + Path(lta_file).write_text(''.join(newfile)) return fixed def inject_skullstripped(subjects_dir, subject_id, skullstripped): - from nilearn.image import resample_to_img, new_img_like + from nilearn.image import new_img_like, resample_to_img - mridir = op.join(subjects_dir, subject_id, "mri") - t1 = op.join(mridir, "T1.mgz") - bm_auto = op.join(mridir, "brainmask.auto.mgz") - bm = op.join(mridir, "brainmask.mgz") + mridir = op.join(subjects_dir, subject_id, 'mri') + t1 = op.join(mridir, 'T1.mgz') + bm_auto = op.join(mridir, 'brainmask.auto.mgz') + bm = op.join(mridir, 
'brainmask.mgz') if not op.exists(bm_auto): img = nb.load(t1) mask = nb.load(skullstripped) bmask = new_img_like(mask, np.asanyarray(mask.dataobj) > 0) - resampled_mask = resample_to_img(bmask, img, "nearest") - masked_image = new_img_like( - img, np.asanyarray(img.dataobj) * resampled_mask.dataobj - ) + resampled_mask = resample_to_img(bmask, img, 'nearest') + masked_image = new_img_like(img, np.asanyarray(img.dataobj) * resampled_mask.dataobj) masked_image.to_filename(bm_auto) if not op.exists(bm): @@ -455,7 +442,7 @@ def detect_inputs(t1w_list, t2w_list=None, flair_list=None, hires_enabled=True): flair = flair_list[0] # https://surfer.nmr.mgh.harvard.edu/fswiki/SubmillimeterRecon - mris_inflate = "-n 50" if hires else None + mris_inflate = '-n 50' if hires else None return (t2w, flair, hires, mris_inflate) @@ -476,8 +463,8 @@ def refine_aseg(aseg, ball_size=4): cerebral brain is segmented out). """ - from skimage import morphology as sim from scipy.ndimage.morphology import binary_fill_holes + from skimage import morphology as sim # Read aseg data bmask = aseg.copy() @@ -522,9 +509,9 @@ def grow_mask(anat, aseg, ants_segs=None, ww=7, zval=2.0, bw=4): continue window = gm[ - pixel[0] - ww:pixel[0] + ww, - pixel[1] - ww:pixel[1] + ww, - pixel[2] - ww:pixel[2] + ww, + pixel[0] - ww : pixel[0] + ww, + pixel[1] - ww : pixel[1] + ww, + pixel[2] - ww : pixel[2] + ww, ] if np.any(window > 0): mu = window[window > 0].mean() @@ -539,27 +526,26 @@ def grow_mask(anat, aseg, ants_segs=None, ww=7, zval=2.0, bw=4): def medial_wall_to_nan(in_file, subjects_dir, den=None, newpath=None): """Convert values on medial wall to NaNs.""" import os + import nibabel as nb import numpy as np import templateflow.api as tf fn = os.path.basename(in_file) - target_subject = in_file.split(".")[1] - if not target_subject.startswith("fs"): + target_subject = in_file.split('.')[1] + if not target_subject.startswith('fs'): return in_file func = nb.load(in_file) - if target_subject.startswith("fsaverage"): + if target_subject.startswith('fsaverage'): cortex = nb.freesurfer.read_label( - os.path.join( - subjects_dir, target_subject, "label", "{}.cortex.label".format(fn[:2]) - ) + os.path.join(subjects_dir, target_subject, 'label', f'{fn[:2]}.cortex.label') ) medial = np.delete(np.arange(len(func.darrays[0].data)), cortex) - elif target_subject == "fslr" and den is not None: + elif target_subject == 'fslr' and den is not None: hemi = fn[0].upper() label_file = str( - tf.get("fsLR", hemi=hemi, desc="nomedialwall", density=den, suffix="dparc") + tf.get('fsLR', hemi=hemi, desc='nomedialwall', density=den, suffix='dparc') ) label = nb.load(label_file) medial = np.invert(label.darrays[0].data.astype(bool)) @@ -576,11 +562,12 @@ def medial_wall_to_nan(in_file, subjects_dir, den=None, newpath=None): def mri_info(fname, argument): import subprocess as sp + import numpy as np - cmd_info = "mri_info --%s %s" % (argument, fname) - proc = sp.Popen(cmd_info, stdout=sp.PIPE, shell=True) + cmd_info = f'mri_info --{argument} {fname}' + proc = sp.Popen(cmd_info, stdout=sp.PIPE, shell=True) # noqa: S602 data = bytearray(proc.stdout.read()) - mstring = np.fromstring(data.decode("utf-8"), sep="\n") + mstring = np.fromstring(data.decode('utf-8'), sep='\n') result = np.reshape(mstring, (4, -1)) return result diff --git a/niworkflows/interfaces/header.py b/niworkflows/interfaces/header.py index 18808ed6a5b..5c2c5dbf496 100644 --- a/niworkflows/interfaces/header.py +++ b/niworkflows/interfaces/header.py @@ -21,33 +21,34 @@ # 
https://www.nipreps.org/community/licensing/ # """Handling NIfTI headers.""" + import os import shutil from textwrap import indent -import numpy as np + import nibabel as nb +import numpy as np import transforms3d - from nipype import logging -from nipype.utils.filemanip import fname_presuffix from nipype.interfaces.base import ( - traits, - File, - TraitedSpec, BaseInterfaceInputSpec, - SimpleInterface, DynamicTraitedSpec, + File, + SimpleInterface, + TraitedSpec, + traits, ) from nipype.interfaces.io import add_traits -from ..utils.images import _copyxform -from .. import __version__ +from nipype.utils.filemanip import fname_presuffix +from .. import __version__ +from ..utils.images import _copyxform -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') class _CopyXFormInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec): - hdr_file = File(exists=True, mandatory=True, desc="the file we get the header from") + hdr_file = File(exists=True, mandatory=True, desc='the file we get the header from') class CopyXForm(SimpleInterface): @@ -63,7 +64,7 @@ class CopyXForm(SimpleInterface): output_spec = DynamicTraitedSpec def __init__(self, fields=None, **inputs): - self._fields = fields or ["in_file"] + self._fields = fields or ['in_file'] if isinstance(self._fields, str): self._fields = [self._fields] @@ -77,10 +78,10 @@ def _outputs(self): base = super()._outputs() if self._fields: fields = self._fields.copy() - if "in_file" in fields: - idx = fields.index("in_file") + if 'in_file' in fields: + idx = fields.index('in_file') fields.pop(idx) - fields.insert(idx, "out_file") + fields.insert(idx, 'out_file') base = add_traits(base, fields) return base @@ -92,15 +93,13 @@ def _run_interface(self, runtime): if isinstance(in_files, str): in_files = [in_files] for in_file in in_files: - out_name = fname_presuffix( - in_file, suffix="_xform", newpath=runtime.cwd - ) + out_name = fname_presuffix(in_file, suffix='_xform', newpath=runtime.cwd) # Copy and replace header shutil.copy(in_file, out_name) _copyxform( self.inputs.hdr_file, out_name, - message="CopyXForm (niworkflows v%s)" % __version__, + message=f'CopyXForm (niworkflows v{__version__})', ) self._results[f].append(out_name) @@ -108,19 +107,19 @@ def _run_interface(self, runtime): if len(self._results[f]) == 1: self._results[f] = self._results[f][0] - default = self._results.pop("in_file", None) + default = self._results.pop('in_file', None) if default: - self._results["out_file"] = default + self._results['out_file'] = default return runtime class _CopyHeaderInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="the file we get the data from") - hdr_file = File(exists=True, mandatory=True, desc="the file we get the header from") + in_file = File(exists=True, mandatory=True, desc='the file we get the data from') + hdr_file = File(exists=True, mandatory=True, desc='the file we get the header from') class _CopyHeaderOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="written file path") + out_file = File(exists=True, desc='written file path') class CopyHeader(SimpleInterface): @@ -138,19 +137,19 @@ def _run_interface(self, runtime): new_img = out_img.__class__(out_img.dataobj, in_img.affine, in_img.header) new_img.set_data_dtype(out_img.get_data_dtype()) - out_name = fname_presuffix(self.inputs.in_file, suffix="_fixhdr", newpath=".") + out_name = fname_presuffix(self.inputs.in_file, suffix='_fixhdr', newpath='.') new_img.to_filename(out_name) - 
self._results["out_file"] = out_name + self._results['out_file'] = out_name return runtime class _ValidateImageInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="input image") + in_file = File(exists=True, mandatory=True, desc='input image') class _ValidateImageOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="validated image") - out_report = File(exists=True, desc="HTML segment containing warning") + out_file = File(exists=True, desc='validated image') + out_report = File(exists=True, desc='HTML segment containing warning') class ValidateImage(SimpleInterface): @@ -207,11 +206,11 @@ class ValidateImage(SimpleInterface): def _run_interface(self, runtime): img = nb.load(self.inputs.in_file) - out_report = os.path.join(runtime.cwd, "report.html") + out_report = os.path.join(runtime.cwd, 'report.html') # Retrieve xform codes - sform_code = int(img.header._structarr["sform_code"]) - qform_code = int(img.header._structarr["qform_code"]) + sform_code = int(img.header._structarr['sform_code']) + qform_code = int(img.header._structarr['qform_code']) # Check qform is valid valid_qform = False @@ -234,50 +233,44 @@ def _run_interface(self, runtime): # Both match, qform valid (implicit with match), codes okay -> do nothing, empty report if matching_affines and qform_code > 0 and sform_code > 0: - self._results["out_file"] = self.inputs.in_file - open(out_report, "w").close() - self._results["out_report"] = out_report + self._results['out_file'] = self.inputs.in_file + open(out_report, 'w').close() + self._results['out_report'] = out_report return runtime # A new file will be written - out_fname = fname_presuffix( - self.inputs.in_file, suffix="_valid", newpath=runtime.cwd - ) - self._results["out_file"] = out_fname + out_fname = fname_presuffix(self.inputs.in_file, suffix='_valid', newpath=runtime.cwd) + self._results['out_file'] = out_fname # Row 2: if valid_qform and qform_code > 0 and (sform_code == 0 or not valid_sform): img.set_sform(qform, qform_code) - warning_txt = "Note on orientation: sform matrix set" + warning_txt = 'Note on orientation: sform matrix set' description = """\
<p class="elem-desc">The sform has been copied from qform.</p>
""" # Rows 3-4: # Note: if qform is not valid, matching_affines is False - elif (valid_sform and sform_code > 0) and ( - not matching_affines or qform_code == 0 - ): + elif (valid_sform and sform_code > 0) and (not matching_affines or qform_code == 0): img.set_qform(sform, sform_code) new_qform = img.get_qform() if valid_qform: # False alarm - the difference is due to precision loss of qform if np.allclose(new_qform, qform) and qform_code > 0: - self._results["out_file"] = self.inputs.in_file - open(out_report, "w").close() - self._results["out_report"] = out_report + self._results['out_file'] = self.inputs.in_file + open(out_report, 'w').close() + self._results['out_report'] = out_report return runtime # Replacing an existing, valid qform. Report magnitude of change. diff = np.linalg.inv(qform) @ new_qform trans, rot, _, _ = transforms3d.affines.decompose44(diff) angle = transforms3d.axangles.mat2axangle(rot)[1] xyz_unit = img.header.get_xyzt_units()[0] - if xyz_unit == "unknown": - xyz_unit = "mm" + if xyz_unit == 'unknown': + xyz_unit = 'mm' - total_trans = np.sqrt( - np.sum(trans * trans) - ) # Add angle and total_trans to report - warning_txt = "Note on orientation: qform matrix overwritten" + total_trans = np.sqrt(np.sum(trans * trans)) # Add angle and total_trans to report + warning_txt = 'Note on orientation: qform matrix overwritten' description = f"""\
<p class="elem-desc">
The qform has been copied from sform. @@ -287,7 +280,7 @@ def _run_interface(self, runtime): """ elif qform_code > 0: # qform code indicates the qform is supposed to be valid. Use more stridency. - warning_txt = "WARNING - Invalid qform information" + warning_txt = 'WARNING - Invalid qform information' description = """\
<p class="elem-desc">
The qform matrix found in the file header is invalid. @@ -298,16 +291,14 @@ def _run_interface(self, runtime): """ else: # qform_code == 0 # qform is not expected to be valids. Simple note. - warning_txt = "Note on orientation: qform matrix overwritten" - description = ( - '
<p class="elem-desc">The qform has been copied from sform.</p>
' - ) + warning_txt = 'Note on orientation: qform matrix overwritten' + description = '
<p class="elem-desc">The qform has been copied from sform.</p>
' # Rows 5-6: else: affine = img.header.get_base_affine() - img.set_sform(affine, nb.nifti1.xform_codes["scanner"]) - img.set_qform(affine, nb.nifti1.xform_codes["scanner"]) - warning_txt = "WARNING - Missing orientation information" + img.set_sform(affine, nb.nifti1.xform_codes['scanner']) + img.set_qform(affine, nb.nifti1.xform_codes['scanner']) + warning_txt = 'WARNING - Missing orientation information' description = """\
<p class="elem-desc">
FMRIPREP could not retrieve orientation information from the image header. @@ -315,27 +306,23 @@ def _run_interface(self, runtime): Analyses of this dataset MAY BE INVALID.
</p>
""" - snippet = '
<h3 class="elem-title">%s</h3>
\n%s\n' % (warning_txt, description) + snippet = f'
<h3 class="elem-title">{warning_txt}</h3>
\n{description}\n' # Store new file and report img.to_filename(out_fname) - with open(out_report, "w") as fobj: - fobj.write(indent(snippet, "\t" * 3)) + with open(out_report, 'w') as fobj: + fobj.write(indent(snippet, '\t' * 3)) - self._results["out_report"] = out_report + self._results['out_report'] = out_report return runtime class _MatchHeaderInputSpec(BaseInterfaceInputSpec): - reference = File( - exists=True, mandatory=True, desc="NIfTI file with reference header" - ) - in_file = File( - exists=True, mandatory=True, desc="NIfTI file which header will be checked" - ) + reference = File(exists=True, mandatory=True, desc='NIfTI file with reference header') + in_file = File(exists=True, mandatory=True, desc='NIfTI file which header will be checked') class _MatchHeaderOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="NIfTI file with fixed header") + out_file = File(exists=True, desc='NIfTI file with fixed header') class MatchHeader(SimpleInterface): @@ -347,48 +334,42 @@ def _run_interface(self, runtime): imgnii = nb.load(self.inputs.in_file) imghdr = imgnii.header.copy() - imghdr["dim_info"] = refhdr["dim_info"] # dim_info is lost sometimes + imghdr['dim_info'] = refhdr['dim_info'] # dim_info is lost sometimes # Set qform qform = refhdr.get_qform() - qcode = int(refhdr["qform_code"]) + qcode = int(refhdr['qform_code']) if not np.allclose(qform, imghdr.get_qform()): - LOGGER.warning("q-forms of reference and mask are substantially different") + LOGGER.warning('q-forms of reference and mask are substantially different') imghdr.set_qform(qform, qcode) # Set sform sform = refhdr.get_sform() - scode = int(refhdr["sform_code"]) + scode = int(refhdr['sform_code']) if not np.allclose(sform, imghdr.get_sform()): - LOGGER.warning("s-forms of reference and mask are substantially different") + LOGGER.warning('s-forms of reference and mask are substantially different') imghdr.set_sform(sform, scode) - out_file = fname_presuffix( - self.inputs.in_file, suffix="_hdr", newpath=runtime.cwd - ) + out_file = fname_presuffix(self.inputs.in_file, suffix='_hdr', newpath=runtime.cwd) - imgnii.__class__(imgnii.dataobj, imghdr.get_best_affine(), imghdr).to_filename( - out_file - ) - self._results["out_file"] = out_file + imgnii.__class__(imgnii.dataobj, imghdr.get_best_affine(), imghdr).to_filename(out_file) + self._results['out_file'] = out_file return runtime class _SanitizeImageInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="input image") - n_volumes_to_discard = traits.Int( - 0, usedefault=True, desc="discard n first volumes" - ) + in_file = File(exists=True, mandatory=True, desc='input image') + n_volumes_to_discard = traits.Int(0, usedefault=True, desc='discard n first volumes') max_32bit = traits.Bool( False, usedefault=True, - desc="cast data to float32 if higher precision is encountered", + desc='cast data to float32 if higher precision is encountered', ) class _SanitizeImageOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="validated image") - out_report = File(exists=True, desc="HTML segment containing warning") + out_file = File(exists=True, desc='validated image') + out_report = File(exists=True, desc='HTML segment containing warning') class SanitizeImage(SimpleInterface): @@ -447,11 +428,11 @@ class SanitizeImage(SimpleInterface): def _run_interface(self, runtime): img = nb.load(self.inputs.in_file) - out_report = os.path.join(runtime.cwd, "report.html") + out_report = os.path.join(runtime.cwd, 'report.html') # Retrieve xform codes - 
sform_code = int(img.header._structarr["sform_code"]) - qform_code = int(img.header._structarr["qform_code"]) + sform_code = int(img.header._structarr['sform_code']) + qform_code = int(img.header._structarr['qform_code']) # Check qform is valid valid_qform = False @@ -465,18 +446,18 @@ def _run_interface(self, runtime): matching_affines = valid_qform and np.allclose(img.get_qform(), img.get_sform()) save_file = False - warning_txt = "" + warning_txt = '' # Both match, qform valid (implicit with match), codes okay -> do nothing, empty report if matching_affines and qform_code > 0 and sform_code > 0: - self._results["out_file"] = self.inputs.in_file - open(out_report, "w").close() + self._results['out_file'] = self.inputs.in_file + open(out_report, 'w').close() # Row 2: elif valid_qform and qform_code > 0: img.set_sform(img.get_qform(), qform_code) save_file = True - warning_txt = "Note on orientation: sform matrix set" + warning_txt = 'Note on orientation: sform matrix set' description = """\
<p class="elem-desc">The sform has been copied from qform.</p>
""" @@ -485,12 +466,12 @@ def _run_interface(self, runtime): elif sform_code > 0 and (not matching_affines or qform_code == 0): img.set_qform(img.get_sform(), sform_code) save_file = True - warning_txt = "Note on orientation: qform matrix overwritten" + warning_txt = 'Note on orientation: qform matrix overwritten' description = """\
<p class="elem-desc">The qform has been copied from sform.</p>
""" if not valid_qform and qform_code > 0: - warning_txt = "WARNING - Invalid qform information" + warning_txt = 'WARNING - Invalid qform information' description = """\
<p class="elem-desc">
The qform matrix found in the file header is invalid. @@ -502,10 +483,10 @@ def _run_interface(self, runtime): # Rows 5-6: else: affine = img.affine - img.set_sform(affine, nb.nifti1.xform_codes["scanner"]) - img.set_qform(affine, nb.nifti1.xform_codes["scanner"]) + img.set_sform(affine, nb.nifti1.xform_codes['scanner']) + img.set_qform(affine, nb.nifti1.xform_codes['scanner']) save_file = True - warning_txt = "WARNING - Missing orientation information" + warning_txt = 'WARNING - Missing orientation information' description = """\
<p class="elem-desc">
Orientation information could not be retrieved from the image header. @@ -524,7 +505,7 @@ def _run_interface(self, runtime): in_data = img.dataobj img = nb.Nifti1Image( - in_data[:, :, :, self.inputs.n_volumes_to_discard:], + in_data[:, :, :, self.inputs.n_volumes_to_discard :], img.affine, img.header, ) @@ -536,19 +517,14 @@ def _run_interface(self, runtime): # Store new file if save_file: - out_fname = fname_presuffix( - self.inputs.in_file, suffix="_valid", newpath=runtime.cwd - ) - self._results["out_file"] = out_fname + out_fname = fname_presuffix(self.inputs.in_file, suffix='_valid', newpath=runtime.cwd) + self._results['out_file'] = out_fname img.to_filename(out_fname) if warning_txt: - snippet = '
<h3 class="elem-title">%s</h3>
\n%s\n' % ( - warning_txt, - description, - ) - with open(out_report, "w") as fobj: - fobj.write(indent(snippet, "\t" * 3)) + snippet = f'
<h3 class="elem-title">{warning_txt}</h3>
\n{description}\n' + with open(out_report, 'w') as fobj: + fobj.write(indent(snippet, '\t' * 3)) - self._results["out_report"] = out_report + self._results['out_report'] = out_report return runtime diff --git a/niworkflows/interfaces/images.py b/niworkflows/interfaces/images.py index 916c200c7ab..bb04c82ec21 100644 --- a/niworkflows/interfaces/images.py +++ b/niworkflows/interfaces/images.py @@ -21,40 +21,38 @@ # https://www.nipreps.org/community/licensing/ # """Image tools interfaces.""" + import os from functools import partial -import numpy as np -import nibabel as nb +import nibabel as nb +import numpy as np from nipype import logging -from nipype.utils.filemanip import fname_presuffix from nipype.interfaces.base import ( - traits, - TraitedSpec, BaseInterfaceInputSpec, - SimpleInterface, File, InputMultiObject, OutputMultiObject, + SimpleInterface, + TraitedSpec, isdefined, + traits, ) +from nipype.utils.filemanip import fname_presuffix - -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') class _RegridToZoomsInputSpec(BaseInterfaceInputSpec): - in_file = File( - exists=True, mandatory=True, desc="a file whose resolution is to change" - ) + in_file = File(exists=True, mandatory=True, desc='a file whose resolution is to change') zooms = traits.Tuple( traits.Float, traits.Float, traits.Float, mandatory=True, - desc="the new resolution", + desc='the new resolution', ) - order = traits.Int(3, usedefault=True, desc="order of interpolator") + order = traits.Int(3, usedefault=True, desc='order of interpolator') clip = traits.Bool( True, usedefault=True, @@ -65,12 +63,12 @@ class _RegridToZoomsInputSpec(BaseInterfaceInputSpec): traits.Float(), default=False, usedefault=True, - desc="apply gaussian smoothing before resampling", + desc='apply gaussian smoothing before resampling', ) class _RegridToZoomsOutputSpec(TraitedSpec): - out_file = File(exists=True, dec="the regridded file") + out_file = File(exists=True, dec='the regridded file') class RegridToZooms(SimpleInterface): @@ -82,8 +80,8 @@ class RegridToZooms(SimpleInterface): def _run_interface(self, runtime): from ..utils.images import resample_by_spacing - self._results["out_file"] = fname_presuffix( - self.inputs.in_file, suffix="_regrid", newpath=runtime.cwd + self._results['out_file'] = fname_presuffix( + self.inputs.in_file, suffix='_regrid', newpath=runtime.cwd ) resample_by_spacing( self.inputs.in_file, @@ -91,13 +89,13 @@ def _run_interface(self, runtime): order=self.inputs.order, clip=self.inputs.clip, smooth=self.inputs.smooth, - ).to_filename(self._results["out_file"]) + ).to_filename(self._results['out_file']) return runtime class _IntraModalMergeInputSpec(BaseInterfaceInputSpec): - in_files = InputMultiObject(File(exists=True), mandatory=True, desc="input files") - in_mask = File(exists=True, desc="input mask for grand mean scaling") + in_files = InputMultiObject(File(exists=True), mandatory=True, desc='input files') + in_mask = File(exists=True, desc='input mask for grand mean scaling') hmc = traits.Bool(True, usedefault=True) zero_based_avg = traits.Bool(True, usedefault=True) to_ras = traits.Bool(True, usedefault=True) @@ -105,10 +103,10 @@ class _IntraModalMergeInputSpec(BaseInterfaceInputSpec): class _IntraModalMergeOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="merged image") - out_avg = File(exists=True, desc="average image") - out_mats = OutputMultiObject(File(exists=True), desc="output matrices") - out_movpar = OutputMultiObject(File(exists=True), 
desc="output movement parameters") + out_file = File(exists=True, desc='merged image') + out_avg = File(exists=True, desc='average image') + out_mats = OutputMultiObject(File(exists=True), desc='output matrices') + out_movpar = OutputMultiObject(File(exists=True), desc='output movement parameters') class IntraModalMerge(SimpleInterface): @@ -135,11 +133,11 @@ def _run_interface(self, runtime): nii_list = [] # Remove one-sized extra dimensions - for i, f in enumerate(in_files): + for f in in_files: filenii = nb.load(f) filenii = nb.squeeze_image(filenii) if len(filenii.shape) == 5: - raise RuntimeError("Input image (%s) is 5D." % f) + raise RuntimeError(f'Input image ({f}) is 5D.') if filenii.dataobj.ndim == 4: nii_list += nb.four_to_three(filenii) else: @@ -151,11 +149,11 @@ def _run_interface(self, runtime): filenii = nii_list[0] merged_fname = fname_presuffix( - self.inputs.in_files[0], suffix="_merged", newpath=runtime.cwd + self.inputs.in_files[0], suffix='_merged', newpath=runtime.cwd ) filenii.to_filename(merged_fname) - self._results["out_file"] = merged_fname - self._results["out_avg"] = merged_fname + self._results['out_file'] = merged_fname + self._results['out_avg'] = merged_fname if filenii.dataobj.ndim < 4: # TODO: generate identity out_mats and zero-filled out_movpar @@ -165,7 +163,7 @@ def _run_interface(self, runtime): from nipype.interfaces.fsl import MCFLIRT mcflirt = MCFLIRT( - cost="normcorr", + cost='normcorr', save_mats=True, save_plots=True, ref_vol=0, @@ -173,23 +171,21 @@ def _run_interface(self, runtime): ) mcres = mcflirt.run() filenii = nb.load(mcres.outputs.out_file) - self._results["out_file"] = mcres.outputs.out_file - self._results["out_mats"] = mcres.outputs.mat_file - self._results["out_movpar"] = mcres.outputs.par_file + self._results['out_file'] = mcres.outputs.out_file + self._results['out_mats'] = mcres.outputs.mat_file + self._results['out_movpar'] = mcres.outputs.par_file - hmcdata = filenii.get_fdata(dtype="float32") + hmcdata = filenii.get_fdata(dtype='float32') if self.inputs.grand_mean_scaling: if not isdefined(self.inputs.in_mask): mean = np.median(hmcdata, axis=-1) thres = np.percentile(mean, 25) mask = mean > thres else: - mask = nb.load(self.inputs.in_mask).get_fdata(dtype="float32") > 0.5 + mask = nb.load(self.inputs.in_mask).get_fdata(dtype='float32') > 0.5 nimgs = hmcdata.shape[-1] - means = np.median( - hmcdata[mask[..., np.newaxis]].reshape((-1, nimgs)).T, axis=-1 - ) + means = np.median(hmcdata[mask[..., np.newaxis]].reshape((-1, nimgs)).T, axis=-1) max_mean = means.max() for i in range(nimgs): hmcdata[..., i] *= max_mean / means[i] @@ -198,11 +194,11 @@ def _run_interface(self, runtime): if self.inputs.zero_based_avg: hmcdata -= hmcdata.min() - self._results["out_avg"] = fname_presuffix( - self.inputs.in_files[0], suffix="_avg", newpath=runtime.cwd + self._results['out_avg'] = fname_presuffix( + self.inputs.in_files[0], suffix='_avg', newpath=runtime.cwd ) nb.Nifti1Image(hmcdata, filenii.affine, filenii.header).to_filename( - self._results["out_avg"] + self._results['out_avg'] ) return runtime @@ -212,33 +208,33 @@ class _RobustAverageInputSpec(BaseInterfaceInputSpec): in_file = File( exists=True, mandatory=True, - desc="Either a 3D reference or 4D file to average through the last axis" + desc='Either a 3D reference or 4D file to average through the last axis', ) - t_mask = traits.List(traits.Bool, desc="List of selected timepoints to be averaged") + t_mask = traits.List(traits.Bool, desc='List of selected timepoints to be averaged') 
mc_method = traits.Enum( - "AFNI", - "FSL", + 'AFNI', + 'FSL', None, usedefault=True, - desc="Which software to use to perform motion correction", + desc='Which software to use to perform motion correction', ) nonnegative = traits.Bool( - True, usedefault=True, desc="whether the output should be clipped below zero" + True, usedefault=True, desc='whether the output should be clipped below zero' ) - num_threads = traits.Int(desc="number of threads") + num_threads = traits.Int(desc='number of threads') two_pass = traits.Bool( - True, usedefault=True, desc="whether two passes of correction is necessary" + True, usedefault=True, desc='whether two passes of correction is necessary' ) class _RobustAverageOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="the averaged image") - out_volumes = File(exists=True, desc="the volumes selected that have been averaged") + out_file = File(exists=True, desc='the averaged image') + out_volumes = File(exists=True, desc='the volumes selected that have been averaged') out_drift = traits.List( - traits.Float, desc="the ratio to the grand mean or global signal drift" + traits.Float, desc='the ratio to the grand mean or global signal drift' ) - out_hmc = OutputMultiObject(File(exists=True), desc="head-motion correction matrices") - out_hmc_volumes = OutputMultiObject(File(exists=True), desc="head-motion correction volumes") + out_hmc = OutputMultiObject(File(exists=True), desc='head-motion correction matrices') + out_hmc_volumes = OutputMultiObject(File(exists=True), desc='head-motion correction volumes') class RobustAverage(SimpleInterface): @@ -252,9 +248,9 @@ def _run_interface(self, runtime): # If reference is 3D, return it directly if img.dataobj.ndim == 3: - self._results["out_file"] = self.inputs.in_file - self._results["out_volumes"] = self.inputs.in_file - self._results["out_drift"] = [1.0] + self._results['out_file'] = self.inputs.in_file + self._results['out_volumes'] = self.inputs.in_file + self._results['out_drift'] = [1.0] return runtime fname = partial(fname_presuffix, self.inputs.in_file, newpath=runtime.cwd) @@ -267,34 +263,30 @@ def _run_interface(self, runtime): # If reference was 4D, but single-volume - write out squeezed and return. 
if img.dataobj.ndim == 3: - self._results["out_file"] = fname(suffix="_squeezed") - img.to_filename(self._results["out_file"]) - self._results["out_volumes"] = self.inputs.in_file - self._results["out_drift"] = [1.0] + self._results['out_file'] = fname(suffix='_squeezed') + img.to_filename(self._results['out_file']) + self._results['out_volumes'] = self.inputs.in_file + self._results['out_drift'] = [1.0] return runtime img_len = img.shape[3] - t_mask = ( - self.inputs.t_mask if isdefined(self.inputs.t_mask) else [True] * img_len - ) + t_mask = self.inputs.t_mask if isdefined(self.inputs.t_mask) else [True] * img_len if len(t_mask) != img_len: raise ValueError( - f"Image length ({img_len} timepoints) unmatched by mask ({len(t_mask)})" + f'Image length ({img_len} timepoints) unmatched by mask ({len(t_mask)})' ) n_volumes = sum(t_mask) if n_volumes < 1: - raise ValueError("At least one volume should be selected for slicing") + raise ValueError('At least one volume should be selected for slicing') - self._results["out_file"] = fname(suffix="_average") - self._results["out_volumes"] = fname(suffix="_sliced") + self._results['out_file'] = fname(suffix='_average') + self._results['out_volumes'] = fname(suffix='_sliced') - sliced = nb.concat_images( - i for i, t in zip(nb.four_to_three(img), t_mask) if t - ) + sliced = nb.concat_images(i for i, t in zip(nb.four_to_three(img), t_mask) if t) - data = sliced.get_fdata(dtype="float32") + data = sliced.get_fdata(dtype='float32') # Data can come with outliers showing very high numbers - preemptively prune data = np.clip( data, @@ -304,7 +296,7 @@ def _run_interface(self, runtime): gs_drift = np.mean(data, axis=(0, 1, 2)) gs_drift /= gs_drift.max() - self._results["out_drift"] = [float(i) for i in gs_drift] + self._results['out_drift'] = [float(i) for i in gs_drift] data /= gs_drift data = np.clip( @@ -313,43 +305,43 @@ def _run_interface(self, runtime): a_max=data.max(), ) sliced.__class__(data, sliced.affine, sliced.header).to_filename( - self._results["out_volumes"] + self._results['out_volumes'] ) if n_volumes == 1: - nb.squeeze_image(sliced).to_filename(self._results["out_file"]) - self._results["out_drift"] = [1.0] + nb.squeeze_image(sliced).to_filename(self._results['out_file']) + self._results['out_drift'] = [1.0] return runtime - if self.inputs.mc_method == "AFNI": + if self.inputs.mc_method == 'AFNI': from nipype.interfaces.afni import Volreg volreg = Volreg( - in_file=self._results["out_volumes"], - interp="Fourier", - args="-twopass" if self.inputs.two_pass else "", + in_file=self._results['out_volumes'], + interp='Fourier', + args='-twopass' if self.inputs.two_pass else '', zpad=4, - outputtype="NIFTI_GZ", + outputtype='NIFTI_GZ', ) if isdefined(self.inputs.num_threads): volreg.inputs.num_threads = self.inputs.num_threads res = volreg.run() - self._results["out_hmc"] = res.outputs.oned_matrix_save + self._results['out_hmc'] = res.outputs.oned_matrix_save - elif self.inputs.mc_method == "FSL": + elif self.inputs.mc_method == 'FSL': from nipype.interfaces.fsl import MCFLIRT res = MCFLIRT( - in_file=self._results["out_volumes"], + in_file=self._results['out_volumes'], ref_vol=0, - interpolation="sinc", + interpolation='sinc', ).run() - self._results["out_hmc"] = res.outputs.mat_file + self._results['out_hmc'] = res.outputs.mat_file if self.inputs.mc_method: - self._results["out_hmc_volumes"] = res.outputs.out_file - data = nb.load(res.outputs.out_file).get_fdata(dtype="float32") + self._results['out_hmc_volumes'] = res.outputs.out_file + data = 
nb.load(res.outputs.out_file).get_fdata(dtype='float32') data = np.clip( data, @@ -357,9 +349,9 @@ def _run_interface(self, runtime): a_max=data.max(), ) - sliced.__class__( - np.median(data, axis=3), sliced.affine, sliced.header - ).to_filename(self._results["out_file"]) + sliced.__class__(np.median(data, axis=3), sliced.affine, sliced.header).to_filename( + self._results['out_file'] + ) return runtime @@ -378,31 +370,31 @@ def _run_interface(self, runtime): class _TemplateDimensionsInputSpec(BaseInterfaceInputSpec): - anat_type = traits.Enum("T1w", "T2w", usedefault=True, desc="Anatomical image type") + anat_type = traits.Enum('T1w', 'T2w', usedefault=True, desc='Anatomical image type') anat_list = InputMultiObject( - File(exists=True), xor=["t1w_list"], desc="input anatomical images" + File(exists=True), xor=['t1w_list'], desc='input anatomical images' ) t1w_list = InputMultiObject( File(exists=True), - xor=["anat_list"], - deprecated="1.14.0", - new_name="anat_list", + xor=['anat_list'], + deprecated='1.14.0', + new_name='anat_list', ) max_scale = traits.Float( - 3.0, usedefault=True, desc="Maximum scaling factor in images to accept" + 3.0, usedefault=True, desc='Maximum scaling factor in images to accept' ) class _TemplateDimensionsOutputSpec(TraitedSpec): - t1w_valid_list = OutputMultiObject(exists=True, desc="valid T1w images") - anat_valid_list = OutputMultiObject(exists=True, desc="valid anatomical images") + t1w_valid_list = OutputMultiObject(exists=True, desc='valid T1w images') + anat_valid_list = OutputMultiObject(exists=True, desc='valid anatomical images') target_zooms = traits.Tuple( - traits.Float, traits.Float, traits.Float, desc="Target zoom information" + traits.Float, traits.Float, traits.Float, desc='Target zoom information' ) target_shape = traits.Tuple( - traits.Int, traits.Int, traits.Int, desc="Target shape information" + traits.Int, traits.Int, traits.Int, desc='Target shape information' ) - out_report = File(exists=True, desc="conformation report") + out_report = File(exists=True, desc='conformation report') class TemplateDimensions(SimpleInterface): @@ -429,14 +421,12 @@ def _generate_segment(self, discards, dims, zooms): DISCARD_TEMPLATE.format(path=path, basename=os.path.basename(path)) for path in discards ] - discard_list = ( - "\n".join(["\t\t\t
    "] + items + ["\t\t\t
"]) if items else "" - ) - zoom_fmt = "{:.02g}mm x {:.02g}mm x {:.02g}mm".format(*zooms) + discard_list = '\n'.join(['\t\t\t
<ul>'] + items + ['\t\t\t</ul>
']) if items else '' + zoom_fmt = '{:.02g}mm x {:.02g}mm x {:.02g}mm'.format(*zooms) return CONFORMATION_TEMPLATE.format( anat=self.inputs.anat_type, n_anat=len(self.inputs.anat_list), - dims="x".join(map(str, dims)), + dims='x'.join(map(str, dims)), zooms=zoom_fmt, n_discards=len(discards), discard_list=discard_list, @@ -464,41 +454,41 @@ def _run_interface(self, runtime): # Ignore dropped images valid_fnames = np.atleast_1d(in_names[valid]).tolist() - self._results["anat_valid_list"] = valid_fnames - self._results["t1w_valid_list"] = valid_fnames # Deprecate: 1.14.0 + self._results['anat_valid_list'] = valid_fnames + self._results['t1w_valid_list'] = valid_fnames # Deprecate: 1.14.0 # Set target shape information target_zooms = all_zooms[valid].min(axis=0) target_shape = all_shapes[valid].max(axis=0) - self._results["target_zooms"] = tuple(target_zooms.tolist()) - self._results["target_shape"] = tuple(target_shape.tolist()) + self._results['target_zooms'] = tuple(target_zooms.tolist()) + self._results['target_shape'] = tuple(target_shape.tolist()) # Create report dropped_images = in_names[~valid] segment = self._generate_segment(dropped_images, target_shape, target_zooms) - out_report = os.path.join(runtime.cwd, "report.html") - with open(out_report, "w") as fobj: + out_report = os.path.join(runtime.cwd, 'report.html') + with open(out_report, 'w') as fobj: fobj.write(segment) - self._results["out_report"] = out_report + self._results['out_report'] = out_report return runtime class _ConformInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="Input image") + in_file = File(exists=True, mandatory=True, desc='Input image') target_zooms = traits.Tuple( - traits.Float, traits.Float, traits.Float, desc="Target zoom information" + traits.Float, traits.Float, traits.Float, desc='Target zoom information' ) target_shape = traits.Tuple( - traits.Int, traits.Int, traits.Int, desc="Target shape information" + traits.Int, traits.Int, traits.Int, desc='Target shape information' ) class _ConformOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="Conformed image") - transform = File(exists=True, desc="Conformation transform (voxel-to-voxel)") + out_file = File(exists=True, desc='Conformed image') + transform = File(exists=True, desc='Conformation transform (voxel-to-voxel)') class Conform(SimpleInterface): @@ -533,22 +523,20 @@ def _run_interface(self, runtime): shape = np.array(reoriented.shape[:3]) # Reconstruct transform from orig to reoriented image - ornt_xfm = nb.orientations.inv_ornt_aff( - nb.io_orientation(orig_img.affine), orig_img.shape - ) + ornt_xfm = nb.orientations.inv_ornt_aff(nb.io_orientation(orig_img.affine), orig_img.shape) # Identity unless proven otherwise target_affine = reoriented.affine.copy() conform_xfm = np.eye(4) xyz_unit = reoriented.header.get_xyzt_units()[0] - if xyz_unit == "unknown": + if xyz_unit == 'unknown': # Common assumption; if we're wrong, unlikely to be the only thing that breaks - xyz_unit = "mm" + xyz_unit = 'mm' # Set a 0.05mm threshold to performing rescaling - atol_gross = {"meter": 5e-5, "mm": 0.05, "micron": 50}[xyz_unit] + atol_gross = {'meter': 5e-5, 'mm': 0.05, 'micron': 50}[xyz_unit] # if 0.01 > difference > 0.001mm, freesurfer won't be able to merge the images - atol_fine = {"meter": 1e-6, "mm": 0.001, "micron": 1}[xyz_unit] + atol_fine = {'meter': 1e-6, 'mm': 0.001, 'micron': 1}[xyz_unit] # Update zooms => Modify affine # Rescale => Resample to resized voxels @@ -561,18 +549,14 @@ def _run_interface(self, 
runtime): # Use an affine with the corrected zooms, whether or not we resample if update_zooms: scale_factor = target_zooms / zooms - target_affine[:3, :3] = reoriented.affine[:3, :3] @ np.diag( - scale_factor - ) + target_affine[:3, :3] = reoriented.affine[:3, :3] @ np.diag(scale_factor) if resize: # The shift is applied after scaling. # Use a proportional shift to maintain relative position in dataset size_factor = target_span / (zooms * shape) # Use integer shifts to avoid unnecessary interpolation - offset = ( - reoriented.affine[:3, 3] * size_factor - reoriented.affine[:3, 3] - ) + offset = reoriented.affine[:3, 3] * size_factor - reoriented.affine[:3, 3] target_affine[:3, 3] = reoriented.affine[:3, 3] + offset.astype(int) conform_xfm = np.linalg.inv(reoriented.affine) @ target_affine @@ -587,29 +571,27 @@ def _run_interface(self, runtime): # Image may be reoriented, rescaled, and/or resized if reoriented is not orig_img: - out_name = fname_presuffix(fname, suffix="_ras", newpath=runtime.cwd) + out_name = fname_presuffix(fname, suffix='_ras', newpath=runtime.cwd) reoriented.to_filename(out_name) else: out_name = fname transform = ornt_xfm.dot(conform_xfm) if not np.allclose(orig_img.affine.dot(transform), target_affine): - raise ValueError("Original and target affines are not similar") + raise ValueError('Original and target affines are not similar') - mat_name = fname_presuffix( - fname, suffix=".mat", newpath=runtime.cwd, use_ext=False - ) - np.savetxt(mat_name, transform, fmt="%.08f") + mat_name = fname_presuffix(fname, suffix='.mat', newpath=runtime.cwd, use_ext=False) + np.savetxt(mat_name, transform, fmt='%.08f') - self._results["out_file"] = out_name - self._results["transform"] = mat_name + self._results['out_file'] = out_name + self._results['transform'] = mat_name return runtime def reorient(in_file, newpath=None): """Reorient Nifti files to RAS.""" - out_file = fname_presuffix(in_file, suffix="_ras", newpath=newpath) + out_file = fname_presuffix(in_file, suffix='_ras', newpath=newpath) nb.as_closest_canonical(nb.load(in_file)).to_filename(out_file) return out_file @@ -655,47 +637,46 @@ def normalize_xform(img): class _SignalExtractionInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="4-D fMRI nii file") + in_file = File(exists=True, mandatory=True, desc='4-D fMRI nii file') label_files = InputMultiObject( File(exists=True), mandatory=True, - desc="a 3D label image, with 0 denoting " - "background, or a list of 3D probability " - "maps (one per label) or the equivalent 4D " - "file.", + desc='a 3D label image, with 0 denoting ' + 'background, or a list of 3D probability ' + 'maps (one per label) or the equivalent 4D ' + 'file.', ) prob_thres = traits.Range( low=0.0, high=1.0, value=0.5, usedefault=True, - desc="If label_files are probability masks, threshold " - "at specified probability.", + desc='If label_files are probability masks, threshold at specified probability.', ) class_labels = traits.List( mandatory=True, - desc="Human-readable labels for each segment " - "in the label file, in order. The length of " - "class_labels must be equal to the number of " - "segments (background excluded). This list " - "corresponds to the class labels in label_file " - "in ascending order", + desc='Human-readable labels for each segment ' + 'in the label file, in order. The length of ' + 'class_labels must be equal to the number of ' + 'segments (background excluded). 
This list ' + 'corresponds to the class labels in label_file ' + 'in ascending order', ) out_file = File( - "signals.tsv", + 'signals.tsv', usedefault=True, exists=False, - desc="The name of the file to output to. signals.tsv by default", + desc='The name of the file to output to. signals.tsv by default', ) class _SignalExtractionOutputSpec(TraitedSpec): out_file = File( exists=True, - desc="tsv file containing the computed " - "signals, with as many columns as there are labels and as " - "many rows as there are timepoints in in_file, plus a " - "header row with values from class_labels", + desc='tsv file containing the computed ' + 'signals, with as many columns as there are labels and as ' + 'many rows as there are timepoints in in_file, plus a ' + 'header row with values from class_labels', ) @@ -721,16 +702,15 @@ def _run_interface(self, runtime): # This check assumes all input masks have same dimensions if img.shape[:3] != mask_imgs[0].shape[:3]: raise NotImplementedError( - "Input image and mask should be of " - "same dimensions before running SignalExtraction" + 'Input image and mask should be of ' + 'same dimensions before running SignalExtraction' ) # Load the mask. # If mask is a list, each mask is treated as its own ROI/parcel # If mask is a 3D, each integer is treated as its own ROI/parcel if len(mask_imgs) > 1: masks = [ - np.asanyarray(mask_img.dataobj) >= self.inputs.prob_thres - for mask_img in mask_imgs + np.asanyarray(mask_img.dataobj) >= self.inputs.prob_thres for mask_img in mask_imgs ] else: labelsmap = np.asanyarray(mask_imgs[0].dataobj) @@ -739,7 +719,7 @@ def _run_interface(self, runtime): masks = [labelsmap == label for label in labels] if len(masks) != len(self.inputs.class_labels): - raise ValueError("Number of masks must match number of labels") + raise ValueError('Number of masks must match number of labels') series = np.zeros((img.shape[3], len(masks))) @@ -748,7 +728,7 @@ def _run_interface(self, runtime): series[:, j] = data[mask, :].mean(axis=0) output = np.vstack((self.inputs.class_labels, series.astype(str))) - self._results["out_file"] = os.path.join(runtime.cwd, self.inputs.out_file) - np.savetxt(self._results["out_file"], output, fmt="%s", delimiter="\t") + self._results['out_file'] = os.path.join(runtime.cwd, self.inputs.out_file) + np.savetxt(self._results['out_file'], output, fmt='%s', delimiter='\t') return runtime diff --git a/niworkflows/interfaces/itk.py b/niworkflows/interfaces/itk.py index ce9ad7fed22..86e532dbf47 100644 --- a/niworkflows/interfaces/itk.py +++ b/niworkflows/interfaces/itk.py @@ -21,6 +21,7 @@ # https://www.nipreps.org/community/licensing/ # """ITK files handling.""" + import os from mimetypes import guess_type from tempfile import TemporaryDirectory @@ -43,20 +44,20 @@ from .fixes import _FixTraitApplyTransformsInputSpec -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') class _MCFLIRT2ITKInputSpec(BaseInterfaceInputSpec): in_files = InputMultiObject( - File(exists=True), mandatory=True, desc="list of MAT files from MCFLIRT" + File(exists=True), mandatory=True, desc='list of MAT files from MCFLIRT' ) - in_reference = File(exists=True, mandatory=True, desc="input image for spatial reference") - in_source = File(exists=True, mandatory=True, desc="input image for spatial source") - num_threads = traits.Int(nohash=True, desc="number of parallel processes") + in_reference = File(exists=True, mandatory=True, desc='input image for spatial reference') + in_source = File(exists=True, 
mandatory=True, desc='input image for spatial source') + num_threads = traits.Int(nohash=True, desc='number of parallel processes') class _MCFLIRT2ITKOutputSpec(TraitedSpec): - out_file = File(desc="the output ITKTransform file") + out_file = File(desc='the output ITKTransform file') class MCFLIRT2ITK(SimpleInterface): @@ -67,7 +68,7 @@ class MCFLIRT2ITK(SimpleInterface): def _run_interface(self, runtime): if isdefined(self.inputs.num_threads): - LOGGER.warning("Multithreading is deprecated. Remove the num_threads input.") + LOGGER.warning('Multithreading is deprecated. Remove the num_threads input.') source = nb.load(self.inputs.in_source) reference = nb.load(self.inputs.in_reference) @@ -80,8 +81,8 @@ def _run_interface(self, runtime): np.stack([a.matrix for a in affines], axis=0), ) - self._results["out_file"] = os.path.join(runtime.cwd, "mat2itk.txt") - affarray.to_filename(self._results["out_file"]) + self._results['out_file'] = os.path.join(runtime.cwd, 'mat2itk.txt') + affarray.to_filename(self._results['out_file']) return runtime @@ -90,23 +91,19 @@ class _MultiApplyTransformsInputSpec(_FixTraitApplyTransformsInputSpec): input_image = InputMultiObject( File(exists=True), mandatory=True, - desc="input time-series as a list of volumes after splitting" - " through the fourth dimension", - ) - num_threads = traits.Int( - 1, usedefault=True, nohash=True, desc="number of parallel processes" + desc='input time-series as a list of volumes after splitting' + ' through the fourth dimension', ) + num_threads = traits.Int(1, usedefault=True, nohash=True, desc='number of parallel processes') save_cmd = traits.Bool( - True, usedefault=True, desc="write a log of command lines that were applied" - ) - copy_dtype = traits.Bool( - False, usedefault=True, desc="copy dtype from inputs to outputs" + True, usedefault=True, desc='write a log of command lines that were applied' ) + copy_dtype = traits.Bool(False, usedefault=True, desc='copy dtype from inputs to outputs') class _MultiApplyTransformsOutputSpec(TraitedSpec): - out_files = OutputMultiObject(File(), desc="the output ITKTransform file") - log_cmdline = File(desc="a list of command lines used to apply transforms") + out_files = OutputMultiObject(File(), desc='the output ITKTransform file') + log_cmdline = File(desc='a list of command lines used to apply transforms') class MultiApplyTransforms(SimpleInterface): @@ -120,25 +117,23 @@ def _run_interface(self, runtime): ifargs = self.inputs.get() # Extract number of input images and transforms - in_files = ifargs.pop("input_image") + in_files = ifargs.pop('input_image') num_files = len(in_files) - transforms = ifargs.pop("transforms") + transforms = ifargs.pop('transforms') # Get number of parallel jobs - num_threads = ifargs.pop("num_threads") - save_cmd = ifargs.pop("save_cmd") + num_threads = ifargs.pop('num_threads') + save_cmd = ifargs.pop('save_cmd') # Remove certain keys - for key in ["environ", "ignore_exception", "terminal_output", "output_image"]: + for key in ['environ', 'ignore_exception', 'terminal_output', 'output_image']: ifargs.pop(key, None) # Get a temp folder ready - tmp_folder = TemporaryDirectory(prefix="tmp-", dir=runtime.cwd) + tmp_folder = TemporaryDirectory(prefix='tmp-', dir=runtime.cwd) xfms_list = _arrange_xfms(transforms, num_files, tmp_folder) if len(xfms_list) != num_files: - raise ValueError( - "Number of files and entries in the transforms list do not match" - ) + raise ValueError('Number of files and entries in the transforms list do not match') # Inputs are ready 
to run in parallel if num_threads < 1: @@ -158,21 +153,19 @@ def _run_interface(self, runtime): _applytfms, [ (in_file, in_xfm, ifargs, i, runtime.cwd) - for i, (in_file, in_xfm) in enumerate( - zip(in_files, xfms_list) - ) + for i, (in_file, in_xfm) in enumerate(zip(in_files, xfms_list)) ], ) ) tmp_folder.cleanup() # Collect output file names, after sorting by index - self._results["out_files"] = [el[0] for el in out_files] + self._results['out_files'] = [el[0] for el in out_files] if save_cmd: - self._results["log_cmdline"] = os.path.join(runtime.cwd, "command.txt") - with open(self._results["log_cmdline"], "w") as cmdfile: - print("\n-------\n".join([el[1] for el in out_files]), file=cmdfile) + self._results['log_cmdline'] = os.path.join(runtime.cwd, 'command.txt') + with open(self._results['log_cmdline'], 'w') as cmdfile: + print('\n-------\n'.join([el[1] for el in out_files]), file=cmdfile) return runtime @@ -184,18 +177,19 @@ def _applytfms(args): """ import nibabel as nb from nipype.utils.filemanip import fname_presuffix + from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms in_file, in_xform, ifargs, index, newpath = args out_file = fname_presuffix( - in_file, suffix="_xform-%05d" % index, newpath=newpath, use_ext=True + in_file, suffix='_xform-%05d' % index, newpath=newpath, use_ext=True ) - copy_dtype = ifargs.pop("copy_dtype", False) + copy_dtype = ifargs.pop('copy_dtype', False) xfm = ApplyTransforms( input_image=in_file, transforms=in_xform, output_image=out_file, **ifargs ) - xfm.terminal_output = "allatonce" + xfm.terminal_output = 'allatonce' xfm.resource_monitor = False runtime = xfm.run().runtime @@ -216,16 +210,16 @@ def _arrange_xfms(transforms, num_files, tmp_folder): Convenience method to arrange the list of transforms that should be applied to each input file """ - base_xform = ["#Insight Transform File V1.0", "#Transform 0"] + base_xform = ['#Insight Transform File V1.0', '#Transform 0'] # Initialize the transforms matrix xfms_T = [] for i, tf_file in enumerate(transforms): - if tf_file == "identity": + if tf_file == 'identity': xfms_T.append([tf_file] * num_files) continue # If it is a deformation field, copy to the tfs_matrix directly - if guess_type(tf_file)[0] != "text/plain": + if guess_type(tf_file)[0] != 'text/plain': xfms_T.append([tf_file] * num_files) continue @@ -233,15 +227,15 @@ def _arrange_xfms(transforms, num_files, tmp_folder): tfdata = tf_fh.read().strip() # If it is not an ITK transform file, copy to the tfs_matrix directly - if not tfdata.startswith("#Insight Transform File"): + if not tfdata.startswith('#Insight Transform File'): xfms_T.append([tf_file] * num_files) continue # Count number of transforms in ITK transform file - nxforms = tfdata.count("#Transform") + nxforms = tfdata.count('#Transform') # Remove first line - tfdata = tfdata.split("\n")[1:] + tfdata = tfdata.split('\n')[1:] # If it is a ITK transform file with only 1 xform, copy to the tfs_matrix directly if nxforms == 1: @@ -250,23 +244,23 @@ def _arrange_xfms(transforms, num_files, tmp_folder): if nxforms != num_files: raise RuntimeError( - "Number of transforms (%d) found in the ITK file does not match" - " the number of input image files (%d)." % (nxforms, num_files) + 'Number of transforms (%d) found in the ITK file does not match' + ' the number of input image files (%d).' 
% (nxforms, num_files) ) # At this point splitting transforms will be necessary, generate a base name out_base = fname_presuffix( - tf_file, suffix="_pos-%03d_xfm-{:05d}" % i, newpath=tmp_folder.name + tf_file, suffix='_pos-%03d_xfm-{:05d}' % i, newpath=tmp_folder.name ).format # Split combined ITK transforms file split_xfms = [] for xform_i in range(nxforms): # Find start token to extract - startidx = tfdata.index("#Transform %d" % xform_i) - next_xform = base_xform + tfdata[startidx + 1:startidx + 4] + [""] + startidx = tfdata.index('#Transform %d' % xform_i) + next_xform = base_xform + tfdata[startidx + 1 : startidx + 4] + [''] xfm_file = out_base(xform_i) - with open(xfm_file, "w") as out_xfm: - out_xfm.write("\n".join(next_xform)) + with open(xfm_file, 'w') as out_xfm: + out_xfm.write('\n'.join(next_xform)) split_xfms.append(xfm_file) xfms_T.append(split_xfms) diff --git a/niworkflows/interfaces/morphology.py b/niworkflows/interfaces/morphology.py index 8591911f144..3d099f98cf7 100644 --- a/niworkflows/interfaces/morphology.py +++ b/niworkflows/interfaces/morphology.py @@ -21,26 +21,27 @@ # https://www.nipreps.org/community/licensing/ # """Mathematical morphology operations as nipype interfaces.""" + from pathlib import Path -import numpy as np -import nibabel as nb +import nibabel as nb +import numpy as np from nipype.interfaces.base import ( - traits, - TraitedSpec, BaseInterfaceInputSpec, File, SimpleInterface, + TraitedSpec, + traits, ) class _BinaryDilationInputSpec(BaseInterfaceInputSpec): - in_mask = File(exists=True, mandatory=True, desc="input mask") - radius = traits.Int(2, usedefault=True, desc="Radius of dilation") + in_mask = File(exists=True, mandatory=True, desc='input mask') + radius = traits.Int(2, usedefault=True, desc='Radius of dilation') class _BinaryDilationOutputSpec(TraitedSpec): - out_mask = File(exists=False, desc="dilated mask") + out_mask = File(exists=False, desc='dilated mask') class BinaryDilation(SimpleInterface): @@ -59,21 +60,21 @@ def _run_interface(self, runtime): maskdata, radius=self.inputs.radius, ) - out_file = str((Path(runtime.cwd) / "dilated_mask.nii.gz").absolute()) + out_file = str((Path(runtime.cwd) / 'dilated_mask.nii.gz').absolute()) out_img = mask_img.__class__(dilated, mask_img.affine, mask_img.header) - out_img.set_data_dtype("uint8") + out_img.set_data_dtype('uint8') out_img.to_filename(out_file) - self._results["out_mask"] = out_file + self._results['out_mask'] = out_file return runtime class _BinarySubtractInputSpec(BaseInterfaceInputSpec): - in_base = File(exists=True, mandatory=True, desc="input base mask") - in_subtract = File(exists=True, mandatory=True, desc="input subtract mask") + in_base = File(exists=True, mandatory=True, desc='input base mask') + in_subtract = File(exists=True, mandatory=True, desc='input subtract mask') class _BinarySubtractionOutputSpec(TraitedSpec): - out_mask = File(exists=False, desc="subtracted mask") + out_mask = File(exists=False, desc='subtracted mask') class BinarySubtraction(SimpleInterface): @@ -88,15 +89,11 @@ def _run_interface(self, runtime): data = np.bool_(base_img.dataobj) data[np.bool_(nb.load(self.inputs.in_subtract).dataobj)] = False - out_file = str((Path(runtime.cwd) / "subtracted_mask.nii.gz").absolute()) - out_img = base_img.__class__( - data, - base_img.affine, - base_img.header - ) - out_img.set_data_dtype("uint8") + out_file = str((Path(runtime.cwd) / 'subtracted_mask.nii.gz').absolute()) + out_img = base_img.__class__(data, base_img.affine, base_img.header) + 
out_img.set_data_dtype('uint8') out_img.to_filename(out_file) - self._results["out_mask"] = out_file + self._results['out_mask'] = out_file return runtime diff --git a/niworkflows/interfaces/nibabel.py b/niworkflows/interfaces/nibabel.py index 611870a8386..d041164a692 100644 --- a/niworkflows/interfaces/nibabel.py +++ b/niworkflows/interfaces/nibabel.py @@ -21,36 +21,37 @@ # https://www.nipreps.org/community/licensing/ # """Nibabel-based interfaces.""" + from pathlib import Path from warnings import warn -import numpy as np import nibabel as nb +import numpy as np from nipype import logging -from nipype.utils.filemanip import fname_presuffix from nipype.interfaces.base import ( - traits, - TraitedSpec, BaseInterfaceInputSpec, File, - SimpleInterface, - OutputMultiObject, InputMultiObject, + OutputMultiObject, + SimpleInterface, + TraitedSpec, + traits, ) +from nipype.utils.filemanip import fname_presuffix -IFLOGGER = logging.getLogger("nipype.interface") +IFLOGGER = logging.getLogger('nipype.interface') class _ApplyMaskInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="an image") - in_mask = File(exists=True, mandatory=True, desc="a mask") + in_file = File(exists=True, mandatory=True, desc='an image') + in_mask = File(exists=True, mandatory=True, desc='a mask') threshold = traits.Float( - 0.5, usedefault=True, desc="a threshold to the mask, if it is nonbinary" + 0.5, usedefault=True, desc='a threshold to the mask, if it is nonbinary' ) class _ApplyMaskOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="masked file") + out_file = File(exists=True, desc='masked file') class ApplyMask(SimpleInterface): @@ -64,32 +65,32 @@ def _run_interface(self, runtime): msknii = nb.load(self.inputs.in_mask) msk = msknii.get_fdata() > self.inputs.threshold - self._results["out_file"] = fname_presuffix( - self.inputs.in_file, suffix="_masked", newpath=runtime.cwd + self._results['out_file'] = fname_presuffix( + self.inputs.in_file, suffix='_masked', newpath=runtime.cwd ) if img.dataobj.shape[:3] != msk.shape: - raise ValueError("Image and mask sizes do not match.") + raise ValueError('Image and mask sizes do not match.') if not np.allclose(img.affine, msknii.affine): - raise ValueError("Image and mask affines are not similar enough.") + raise ValueError('Image and mask affines are not similar enough.') if img.dataobj.ndim == msk.ndim + 1: msk = msk[..., np.newaxis] masked = img.__class__(img.dataobj * msk, None, img.header) - masked.to_filename(self._results["out_file"]) + masked.to_filename(self._results['out_file']) return runtime class _BinarizeInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="input image") - thresh_low = traits.Float(mandatory=True, desc="non-inclusive lower threshold") + in_file = File(exists=True, mandatory=True, desc='input image') + thresh_low = traits.Float(mandatory=True, desc='non-inclusive lower threshold') class _BinarizeOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="masked file") - out_mask = File(exists=True, desc="output mask") + out_file = File(exists=True, desc='masked file') + out_mask = File(exists=True, desc='output mask') class Binarize(SimpleInterface): @@ -101,38 +102,39 @@ class Binarize(SimpleInterface): def _run_interface(self, runtime): img = nb.load(self.inputs.in_file) - self._results["out_file"] = fname_presuffix( - self.inputs.in_file, suffix="_masked", newpath=runtime.cwd + self._results['out_file'] = fname_presuffix( + self.inputs.in_file, suffix='_masked', 
newpath=runtime.cwd ) - self._results["out_mask"] = fname_presuffix( - self.inputs.in_file, suffix="_mask", newpath=runtime.cwd + self._results['out_mask'] = fname_presuffix( + self.inputs.in_file, suffix='_mask', newpath=runtime.cwd ) data = img.get_fdata() mask = data > self.inputs.thresh_low data[~mask] = 0.0 masked = img.__class__(data, img.affine, img.header) - masked.to_filename(self._results["out_file"]) + masked.to_filename(self._results['out_file']) - img.header.set_data_dtype("uint8") - maskimg = img.__class__(mask.astype("uint8"), img.affine, img.header) - maskimg.to_filename(self._results["out_mask"]) + img.header.set_data_dtype('uint8') + maskimg = img.__class__(mask.astype('uint8'), img.affine, img.header) + maskimg.to_filename(self._results['out_mask']) return runtime class _BinaryDilationInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="binary file to dilate") - radius = traits.Float(3, usedefault=True, desc="structure element (ball) radius") - iterations = traits.Range(low=0, value=1, usedefault=True, desc="repeat dilation") + in_file = File(exists=True, mandatory=True, desc='binary file to dilate') + radius = traits.Float(3, usedefault=True, desc='structure element (ball) radius') + iterations = traits.Range(low=0, value=1, usedefault=True, desc='repeat dilation') class _BinaryDilationOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="the input file, after binary dilation") + out_file = File(exists=True, desc='the input file, after binary dilation') class BinaryDilation(SimpleInterface): """Morphological binary dilation using Scipy.""" + # DEPRECATED in 1.7.0 # To remove in 1.9.0 @@ -140,15 +142,19 @@ class BinaryDilation(SimpleInterface): output_spec = _BinaryDilationOutputSpec def __init__(self, from_file=None, resource_monitor=None, **inputs): - warn("""\ + warn( + """\ niworkflows.interfaces.nibabel.BinaryDilation is deprecated in favor of niworkflows.interfaces.morphology.BinaryDilation. Please validate that interface for your use case and switch. 
-""", DeprecationWarning, stacklevel=2) +""", + DeprecationWarning, + stacklevel=2, + ) super().__init__(from_file=from_file, resource_monitor=resource_monitor, **inputs) def _run_interface(self, runtime): - self._results["out_file"] = _dilate( + self._results['out_file'] = _dilate( self.inputs.in_file, radius=self.inputs.radius, iterations=self.inputs.iterations, @@ -158,11 +164,11 @@ def _run_interface(self, runtime): class _SplitSeriesInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="input 4d image") + in_file = File(exists=True, mandatory=True, desc='input 4d image') class _SplitSeriesOutputSpec(TraitedSpec): - out_files = OutputMultiObject(File(exists=True), desc="output list of 3d images") + out_files = OutputMultiObject(File(exists=True), desc='output list of 3d images') class SplitSeries(SimpleInterface): @@ -181,29 +187,25 @@ def _run_interface(self, runtime): img.dataobj.reshape(img.shape[:3] + extra_dims), img.affine, img.header ) - self._results["out_files"] = [] + self._results['out_files'] = [] for i, img_3d in enumerate(nb.four_to_three(img)): - out_file = fname_presuffix( - in_file, suffix=f"_idx-{i:03}", newpath=runtime.cwd - ) + out_file = fname_presuffix(in_file, suffix=f'_idx-{i:03}', newpath=runtime.cwd) img_3d.to_filename(out_file) - self._results["out_files"].append(out_file) + self._results['out_files'].append(out_file) return runtime class _MergeSeriesInputSpec(BaseInterfaceInputSpec): - in_files = InputMultiObject( - File(exists=True, mandatory=True, desc="input list of 3d images") - ) + in_files = InputMultiObject(File(exists=True, mandatory=True, desc='input list of 3d images')) allow_4D = traits.Bool( - True, usedefault=True, desc="whether 4D images are allowed to be concatenated" + True, usedefault=True, desc='whether 4D images are allowed to be concatenated' ) - affine_tolerance = traits.Float(desc="Absolute tolerance allowed between image affines") + affine_tolerance = traits.Float(desc='Absolute tolerance allowed between image affines') class _MergeSeriesOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="output 4d image") + out_file = File(exists=True, desc='output 4d image') class MergeSeries(SimpleInterface): @@ -222,8 +224,8 @@ def _run_interface(self, runtime): aff0 = filenii.affine elif not np.allclose(aff0, filenii.affine, atol=self.inputs.affine_tolerance): raise ValueError( - "Difference in affines greater than allowed tolerance " - f"{self.inputs.affine_tolerance}" + 'Difference in affines greater than allowed tolerance ' + f'{self.inputs.affine_tolerance}' ) ndim = filenii.dataobj.ndim if ndim == 3: @@ -233,29 +235,22 @@ def _run_interface(self, runtime): nii_list += nb.four_to_three(filenii) continue else: - raise ValueError( - f"Input image has an incorrect number of dimensions ({ndim})." 
- ) + raise ValueError(f'Input image has an incorrect number of dimensions ({ndim}).') - img_4d = nb.concat_images( - nii_list, - check_affines=not bool(self.inputs.affine_tolerance) - ) - out_file = fname_presuffix( - self.inputs.in_files[0], suffix="_merged", newpath=runtime.cwd - ) + img_4d = nb.concat_images(nii_list, check_affines=not bool(self.inputs.affine_tolerance)) + out_file = fname_presuffix(self.inputs.in_files[0], suffix='_merged', newpath=runtime.cwd) img_4d.to_filename(out_file) - self._results["out_file"] = out_file + self._results['out_file'] = out_file return runtime class _MergeROIsInputSpec(BaseInterfaceInputSpec): - in_files = InputMultiObject(File(exists=True), desc="ROI files to be merged") + in_files = InputMultiObject(File(exists=True), desc='ROI files to be merged') class _MergeROIsOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="NIfTI containing all ROIs") + out_file = File(exists=True, desc='NIfTI containing all ROIs') class MergeROIs(SimpleInterface): @@ -265,22 +260,20 @@ class MergeROIs(SimpleInterface): output_spec = _MergeROIsOutputSpec def _run_interface(self, runtime): - self._results["out_file"] = _merge_rois(self.inputs.in_files, newpath=runtime.cwd) + self._results['out_file'] = _merge_rois(self.inputs.in_files, newpath=runtime.cwd) return runtime class _RegridToZoomsInputSpec(BaseInterfaceInputSpec): - in_file = File( - exists=True, mandatory=True, desc="a file whose resolution is to change" - ) + in_file = File(exists=True, mandatory=True, desc='a file whose resolution is to change') zooms = traits.Tuple( traits.Float, traits.Float, traits.Float, mandatory=True, - desc="the new resolution", + desc='the new resolution', ) - order = traits.Int(3, usedefault=True, desc="order of interpolator") + order = traits.Int(3, usedefault=True, desc='order of interpolator') clip = traits.Bool( True, usedefault=True, @@ -291,12 +284,12 @@ class _RegridToZoomsInputSpec(BaseInterfaceInputSpec): traits.Float(), default=False, usedefault=True, - desc="apply gaussian smoothing before resampling", + desc='apply gaussian smoothing before resampling', ) class _RegridToZoomsOutputSpec(TraitedSpec): - out_file = File(exists=True, dec="the regridded file") + out_file = File(exists=True, dec='the regridded file') class RegridToZooms(SimpleInterface): @@ -308,8 +301,8 @@ class RegridToZooms(SimpleInterface): def _run_interface(self, runtime): from ..utils.images import resample_by_spacing - self._results["out_file"] = fname_presuffix( - self.inputs.in_file, suffix="_regrid", newpath=runtime.cwd + self._results['out_file'] = fname_presuffix( + self.inputs.in_file, suffix='_regrid', newpath=runtime.cwd ) resample_by_spacing( self.inputs.in_file, @@ -317,20 +310,18 @@ def _run_interface(self, runtime): order=self.inputs.order, clip=self.inputs.clip, smooth=self.inputs.smooth, - ).to_filename(self._results["out_file"]) + ).to_filename(self._results['out_file']) return runtime class _DemeanImageInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="image to be demeaned") - in_mask = File( - exists=True, mandatory=True, desc="mask where median will be calculated" - ) - only_mask = traits.Bool(False, usedefault=True, desc="demean only within mask") + in_file = File(exists=True, mandatory=True, desc='image to be demeaned') + in_mask = File(exists=True, mandatory=True, desc='mask where median will be calculated') + only_mask = traits.Bool(False, usedefault=True, desc='demean only within mask') class _DemeanImageOutputSpec(TraitedSpec): - 
out_file = File(exists=True, desc="demeaned image") + out_file = File(exists=True, desc='demeaned image') class DemeanImage(SimpleInterface): @@ -340,7 +331,7 @@ class DemeanImage(SimpleInterface): def _run_interface(self, runtime): from ..utils.images import demean - self._results["out_file"] = demean( + self._results['out_file'] = demean( self.inputs.in_file, self.inputs.in_mask, only_mask=self.inputs.only_mask, @@ -350,15 +341,13 @@ def _run_interface(self, runtime): class _FilledImageLikeInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="image to be demeaned") - fill_value = traits.Float(1.0, usedefault=True, desc="value to fill") - dtype = traits.Enum( - "float32", "uint8", usedefault=True, desc="force output data type" - ) + in_file = File(exists=True, mandatory=True, desc='image to be demeaned') + fill_value = traits.Float(1.0, usedefault=True, desc='value to fill') + dtype = traits.Enum('float32', 'uint8', usedefault=True, desc='force output data type') class _FilledImageLikeOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="demeaned image") + out_file = File(exists=True, desc='demeaned image') class FilledImageLike(SimpleInterface): @@ -368,7 +357,7 @@ class FilledImageLike(SimpleInterface): def _run_interface(self, runtime): from ..utils.images import nii_ones_like - self._results["out_file"] = nii_ones_like( + self._results['out_file'] = nii_ones_like( self.inputs.in_file, self.inputs.fill_value, self.inputs.dtype, @@ -378,28 +367,26 @@ def _run_interface(self, runtime): class _GenerateSamplingReferenceInputSpec(BaseInterfaceInputSpec): - fixed_image = File( - exists=True, mandatory=True, desc="the reference file, defines the FoV" - ) - moving_image = File(exists=True, mandatory=True, desc="the pixel size reference") - xform_code = traits.Enum(None, 2, 4, usedefault=True, desc="force xform code") + fixed_image = File(exists=True, mandatory=True, desc='the reference file, defines the FoV') + moving_image = File(exists=True, mandatory=True, desc='the pixel size reference') + xform_code = traits.Enum(None, 2, 4, usedefault=True, desc='force xform code') fov_mask = traits.Either( None, File(exists=True), usedefault=True, - desc="mask to clip field of view (in fixed_image space)", + desc='mask to clip field of view (in fixed_image space)', ) keep_native = traits.Bool( True, usedefault=True, - desc="calculate a grid with native resolution covering " - "the volume extent given by fixed_image, fast forward " - "fixed_image otherwise.", + desc='calculate a grid with native resolution covering ' + 'the volume extent given by fixed_image, fast forward ' + 'fixed_image otherwise.', ) class _GenerateSamplingReferenceOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="one file with all inputs flattened") + out_file = File(exists=True, desc='one file with all inputs flattened') class GenerateSamplingReference(SimpleInterface): @@ -422,39 +409,35 @@ class GenerateSamplingReference(SimpleInterface): def _run_interface(self, runtime): if not self.inputs.keep_native: - self._results["out_file"] = self.inputs.fixed_image + self._results['out_file'] = self.inputs.fixed_image return runtime from .. 
import __version__ - self._results["out_file"] = _gen_reference( + self._results['out_file'] = _gen_reference( self.inputs.fixed_image, self.inputs.moving_image, fov_mask=self.inputs.fov_mask, force_xform_code=self.inputs.xform_code, - message="%s (niworkflows v%s)" % (self.__class__.__name__, __version__), + message=f'{self.__class__.__name__} (niworkflows v{__version__})', newpath=runtime.cwd, ) return runtime class _IntensityClipInputSpec(BaseInterfaceInputSpec): - in_file = File( - exists=True, mandatory=True, desc="3D file which intensity will be clipped" - ) - p_min = traits.Float(35.0, usedefault=True, desc="percentile for the lower bound") - p_max = traits.Float(99.98, usedefault=True, desc="percentile for the upper bound") + in_file = File(exists=True, mandatory=True, desc='3D file which intensity will be clipped') + p_min = traits.Float(35.0, usedefault=True, desc='percentile for the lower bound') + p_max = traits.Float(99.98, usedefault=True, desc='percentile for the upper bound') nonnegative = traits.Bool( - True, usedefault=True, desc="whether input intensities must be positive" - ) - dtype = traits.Enum( - "int16", "float32", "uint8", usedefault=True, desc="output datatype" + True, usedefault=True, desc='whether input intensities must be positive' ) - invert = traits.Bool(False, usedefault=True, desc="finalize by inverting contrast") + dtype = traits.Enum('int16', 'float32', 'uint8', usedefault=True, desc='output datatype') + invert = traits.Bool(False, usedefault=True, desc='finalize by inverting contrast') class _IntensityClipOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="file after clipping") + out_file = File(exists=True, desc='file after clipping') class IntensityClip(SimpleInterface): @@ -464,7 +447,7 @@ class IntensityClip(SimpleInterface): output_spec = _IntensityClipOutputSpec def _run_interface(self, runtime): - self._results["out_file"] = _advanced_clip( + self._results['out_file'] = _advanced_clip( self.inputs.in_file, p_min=self.inputs.p_min, p_max=self.inputs.p_max, @@ -477,18 +460,18 @@ def _run_interface(self, runtime): class _MapLabelsInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, desc="Segmented NIfTI") + in_file = File(exists=True, desc='Segmented NIfTI') mappings = traits.Dict( - xor=["mappings_file"], - desc="Dictionary of label / replacement label pairs", + xor=['mappings_file'], + desc='Dictionary of label / replacement label pairs', ) mappings_file = File( - exists=True, xor=["mappings"], help="JSON composed of label / replacement label pairs." + exists=True, xor=['mappings'], help='JSON composed of label / replacement label pairs.' 
) class _MapLabelsOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="Labeled file") + out_file = File(exists=True, desc='Labeled file') class MapLabels(SimpleInterface): @@ -499,7 +482,7 @@ class MapLabels(SimpleInterface): def _run_interface(self, runtime): mapping = self.inputs.mappings or _load_int_json(self.inputs.mappings_file) - self._results["out_file"] = _remap_labels( + self._results['out_file'] = _remap_labels( self.inputs.in_file, mapping, newpath=runtime.cwd, @@ -508,17 +491,17 @@ def _run_interface(self, runtime): class ReorientImageInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="Moving file") + in_file = File(exists=True, mandatory=True, desc='Moving file') target_file = File( - exists=True, xor=["target_orientation"], desc="Reference file to reorient to" + exists=True, xor=['target_orientation'], desc='Reference file to reorient to' ) target_orientation = traits.Str( - xor=["target_file"], desc="Axis codes of coordinate system to reorient to" + xor=['target_file'], desc='Axis codes of coordinate system to reorient to' ) class ReorientImageOutputSpec(TraitedSpec): - out_file = File(desc="Reoriented file") + out_file = File(desc='Reoriented file') class ReorientImage(SimpleInterface): @@ -526,7 +509,7 @@ class ReorientImage(SimpleInterface): output_spec = ReorientImageOutputSpec def _run_interface(self, runtime): - self._results["out_file"] = reorient_file( + self._results['out_file'] = reorient_file( self.inputs.in_file, target_file=self.inputs.target_file, target_ornt=self.inputs.target_orientation, @@ -535,7 +518,11 @@ def _run_interface(self, runtime): def reorient_file( - in_file: str, *, target_file: str = None, target_ornt: str = None, newpath: str = None, + in_file: str, + *, + target_file: str = None, + target_ornt: str = None, + newpath: str = None, ) -> str: """ Reorient an image. @@ -553,7 +540,7 @@ def reorient_file( img = nb.load(in_file) if not target_file and not target_ornt: - raise TypeError("No target orientation or file is specified.") + raise TypeError('No target orientation or file is specified.') if target_file: target_img = nb.load(target_file) @@ -563,7 +550,7 @@ def reorient_file( if newpath is None: newpath = Path() - out_file = str((Path(newpath) / "reoriented.nii.gz").absolute()) + out_file = str((Path(newpath) / 'reoriented.nii.gz').absolute()) reoriented.to_filename(out_file) return out_file @@ -593,9 +580,7 @@ def _gen_reference( import nilearn.image as nli if out_file is None: - out_file = fname_presuffix( - fixed_image, suffix="_reference", newpath=newpath - ) + out_file = fname_presuffix(fixed_image, suffix='_reference', newpath=newpath) # Moving images may not be RAS/LPS (more generally, transverse-longitudinal-axial) reoriented_moving_img = nb.as_closest_canonical(nb.load(moving_image)) @@ -606,9 +591,7 @@ def _gen_reference( # A positive diagonal affine is RAS, hence the need to reorient above. 
new_affine = np.diag(np.round(new_zooms, 3)) - resampled = nli.resample_img( - fixed_image, target_affine=new_affine, interpolation="nearest" - ) + resampled = nli.resample_img(fixed_image, target_affine=new_affine, interpolation='nearest') if fov_mask is not None: # If we have a mask, resample again dropping (empty) samples @@ -617,15 +600,13 @@ def _gen_reference( masknii = nb.load(fov_mask) if np.all(masknii.shape[:3] != fixednii.shape[:3]): - raise RuntimeError("Fixed image and mask do not have the same dimensions.") + raise RuntimeError('Fixed image and mask do not have the same dimensions.') if not np.allclose(masknii.affine, fixednii.affine, atol=1e-5): - raise RuntimeError("Fixed image and mask have different affines") + raise RuntimeError('Fixed image and mask have different affines') # Get mask into reference space - masknii = nli.resample_img( - masknii, target_affine=new_affine, interpolation="nearest" - ) + masknii = nli.resample_img(masknii, target_affine=new_affine, interpolation='nearest') res_shape = np.array(masknii.shape[:3]) # Calculate a bounding box for the input mask @@ -644,7 +625,7 @@ def _gen_reference( fixed_image, target_affine=new_affine_4, target_shape=new_shape.tolist(), - interpolation="nearest", + interpolation='nearest', ) xform = resampled.affine # nibabel will pick the best affine @@ -661,15 +642,21 @@ def _gen_reference( # Keep 0, 2, 3, 4 unchanged resampled.header.set_qform(xform, int(xform_code)) resampled.header.set_sform(xform, int(xform_code)) - resampled.header["descrip"] = "reference image generated by %s." % ( - message or "(unknown software)" + resampled.header['descrip'] = 'reference image generated by %s.' % ( + message or '(unknown software)' ) resampled.to_filename(out_file) return out_file def _advanced_clip( - in_file, p_min=35, p_max=99.98, nonnegative=True, dtype="int16", invert=False, newpath=None, + in_file, + p_min=35, + p_max=99.98, + nonnegative=True, + dtype='int16', + invert=False, + newpath=None, ): """ Remove outliers at both ends of the intensity distribution and fit into a given dtype. 
@@ -686,30 +673,25 @@ def _advanced_clip( """ from pathlib import Path + import nibabel as nb import numpy as np from scipy import ndimage from skimage.morphology import ball - out_file = (Path(newpath or "") / "clipped.nii.gz").absolute() + out_file = (Path(newpath or '') / 'clipped.nii.gz').absolute() # Load data img = nb.squeeze_image(nb.load(in_file)) if len(img.shape) != 3: - raise RuntimeError(f"<{in_file}> is not a 3D file.") - data = img.get_fdata(dtype="float32") + raise RuntimeError(f'<{in_file}> is not a 3D file.') + data = img.get_fdata(dtype='float32') # Calculate stats on denoised version, to preempt outliers from biasing denoised = ndimage.median_filter(data, footprint=ball(3)) - a_min = np.percentile( - denoised[denoised > 0] if nonnegative else denoised, - p_min - ) - a_max = np.percentile( - denoised[denoised > 0] if nonnegative else denoised, - p_max - ) + a_min = np.percentile(denoised[denoised > 0] if nonnegative else denoised, p_min) + a_max = np.percentile(denoised[denoised > 0] if nonnegative else denoised, p_max) # Clip and cast data = np.clip(data, a_min=a_min, a_max=a_max) @@ -719,12 +701,12 @@ def _advanced_clip( if invert: data = 1.0 - data - if dtype in ("uint8", "int16"): + if dtype in ('uint8', 'int16'): data = np.round(255 * data).astype(dtype) hdr = img.header.copy() hdr.set_data_dtype(dtype) - hdr["cal_max"] = data.max() + hdr['cal_max'] = data.max() img.__class__(data, img.affine, hdr).to_filename(out_file) @@ -734,11 +716,12 @@ def _advanced_clip( def _dilate(in_file, radius=3, iterations=1, newpath=None): """Dilate (binary) input mask.""" from pathlib import Path - import numpy as np + import nibabel as nb + import numpy as np + from nipype.utils.filemanip import fname_presuffix from scipy import ndimage from skimage.morphology import ball - from nipype.utils.filemanip import fname_presuffix mask = nb.load(in_file) newdata = ndimage.binary_dilation( @@ -748,9 +731,9 @@ def _dilate(in_file, radius=3, iterations=1, newpath=None): ) hdr = mask.header.copy() - hdr.set_data_dtype("uint8") - out_file = fname_presuffix(in_file, suffix="_dil", newpath=newpath or Path.cwd()) - mask.__class__(newdata.astype("uint8"), mask.affine, hdr).to_filename(out_file) + hdr.set_data_dtype('uint8') + out_file = fname_presuffix(in_file, suffix='_dil', newpath=newpath or Path.cwd()) + mask.__class__(newdata.astype('uint8'), mask.affine, hdr).to_filename(out_file) return out_file @@ -765,6 +748,7 @@ def _merge_rois(in_files, newpath=None): If any of these checks fail, an ``AssertionError`` will be raised. 
""" from pathlib import Path + import nibabel as nb import numpy as np @@ -776,24 +760,28 @@ def _merge_rois(in_files, newpath=None): nonzero = np.any(data, axis=3) for roi in in_files[1:]: img = nb.load(roi) - assert img.shape == data.shape, "Mismatch in image shape" - assert np.allclose(img.affine, affine), "Mismatch in affine" + if not img.shape == data.shape: + raise ValueError('Mismatch in image shape') + if not np.allclose(img.affine, affine): + raise ValueError('Mismatch in affine') roi_data = np.asanyarray(img.dataobj) roi_nonzero = np.any(roi_data, axis=3) - assert not np.any(roi_nonzero & nonzero), "Overlapping ROIs" + if np.any(roi_nonzero & nonzero): + raise ValueError('Overlapping ROIs') nonzero |= roi_nonzero data += roi_data del roi_data if newpath is None: newpath = Path() - out_file = str((Path(newpath) / "combined.nii.gz").absolute()) + out_file = str((Path(newpath) / 'combined.nii.gz').absolute()) img.__class__(data, affine, header).to_filename(out_file) return out_file def _remap_labels(in_file, mapping, newpath=None): from pathlib import Path + import nibabel as nb import numpy as np @@ -814,7 +802,7 @@ def _relabel(label): if newpath is None: newpath = Path() - out_file = str((Path(newpath) / "relabeled.nii.gz").absolute()) + out_file = str((Path(newpath) / 'relabeled.nii.gz').absolute()) img.__class__(out, img.affine, header=img.header).to_filename(out_file) return out_file diff --git a/niworkflows/interfaces/nilearn.py b/niworkflows/interfaces/nilearn.py index 74694dd74c5..711098d095c 100644 --- a/niworkflows/interfaces/nilearn.py +++ b/niworkflows/interfaces/nilearn.py @@ -21,59 +21,53 @@ # https://www.nipreps.org/community/licensing/ # """Utilities based on nilearn.""" + import os + import nibabel as nb import numpy as np - from nipype import logging -from nipype.utils.filemanip import fname_presuffix from nipype.interfaces.base import ( - traits, - isdefined, - TraitedSpec, BaseInterfaceInputSpec, File, InputMultiPath, SimpleInterface, + TraitedSpec, + isdefined, + traits, ) from nipype.interfaces.mixins import reporting +from nipype.utils.filemanip import fname_presuffix + from .reportlets import base as nrb try: from nilearn import __version__ as NILEARN_VERSION except ImportError: - NILEARN_VERSION = "unknown" + NILEARN_VERSION = 'unknown' -LOGGER = logging.getLogger("nipype.interface") -__all__ = ["NILEARN_VERSION", "MaskEPI", "Merge", "ComputeEPIMask"] +LOGGER = logging.getLogger('nipype.interface') +__all__ = ['NILEARN_VERSION', 'MaskEPI', 'Merge', 'ComputeEPIMask'] class _MaskEPIInputSpec(BaseInterfaceInputSpec): - in_files = InputMultiPath( - File(exists=True), mandatory=True, desc="input EPI or list of files" - ) + in_files = InputMultiPath(File(exists=True), mandatory=True, desc='input EPI or list of files') lower_cutoff = traits.Float(0.2, usedefault=True) upper_cutoff = traits.Float(0.85, usedefault=True) connected = traits.Bool(True, usedefault=True) - enhance_t2 = traits.Bool( - False, usedefault=True, desc="enhance T2 contrast on image" - ) + enhance_t2 = traits.Bool(False, usedefault=True, desc='enhance T2 contrast on image') opening = traits.Int(2, usedefault=True) closing = traits.Bool(True, usedefault=True) fill_holes = traits.Bool(True, usedefault=True) exclude_zeros = traits.Bool(False, usedefault=True) ensure_finite = traits.Bool(True, usedefault=True) - target_affine = traits.Either( - None, traits.File(exists=True), default=None, usedefault=True - ) - target_shape = traits.Either( - None, traits.File(exists=True), default=None, 
usedefault=True - ) + target_affine = traits.Either(None, traits.File(exists=True), default=None, usedefault=True) + target_shape = traits.Either(None, traits.File(exists=True), default=None, usedefault=True) no_sanitize = traits.Bool(False, usedefault=True) class _MaskEPIOutputSpec(TraitedSpec): - out_mask = File(exists=True, desc="output mask") + out_mask = File(exists=True, desc='output mask') class MaskEPI(SimpleInterface): @@ -83,9 +77,9 @@ class MaskEPI(SimpleInterface): output_spec = _MaskEPIOutputSpec def _run_interface(self, runtime): - from skimage import morphology as sim - from scipy.ndimage.morphology import binary_fill_holes from nilearn.masking import compute_epi_mask + from scipy.ndimage.morphology import binary_fill_holes + from skimage import morphology as sim in_files = self.inputs.in_files @@ -126,38 +120,34 @@ def _run_interface(self, runtime): sform, code = nii.get_sform(coded=True) masknii.set_sform(sform, int(code)) - self._results["out_mask"] = fname_presuffix( - self.inputs.in_files[0], suffix="_mask", newpath=runtime.cwd + self._results['out_mask'] = fname_presuffix( + self.inputs.in_files[0], suffix='_mask', newpath=runtime.cwd ) - masknii.to_filename(self._results["out_mask"]) + masknii.to_filename(self._results['out_mask']) return runtime class _MergeInputSpec(BaseInterfaceInputSpec): in_files = InputMultiPath( - File(exists=True), mandatory=True, desc="input list of files to merge" + File(exists=True), mandatory=True, desc='input list of files to merge' ) dtype = traits.Enum( - "f4", - "f8", - "u1", - "u2", - "u4", - "i2", - "i4", + 'f4', + 'f8', + 'u1', + 'u2', + 'u4', + 'i2', + 'i4', usedefault=True, - desc="numpy dtype of output image", - ) - header_source = File( - exists=True, desc="a Nifti file from which the header should be copied" - ) - compress = traits.Bool( - True, usedefault=True, desc="Use gzip compression on .nii output" + desc='numpy dtype of output image', ) + header_source = File(exists=True, desc='a Nifti file from which the header should be copied') + compress = traits.Bool(True, usedefault=True, desc='Use gzip compression on .nii output') class _MergeOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="output merged file") + out_file = File(exists=True, desc='output merged file') class Merge(SimpleInterface): @@ -169,10 +159,10 @@ class Merge(SimpleInterface): def _run_interface(self, runtime): from nilearn.image import concat_imgs - ext = ".nii.gz" if self.inputs.compress else ".nii" - self._results["out_file"] = fname_presuffix( + ext = '.nii.gz' if self.inputs.compress else '.nii' + self._results['out_file'] = fname_presuffix( self.inputs.in_files[0], - suffix="_merged" + ext, + suffix='_merged' + ext, newpath=runtime.cwd, use_ext=False, ) @@ -185,18 +175,18 @@ def _run_interface(self, runtime): list(new_nii.header.get_zooms()[:3]) + [src_hdr.get_zooms()[3]] ) - new_nii.to_filename(self._results["out_file"]) + new_nii.to_filename(self._results['out_file']) return runtime class _ComputeEPIMaskInputSpec(nrb._SVGReportCapableInputSpec, BaseInterfaceInputSpec): - in_file = File(exists=True, desc="3D or 4D EPI file") - dilation = traits.Int(desc="binary dilation on the nilearn output") + in_file = File(exists=True, desc='3D or 4D EPI file') + dilation = traits.Int(desc='binary dilation on the nilearn output') class _ComputeEPIMaskOutputSpec(reporting.ReportCapableOutputSpec): - mask_file = File(exists=True, desc="Binary brain mask") + mask_file = File(exists=True, desc='Binary brain mask') class ComputeEPIMask(nrb.SegmentationRC): 
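For reference, a minimal usage sketch of the `Merge` interface reformatted above; the file names are placeholders, not taken from this patch, and the inputs shown are the ones exposed in the spec above.

from niworkflows.interfaces.nilearn import Merge

merge = Merge()
merge.inputs.in_files = ['vol1.nii.gz', 'vol2.nii.gz']  # existing 3-D volumes to concatenate
merge.inputs.dtype = 'f4'        # output datatype (one of the Enum values above)
merge.inputs.compress = True     # write .nii.gz rather than .nii
result = merge.run()
result.outputs.out_file          # e.g. 'vol1_merged.nii.gz' in the working directory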
@@ -204,16 +194,14 @@ class ComputeEPIMask(nrb.SegmentationRC): output_spec = _ComputeEPIMaskOutputSpec def _run_interface(self, runtime): - from scipy.ndimage.morphology import binary_dilation from nilearn.masking import compute_epi_mask + from scipy.ndimage.morphology import binary_dilation orig_file_nii = nb.load(self.inputs.in_file) in_file_data = orig_file_nii.get_fdata() # pad the data to avoid the mask estimation running into edge effects - in_file_data_padded = np.pad( - in_file_data, (1, 1), "constant", constant_values=(0, 0) - ) + in_file_data_padded = np.pad(in_file_data, (1, 1), 'constant', constant_values=(0, 0)) padded_nii = nb.Nifti1Image( in_file_data_padded, orig_file_nii.affine, orig_file_nii.header @@ -232,20 +220,18 @@ def _run_interface(self, runtime): mask_data[in_file_data == 0] = 0 mask_data[np.isnan(in_file_data)] = 0 - better_mask = nb.Nifti1Image( - mask_data, orig_file_nii.affine, orig_file_nii.header - ) + better_mask = nb.Nifti1Image(mask_data, orig_file_nii.affine, orig_file_nii.header) better_mask.set_data_dtype(np.uint8) - better_mask.to_filename("mask_file.nii.gz") + better_mask.to_filename('mask_file.nii.gz') - self._mask_file = os.path.join(runtime.cwd, "mask_file.nii.gz") + self._mask_file = os.path.join(runtime.cwd, 'mask_file.nii.gz') runtime.returncode = 0 return super()._run_interface(runtime) def _list_outputs(self): outputs = super()._list_outputs() - outputs["mask_file"] = self._mask_file + outputs['mask_file'] = self._mask_file return outputs def _post_run_hook(self, runtime): @@ -272,7 +258,7 @@ def _enhance_t2_contrast(in_file, newpath=None, offset=0.5): effectively splits brain and background and makes the overall distribution more Gaussian. """ - out_file = fname_presuffix(in_file, suffix="_t1enh", newpath=newpath) + out_file = fname_presuffix(in_file, suffix='_t1enh', newpath=newpath) nii = nb.load(in_file) data = nii.get_fdata() maxd = data.max() diff --git a/niworkflows/interfaces/nitransforms.py b/niworkflows/interfaces/nitransforms.py index ea7dff18f41..97b6039946a 100644 --- a/niworkflows/interfaces/nitransforms.py +++ b/niworkflows/interfaces/nitransforms.py @@ -23,43 +23,42 @@ """Wrappers of NiTransforms.""" from pathlib import Path + from nipype.interfaces.base import ( - TraitedSpec, BaseInterfaceInputSpec, File, - SimpleInterface, InputMultiObject, - traits, + SimpleInterface, + TraitedSpec, isdefined, + traits, ) XFM_FMT = { - ".lta": "fs", - ".txt": "itk", - ".mat": "itk", - ".tfm": "itk", + '.lta': 'fs', + '.txt': 'itk', + '.mat': 'itk', + '.tfm': 'itk', } class _ConcatenateXFMsInputSpec(BaseInterfaceInputSpec): - in_xfms = InputMultiObject(File(exists=True), desc="input transform piles") - inverse = traits.Bool(False, usedefault=True, desc="generate inverse") - out_fmt = traits.Enum("itk", "fs", usedefault=True, desc="output format") + in_xfms = InputMultiObject(File(exists=True), desc='input transform piles') + inverse = traits.Bool(False, usedefault=True, desc='generate inverse') + out_fmt = traits.Enum('itk', 'fs', usedefault=True, desc='output format') reference = File( exists=True, - desc="reference file (only for writing LTA format, if not " - "concatenating another LTA).", + desc='reference file (only for writing LTA format, if not concatenating another LTA).', ) moving = File( exists=True, - desc="moving file (only for writing LTA format, if not " - "concatenating another LTA).", + desc='moving file (only for writing LTA format, if not concatenating another LTA).', ) class _ConcatenateXFMsOutputSpec(TraitedSpec): - 
out_xfm = File(exists=True, desc="output, combined transform") - out_inv = File(desc="output, combined transform") + out_xfm = File(exists=True, desc='output, combined transform') + out_inv = File(desc='output, combined transform') class ConcatenateXFMs(SimpleInterface): @@ -69,15 +68,15 @@ class ConcatenateXFMs(SimpleInterface): output_spec = _ConcatenateXFMsOutputSpec def _run_interface(self, runtime): - out_ext = "lta" if self.inputs.out_fmt == "fs" else "tfm" + out_ext = 'lta' if self.inputs.out_fmt == 'fs' else 'tfm' reference = self.inputs.reference if isdefined(self.inputs.reference) else None moving = self.inputs.moving if isdefined(self.inputs.moving) else None - out_file = Path(runtime.cwd) / f"out_fwd.{out_ext}" - self._results["out_xfm"] = str(out_file) + out_file = Path(runtime.cwd) / f'out_fwd.{out_ext}' + self._results['out_xfm'] = str(out_file) out_inv = None if self.inputs.inverse: - out_inv = Path(runtime.cwd) / f"out_inv.{out_ext}" - self._results["out_inv"] = str(out_inv) + out_inv = Path(runtime.cwd) / f'out_inv.{out_ext}' + self._results['out_inv'] = str(out_inv) concatenate_xfms( self.inputs.in_xfms, @@ -90,12 +89,10 @@ def _run_interface(self, runtime): return runtime -def concatenate_xfms( - in_files, out_file, out_inv=None, reference=None, moving=None, fmt="itk" -): +def concatenate_xfms(in_files, out_file, out_inv=None, reference=None, moving=None, fmt='itk'): """Concatenate linear transforms.""" - from nitransforms.manip import TransformChain from nitransforms.linear import load as load_affine + from nitransforms.manip import TransformChain xfm = TransformChain( [load_affine(f, fmt=XFM_FMT[Path(f).suffix]) for f in in_files] diff --git a/niworkflows/interfaces/norm.py b/niworkflows/interfaces/norm.py index 50c010afc19..de4234bdad2 100644 --- a/niworkflows/interfaces/norm.py +++ b/niworkflows/interfaces/norm.py @@ -21,28 +21,27 @@ # https://www.nipreps.org/community/licensing/ # """A robust ANTs T1-to-MNI registration workflow with fallback retry.""" -from os import path as op from multiprocessing import cpu_count -from packaging.version import Version -import numpy as np +from os import path as op -from nipype.interfaces.ants.registration import RegistrationOutputSpec +import numpy as np from nipype.interfaces.ants import AffineInitializer +from nipype.interfaces.ants.registration import RegistrationOutputSpec from nipype.interfaces.base import ( - traits, - isdefined, BaseInterface, BaseInterfaceInputSpec, File, + isdefined, + traits, ) - +from packaging.version import Version from templateflow.api import get as get_template + from .. import NIWORKFLOWS_LOG, __version__ from ..data import load as load_data from .fixes import FixHeaderRegistration as Registration - niworkflows_version = Version(__version__) @@ -51,60 +50,58 @@ class _SpatialNormalizationInputSpec(BaseInterfaceInputSpec): package_version = niworkflows_version # Moving image. - moving_image = File( - exists=True, mandatory=True, desc="image to apply transformation to" - ) + moving_image = File(exists=True, mandatory=True, desc='image to apply transformation to') # Reference image (optional). - reference_image = File(exists=True, desc="override the reference image") + reference_image = File(exists=True, desc='override the reference image') # Moving mask (optional). - moving_mask = File(exists=True, desc="moving image mask") + moving_mask = File(exists=True, desc='moving image mask') # Reference mask (optional). 
- reference_mask = File(exists=True, desc="reference image mask") + reference_mask = File(exists=True, desc='reference image mask') # Lesion mask (optional). - lesion_mask = File(exists=True, desc="lesion mask image") + lesion_mask = File(exists=True, desc='lesion mask image') # Number of threads to use for ANTs/ITK processes. num_threads = traits.Int( - cpu_count(), usedefault=True, nohash=True, desc="Number of ITK threads to use" + cpu_count(), usedefault=True, nohash=True, desc='Number of ITK threads to use' ) # ANTs parameter set to use. flavor = traits.Enum( - "precise", - "testing", - "fast", + 'precise', + 'testing', + 'fast', usedefault=True, - desc="registration settings parameter set", + desc='registration settings parameter set', ) # Template orientation. orientation = traits.Enum( - "RAS", - "LAS", + 'RAS', + 'LAS', mandatory=True, usedefault=True, - desc="modify template orientation (should match input image)", + desc='modify template orientation (should match input image)', ) # Modality of the reference image. reference = traits.Enum( - "T1w", - "T2w", - "boldref", - "PDw", + 'T1w', + 'T2w', + 'boldref', + 'PDw', mandatory=True, usedefault=True, - desc="set the reference modality for registration", + desc='set the reference modality for registration', ) # T1 or EPI registration? moving = traits.Enum( - "T1w", "boldref", usedefault=True, mandatory=True, desc="registration type" + 'T1w', 'boldref', usedefault=True, mandatory=True, desc='registration type' ) # Template to use as the default reference image. template = traits.Str( - "MNI152NLin2009cAsym", usedefault=True, desc="define the template to be used" + 'MNI152NLin2009cAsym', usedefault=True, desc='define the template to be used' ) # Load other settings from file. - settings = traits.List(File(exists=True), desc="pass on the list of settings files") + settings = traits.List(File(exists=True), desc='pass on the list of settings files') # Resolution of the default template. - template_spec = traits.DictStrAny(desc="template specifications") - template_resolution = traits.Enum(1, 2, None, desc="(DEPRECATED) template resolution") + template_spec = traits.DictStrAny(desc='template specifications') + template_resolution = traits.Enum(1, 2, None, desc='(DEPRECATED) template resolution') # Use explicit masking? 
explicit_masking = traits.Bool( True, @@ -115,16 +112,12 @@ class _SpatialNormalizationInputSpec(BaseInterfaceInputSpec): See https://sourceforge.net/p/advants/discussion/840261/thread/27216e69/#c7ba\ """, ) - initial_moving_transform = File(exists=True, desc="transform for initialization") - float = traits.Bool( - False, usedefault=True, desc="use single precision calculations" - ) + initial_moving_transform = File(exists=True, desc='transform for initialization') + float = traits.Bool(False, usedefault=True, desc='use single precision calculations') class _SpatialNormalizationOutputSpec(RegistrationOutputSpec): - reference_image = File( - exists=True, desc="reference image used for registration target" - ) + reference_image = File(exists=True, desc='reference image used for registration target') class SpatialNormalization(BaseInterface): @@ -140,14 +133,14 @@ class SpatialNormalization(BaseInterface): def _list_outputs(self): outputs = self.norm._list_outputs() - outputs["reference_image"] = self._reference_image + outputs['reference_image'] = self._reference_image return outputs def __init__(self, **inputs): self.norm = None self._reference_image = None self.retry = 1 - self.terminal_output = "file" + self.terminal_output = 'file' super().__init__(**inputs) def _get_settings(self): @@ -158,14 +151,19 @@ def _get_settings(self): # If user-defined settings exist... if isdefined(self.inputs.settings): # Note this in the log and return those settings. - NIWORKFLOWS_LOG.info("User-defined settings, overriding defaults") + NIWORKFLOWS_LOG.info('User-defined settings, overriding defaults') return self.inputs.settings data_dir = load_data() # Get a list of settings files that match the flavor. - return sorted([str(path) for path in data_dir.glob( - f"{self.inputs.moving.lower()}-mni_registration_{self.inputs.flavor}_*.json" - )]) + return sorted( + [ + str(path) + for path in data_dir.glob( + f'{self.inputs.moving.lower()}-mni_registration_{self.inputs.flavor}_*.json' + ) + ] + ) def _run_interface(self, runtime): # Get a list of settings files. @@ -173,29 +171,28 @@ def _run_interface(self, runtime): ants_args = self._get_ants_args() if not isdefined(self.inputs.initial_moving_transform): - NIWORKFLOWS_LOG.info("Estimating initial transform using AffineInitializer") + NIWORKFLOWS_LOG.info('Estimating initial transform using AffineInitializer') init = AffineInitializer( - fixed_image=ants_args["fixed_image"], - moving_image=ants_args["moving_image"], + fixed_image=ants_args['fixed_image'], + moving_image=ants_args['moving_image'], num_threads=self.inputs.num_threads, ) init.resource_monitor = False - init.terminal_output = "allatonce" + init.terminal_output = 'allatonce' init_result = init.run() # Save outputs (if available) - init_out = _write_outputs(init_result.runtime, ".nipype-init") + init_out = _write_outputs(init_result.runtime, '.nipype-init') if init_out: NIWORKFLOWS_LOG.info( - "Terminal outputs of initialization saved (%s).", - ", ".join(init_out), + 'Terminal outputs of initialization saved (%s).', + ', '.join(init_out), ) - ants_args["initial_moving_transform"] = init_result.outputs.out_file + ants_args['initial_moving_transform'] = init_result.outputs.out_file # For each settings file... for ants_settings in settings_files: - - NIWORKFLOWS_LOG.info("Loading settings from file %s.", ants_settings) + NIWORKFLOWS_LOG.info('Loading settings from file %s.', ants_settings) # Configure an ANTs run based on these settings. 
self.norm = Registration(from_file=ants_settings, **ants_args) self.norm.resource_monitor = False @@ -203,30 +200,24 @@ def _run_interface(self, runtime): cmd = self.norm.cmdline # Print the retry number and command line call to the log. - NIWORKFLOWS_LOG.info("Retry #%d, commandline: \n%s", self.retry, cmd) + NIWORKFLOWS_LOG.info('Retry #%d, commandline: \n%s', self.retry, cmd) self.norm.ignore_exception = True - with open("command.txt", "w") as cmdfile: - print(cmd + "\n", file=cmdfile) + with open('command.txt', 'w') as cmdfile: + print(cmd + '\n', file=cmdfile) # Try running registration. interface_result = self.norm.run() if interface_result.runtime.returncode != 0: - NIWORKFLOWS_LOG.warning("Retry #%d failed.", self.retry) + NIWORKFLOWS_LOG.warning('Retry #%d failed.', self.retry) # Save outputs (if available) - term_out = _write_outputs( - interface_result.runtime, ".nipype-%04d" % self.retry - ) + term_out = _write_outputs(interface_result.runtime, '.nipype-%04d' % self.retry) if term_out: - NIWORKFLOWS_LOG.warning( - "Log of failed retry saved (%s).", ", ".join(term_out) - ) + NIWORKFLOWS_LOG.warning('Log of failed retry saved (%s).', ', '.join(term_out)) else: runtime.returncode = 0 # Note this in the log. - NIWORKFLOWS_LOG.info( - "Successful spatial normalization (retry #%d).", self.retry - ) + NIWORKFLOWS_LOG.info('Successful spatial normalization (retry #%d).', self.retry) # Break out of the retry loop. return runtime @@ -234,17 +225,17 @@ def _run_interface(self, runtime): # If all tries fail, raise an error. raise RuntimeError( - "Robust spatial normalization failed after %d retries." % (self.retry - 1) + 'Robust spatial normalization failed after %d retries.' % (self.retry - 1) ) def _get_ants_args(self): args = { - "moving_image": self.inputs.moving_image, - "num_threads": self.inputs.num_threads, - "float": self.inputs.float, - "terminal_output": "file", - "write_composite_transform": True, - "initial_moving_transform": self.inputs.initial_moving_transform, + 'moving_image': self.inputs.moving_image, + 'num_threads': self.inputs.num_threads, + 'float': self.inputs.float, + 'terminal_output': 'file', + 'write_composite_transform': True, + 'initial_moving_transform': self.inputs.initial_moving_transform, } """ @@ -281,17 +272,17 @@ def _get_ants_args(self): if self.inputs.explicit_masking: # Mask the moving image. # Do not use a moving mask during registration. - args["moving_image"] = mask( + args['moving_image'] = mask( self.inputs.moving_image, self.inputs.moving_mask, - "moving_masked.nii.gz", + 'moving_masked.nii.gz', ) # If explicit masking is disabled... else: # Use the moving mask during registration. # Do not mask the moving image. - args["moving_image_masks"] = self.inputs.moving_mask + args['moving_image_masks'] = self.inputs.moving_mask # If a lesion mask is also provided... if isdefined(self.inputs.lesion_mask): @@ -299,7 +290,7 @@ def _get_ants_args(self): # [global mask - lesion mask] (if explicit masking is enabled) # [moving mask - lesion mask] (if explicit masking is disabled) # Use this as the moving mask. - args["moving_image_masks"] = create_cfm( + args['moving_image_masks'] = create_cfm( self.inputs.moving_mask, lesion_mask=self.inputs.lesion_mask, global_mask=self.inputs.explicit_masking, @@ -310,7 +301,7 @@ def _get_ants_args(self): elif isdefined(self.inputs.lesion_mask): # Create a cost function mask with the form: [global mask - lesion mask] # Use this as the moving mask. 
- args["moving_image_masks"] = create_cfm( + args['moving_image_masks'] = create_cfm( self.inputs.moving_image, lesion_mask=self.inputs.lesion_mask, global_mask=True, @@ -347,7 +338,7 @@ def _get_ants_args(self): # If a reference image is provided... if isdefined(self.inputs.reference_image): # Use the reference image as the fixed image. - args["fixed_image"] = self.inputs.reference_image + args['fixed_image'] = self.inputs.reference_image self._reference_image = self.inputs.reference_image # If a reference mask is provided... @@ -356,17 +347,17 @@ def _get_ants_args(self): if self.inputs.explicit_masking: # Mask the reference image. # Do not use a fixed mask during registration. - args["fixed_image"] = mask( + args['fixed_image'] = mask( self.inputs.reference_image, self.inputs.reference_mask, - "fixed_masked.nii.gz", + 'fixed_masked.nii.gz', ) # If a lesion mask is also provided... if isdefined(self.inputs.lesion_mask): # Create a cost function mask with the form: [global mask] # Use this as the fixed mask. - args["fixed_image_masks"] = create_cfm( + args['fixed_image_masks'] = create_cfm( self.inputs.reference_mask, lesion_mask=None, global_mask=True, @@ -377,14 +368,14 @@ def _get_ants_args(self): else: # Use the reference mask as the fixed mask during registration. # Do not mask the fixed image. - args["fixed_image_masks"] = self.inputs.reference_mask + args['fixed_image_masks'] = self.inputs.reference_mask # If no reference mask is provided... # But a lesion mask *IS* provided ... elif isdefined(self.inputs.lesion_mask): # Create a cost function mask with the form: [global mask] # Use this as the fixed mask - args["fixed_image_masks"] = create_cfm( + args['fixed_image_masks'] = create_cfm( self.inputs.reference_image, lesion_mask=None, global_mask=True ) @@ -393,28 +384,22 @@ def _get_ants_args(self): from ..utils.misc import get_template_specs # Raise an error if the user specifies an unsupported image orientation. - if self.inputs.orientation == "LAS": + if self.inputs.orientation == 'LAS': raise NotImplementedError template_spec = ( - self.inputs.template_spec - if isdefined(self.inputs.template_spec) - else {} + self.inputs.template_spec if isdefined(self.inputs.template_spec) else {} ) - default_resolution = {"precise": 1, "fast": 2, "testing": 2}[ - self.inputs.flavor - ] + default_resolution = {'precise': 1, 'fast': 2, 'testing': 2}[self.inputs.flavor] # Set the template resolution. if isdefined(self.inputs.template_resolution): - NIWORKFLOWS_LOG.warning( - "The use of ``template_resolution`` is deprecated" - ) - template_spec["res"] = self.inputs.template_resolution + NIWORKFLOWS_LOG.warning('The use of ``template_resolution`` is deprecated') + template_spec['res'] = self.inputs.template_resolution - template_spec["suffix"] = self.inputs.reference - template_spec["desc"] = None + template_spec['suffix'] = self.inputs.reference + template_spec['desc'] = None ref_template, template_spec = get_template_specs( self.inputs.template, template_spec=template_spec, @@ -426,36 +411,33 @@ def _get_ants_args(self): self._reference_image = ref_template if not op.isfile(self._reference_image): raise ValueError( - """\ -The registration reference must be an existing file, but path "%s" \ + f"""\ +The registration reference must be an existing file, but path "{ref_template}" \ cannot be found.""" - % ref_template ) # Get the template specified by the user. 
ref_mask = get_template( - self.inputs.template, desc="brain", suffix="mask", **template_spec - ) or get_template(self.inputs.template, label="brain", suffix="mask", **template_spec) + self.inputs.template, desc='brain', suffix='mask', **template_spec + ) or get_template(self.inputs.template, label='brain', suffix='mask', **template_spec) # Default is explicit masking disabled - args["fixed_image"] = ref_template + args['fixed_image'] = ref_template # Use the template mask as the fixed mask. - args["fixed_image_masks"] = str(ref_mask) + args['fixed_image_masks'] = str(ref_mask) # Overwrite defaults if explicit masking if self.inputs.explicit_masking: # Mask the template image with the template mask. - args["fixed_image"] = mask( - ref_template, str(ref_mask), "fixed_masked.nii.gz" - ) + args['fixed_image'] = mask(ref_template, str(ref_mask), 'fixed_masked.nii.gz') # Do not use a fixed mask during registration. - args.pop("fixed_image_masks", None) + args.pop('fixed_image_masks', None) # If a lesion mask is provided... if isdefined(self.inputs.lesion_mask): # Create a cost function mask with the form: [global mask] # Use this as the fixed mask. - args["fixed_image_masks"] = create_cfm( + args['fixed_image_masks'] = create_cfm( str(ref_mask), lesion_mask=None, global_mask=True ) @@ -486,9 +468,10 @@ def mask(in_file, mask_file, new_name): image space and have the same dimensions. """ - import nibabel as nb import os + import nibabel as nb + # Load the input image in_nii = nb.load(in_file) # Load the mask image @@ -531,32 +514,29 @@ def create_cfm(in_file, lesion_mask=None, global_mask=True, out_path=None): """ import os - import numpy as np + import nibabel as nb + import numpy as np from nipype.utils.filemanip import fname_presuffix if out_path is None: - out_path = fname_presuffix(in_file, suffix="_cfm", newpath=os.getcwd()) + out_path = fname_presuffix(in_file, suffix='_cfm', newpath=os.getcwd()) else: out_path = os.path.abspath(out_path) if not global_mask and not lesion_mask: NIWORKFLOWS_LOG.warning( - "No lesion mask was provided and global_mask not requested, " - "therefore the original mask will not be modified." + 'No lesion mask was provided and global_mask not requested, ' + 'therefore the original mask will not be modified.' ) # Load the input image in_img = nb.load(in_file) # If we want a global mask, create one based on the input image. - data = ( - np.ones(in_img.shape, dtype=np.uint8) - if global_mask - else np.asanyarray(in_img.dataobj) - ) + data = np.ones(in_img.shape, dtype=np.uint8) if global_mask else np.asanyarray(in_img.dataobj) if set(np.unique(data)) - {0, 1}: - raise ValueError("`global_mask` must be true if `in_file` is not a binary mask") + raise ValueError('`global_mask` must be true if `in_file` is not a binary mask') # If a lesion mask was provided, combine it with the secondary mask. 
if lesion_mask is not None: @@ -579,14 +559,14 @@ def create_cfm(in_file, lesion_mask=None, global_mask=True, out_path=None): def _write_outputs(runtime, out_fname=None): if out_fname is None: - out_fname = ".nipype" + out_fname = '.nipype' out_files = [] - for name in ["stdout", "stderr", "merged"]: - stream = getattr(runtime, name, "") + for name in ['stdout', 'stderr', 'merged']: + stream = getattr(runtime, name, '') if stream: out_file = op.join(runtime.cwd, name + out_fname) - with open(out_file, "w") as outf: + with open(out_file, 'w') as outf: print(stream, file=outf) out_files.append(out_file) return out_files diff --git a/niworkflows/interfaces/patches.py b/niworkflows/interfaces/patches.py index d72764a2335..68dc7f9125b 100644 --- a/niworkflows/interfaces/patches.py +++ b/niworkflows/interfaces/patches.py @@ -25,10 +25,10 @@ from random import randint from time import sleep -from numpy.linalg.linalg import LinAlgError from nipype.algorithms import confounds as nac from nipype.interfaces import io as nio from nipype.interfaces.base import File +from numpy.linalg.linalg import LinAlgError class RobustACompCor(nac.ACompCor): diff --git a/niworkflows/interfaces/plotting.py b/niworkflows/interfaces/plotting.py index 0961f3cb5fb..2e9b26083ac 100644 --- a/niworkflows/interfaces/plotting.py +++ b/niworkflows/interfaces/plotting.py @@ -21,40 +21,41 @@ # https://www.nipreps.org/community/licensing/ # """Visualization tools.""" -import numpy as np -import nibabel as nb -from nipype.utils.filemanip import fname_presuffix +import nibabel as nb +import numpy as np from nipype.interfaces.base import ( - File, BaseInterfaceInputSpec, - TraitedSpec, + File, SimpleInterface, - traits, + TraitedSpec, isdefined, + traits, ) +from nipype.utils.filemanip import fname_presuffix + from niworkflows.utils.timeseries import _cifti_timeseries, _nifti_timeseries from niworkflows.viz.plots import ( - fMRIPlot, compcor_variance_plot, confounds_correlation_plot, + fMRIPlot, ) class _FMRISummaryInputSpec(BaseInterfaceInputSpec): - in_func = File(exists=True, mandatory=True, desc="") - in_spikes_bg = File(exists=True, desc="") - fd = File(exists=True, desc="") - dvars = File(exists=True, desc="") - outliers = File(exists=True, desc="") - in_segm = File(exists=True, desc="") - tr = traits.Either(None, traits.Float, usedefault=True, desc="the TR") - fd_thres = traits.Float(0.2, usedefault=True, desc="") - drop_trs = traits.Int(0, usedefault=True, desc="dummy scans") + in_func = File(exists=True, mandatory=True, desc='') + in_spikes_bg = File(exists=True, desc='') + fd = File(exists=True, desc='') + dvars = File(exists=True, desc='') + outliers = File(exists=True, desc='') + in_segm = File(exists=True, desc='') + tr = traits.Either(None, traits.Float, usedefault=True, desc='the TR') + fd_thres = traits.Float(0.2, usedefault=True, desc='') + drop_trs = traits.Int(0, usedefault=True, desc='dummy scans') class _FMRISummaryOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="written file path") + out_file = File(exists=True, desc='written file path') class FMRISummary(SimpleInterface): @@ -66,53 +67,54 @@ class FMRISummary(SimpleInterface): def _run_interface(self, runtime): import pandas as pd - self._results["out_file"] = fname_presuffix( + self._results['out_file'] = fname_presuffix( self.inputs.in_func, - suffix="_fmriplot.svg", + suffix='_fmriplot.svg', use_ext=False, newpath=runtime.cwd, ) - dataframe = pd.DataFrame({ - "outliers": np.loadtxt(self.inputs.outliers, usecols=[0]).tolist(), - # Pick 
non-standardize dvars (col 1) - # First timepoint is NaN (difference) - "DVARS": [np.nan] - + np.loadtxt(self.inputs.dvars, skiprows=1, usecols=[1]).tolist(), - # First timepoint is zero (reference volume) - "FD": [0.0] - + np.loadtxt(self.inputs.fd, skiprows=1, usecols=[0]).tolist(), - }) if ( - isdefined(self.inputs.outliers) - and isdefined(self.inputs.dvars) - and isdefined(self.inputs.fd) - ) else None + dataframe = ( + pd.DataFrame( + { + 'outliers': np.loadtxt(self.inputs.outliers, usecols=[0]).tolist(), + # Pick non-standardize dvars (col 1) + # First timepoint is NaN (difference) + 'DVARS': [np.nan] + + np.loadtxt(self.inputs.dvars, skiprows=1, usecols=[1]).tolist(), + # First timepoint is zero (reference volume) + 'FD': [0.0] + np.loadtxt(self.inputs.fd, skiprows=1, usecols=[0]).tolist(), + } + ) + if ( + isdefined(self.inputs.outliers) + and isdefined(self.inputs.dvars) + and isdefined(self.inputs.fd) + ) + else None + ) input_data = nb.load(self.inputs.in_func) seg_file = self.inputs.in_segm if isdefined(self.inputs.in_segm) else None dataset, segments = ( _cifti_timeseries(input_data) - if isinstance(input_data, nb.Cifti2Image) else - _nifti_timeseries(input_data, seg_file) + if isinstance(input_data, nb.Cifti2Image) + else _nifti_timeseries(input_data, seg_file) ) fig = fMRIPlot( dataset, segments=segments, spikes_files=( - [self.inputs.in_spikes_bg] - if isdefined(self.inputs.in_spikes_bg) else None - ), - tr=( - self.inputs.tr if isdefined(self.inputs.tr) else - _get_tr(input_data) + [self.inputs.in_spikes_bg] if isdefined(self.inputs.in_spikes_bg) else None ), + tr=(self.inputs.tr if isdefined(self.inputs.tr) else _get_tr(input_data)), confounds=dataframe, - units={"outliers": "%", "FD": "mm"}, - vlines={"FD": [self.inputs.fd_thres]}, + units={'outliers': '%', 'FD': 'mm'}, + vlines={'FD': [self.inputs.fd_thres]}, nskip=self.inputs.drop_trs, ).plot() - fig.savefig(self._results["out_file"], bbox_inches="tight") + fig.savefig(self._results['out_file'], bbox_inches='tight') return runtime @@ -120,28 +122,26 @@ class _CompCorVariancePlotInputSpec(BaseInterfaceInputSpec): metadata_files = traits.List( File(exists=True), mandatory=True, - desc="List of files containing component metadata", + desc='List of files containing component metadata', ) metadata_sources = traits.List( traits.Str, - desc="List of names of decompositions " - "(e.g., aCompCor, tCompCor) yielding " - "the arguments in `metadata_files`", + desc='List of names of decompositions ' + '(e.g., aCompCor, tCompCor) yielding ' + 'the arguments in `metadata_files`', ) variance_thresholds = traits.Tuple( traits.Float(0.5), traits.Float(0.7), traits.Float(0.9), usedefault=True, - desc="Levels of explained variance to include in plot", - ) - out_file = traits.Either( - None, File, value=None, usedefault=True, desc="Path to save plot" + desc='Levels of explained variance to include in plot', ) + out_file = traits.Either(None, File, value=None, usedefault=True, desc='Path to save plot') class _CompCorVariancePlotOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="Path to saved plot") + out_file = File(exists=True, desc='Path to saved plot') class CompCorVariancePlot(SimpleInterface): @@ -152,59 +152,51 @@ class CompCorVariancePlot(SimpleInterface): def _run_interface(self, runtime): if self.inputs.out_file is None: - self._results["out_file"] = fname_presuffix( + self._results['out_file'] = fname_presuffix( self.inputs.metadata_files[0], - suffix="_compcor.svg", + suffix='_compcor.svg', use_ext=False, 
newpath=runtime.cwd, ) else: - self._results["out_file"] = self.inputs.out_file + self._results['out_file'] = self.inputs.out_file compcor_variance_plot( metadata_files=self.inputs.metadata_files, metadata_sources=self.inputs.metadata_sources, - output_file=self._results["out_file"], + output_file=self._results['out_file'], varexp_thresh=self.inputs.variance_thresholds, ) return runtime class _ConfoundsCorrelationPlotInputSpec(BaseInterfaceInputSpec): - confounds_file = File( - exists=True, mandatory=True, desc="File containing confound regressors" - ) - out_file = traits.Either( - None, File, value=None, usedefault=True, desc="Path to save plot" - ) + confounds_file = File(exists=True, mandatory=True, desc='File containing confound regressors') + out_file = traits.Either(None, File, value=None, usedefault=True, desc='Path to save plot') reference_column = traits.Str( - "global_signal", + 'global_signal', usedefault=True, - desc="Column in the confound file for " - "which all correlation magnitudes " - "should be ranked and plotted", - ) - columns = traits.List( - traits.Str, - desc="Filter out all regressors not found in this list." + desc='Column in the confound file for ' + 'which all correlation magnitudes ' + 'should be ranked and plotted', ) + columns = traits.List(traits.Str, desc='Filter out all regressors not found in this list.') max_dim = traits.Int( 20, usedefault=True, - desc="Maximum number of regressors to include in " - "plot. Regressors with highest magnitude of " - "correlation with `reference_column` will be " - "selected.", + desc='Maximum number of regressors to include in ' + 'plot. Regressors with highest magnitude of ' + 'correlation with `reference_column` will be ' + 'selected.', ) ignore_initial_volumes = traits.Int( 0, usedefault=True, - desc="Number of non-steady-state volumes at the beginning of the scan " - "to ignore.", + desc='Number of non-steady-state volumes at the beginning of the scan to ignore.', ) class _ConfoundsCorrelationPlotOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="Path to saved plot") + out_file = File(exists=True, desc='Path to saved plot') class ConfoundsCorrelationPlot(SimpleInterface): @@ -215,19 +207,19 @@ class ConfoundsCorrelationPlot(SimpleInterface): def _run_interface(self, runtime): if self.inputs.out_file is None: - self._results["out_file"] = fname_presuffix( + self._results['out_file'] = fname_presuffix( self.inputs.confounds_file, - suffix="_confoundCorrelation.svg", + suffix='_confoundCorrelation.svg', use_ext=False, newpath=runtime.cwd, ) else: - self._results["out_file"] = self.inputs.out_file + self._results['out_file'] = self.inputs.out_file confounds_correlation_plot( confounds_file=self.inputs.confounds_file, columns=self.inputs.columns if isdefined(self.inputs.columns) else None, max_dim=self.inputs.max_dim, - output_file=self._results["out_file"], + output_file=self._results['out_file'], reference=self.inputs.reference_column, ignore_initial_volumes=self.inputs.ignore_initial_volumes, ) @@ -253,4 +245,4 @@ def _get_tr(img): return img.header.matrix.get_index_map(0).series_step except AttributeError: return img.header.get_zooms()[-1] - raise RuntimeError("Could not extract TR - unknown data structure type") + raise RuntimeError('Could not extract TR - unknown data structure type') diff --git a/niworkflows/interfaces/probmaps.py b/niworkflows/interfaces/probmaps.py index e3ad2a71aad..e62670f0324 100644 --- a/niworkflows/interfaces/probmaps.py +++ b/niworkflows/interfaces/probmaps.py @@ -21,52 +21,43 @@ # 
https://www.nipreps.org/community/licensing/ # """Utilities.""" -import numpy as np -import nibabel as nb +import nibabel as nb +import numpy as np from nipype import logging -from nipype.utils.filemanip import fname_presuffix from nipype.interfaces.base import ( - traits, - isdefined, + BaseInterfaceInputSpec, File, InputMultiPath, - TraitedSpec, - BaseInterfaceInputSpec, SimpleInterface, + TraitedSpec, + isdefined, + traits, ) +from nipype.utils.filemanip import fname_presuffix - -LOG = logging.getLogger("nipype.interface") +LOG = logging.getLogger('nipype.interface') class _TPM2ROIInputSpec(BaseInterfaceInputSpec): - in_tpm = File( - exists=True, mandatory=True, desc="Tissue probability map file in T1 space" - ) - in_mask = File( - exists=True, mandatory=True, desc="Binary mask of skull-stripped T1w image" - ) + in_tpm = File(exists=True, mandatory=True, desc='Tissue probability map file in T1 space') + in_mask = File(exists=True, mandatory=True, desc='Binary mask of skull-stripped T1w image') mask_erode_mm = traits.Float( - xor=["mask_erode_prop"], desc="erode input mask (kernel width in mm)" - ) - erode_mm = traits.Float( - xor=["erode_prop"], desc="erode output mask (kernel width in mm)" + xor=['mask_erode_prop'], desc='erode input mask (kernel width in mm)' ) + erode_mm = traits.Float(xor=['erode_prop'], desc='erode output mask (kernel width in mm)') mask_erode_prop = traits.Float( - xor=["mask_erode_mm"], desc="erode input mask (target volume ratio)" - ) - erode_prop = traits.Float( - xor=["erode_mm"], desc="erode output mask (target volume ratio)" + xor=['mask_erode_mm'], desc='erode input mask (target volume ratio)' ) + erode_prop = traits.Float(xor=['erode_mm'], desc='erode output mask (target volume ratio)') prob_thresh = traits.Float( - 0.95, usedefault=True, desc="threshold for the tissue probability maps" + 0.95, usedefault=True, desc='threshold for the tissue probability maps' ) class _TPM2ROIOutputSpec(TraitedSpec): - roi_file = File(exists=True, desc="output ROI file") - eroded_mask = File(exists=True, desc="resulting eroded mask") + roi_file = File(exists=True, desc='output ROI file') + eroded_mask = File(exists=True, desc='resulting eroded mask') class TPM2ROI(SimpleInterface): @@ -107,20 +98,18 @@ def _run_interface(self, runtime): self.inputs.prob_thresh, newpath=runtime.cwd, ) - self._results["roi_file"] = roi_file - self._results["eroded_mask"] = eroded_mask + self._results['roi_file'] = roi_file + self._results['eroded_mask'] = eroded_mask return runtime class _AddTPMsInputSpec(BaseInterfaceInputSpec): - in_files = InputMultiPath( - File(exists=True), mandatory=True, desc="input list of ROIs" - ) - indices = traits.List(traits.Int, desc="select specific maps") + in_files = InputMultiPath(File(exists=True), mandatory=True, desc='input list of ROIs') + indices = traits.List(traits.Int, desc='select specific maps') class _AddTPMsOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="union of binarized input files") + out_file = File(exists=True, desc='union of binarized input files') class AddTPMs(SimpleInterface): @@ -137,27 +126,27 @@ def _run_interface(self, runtime): indices = self.inputs.indices if len(self.inputs.in_files) < 2: - self._results["out_file"] = in_files[0] + self._results['out_file'] = in_files[0] return runtime first_fname = in_files[indices[0]] if len(indices) == 1: - self._results["out_file"] = first_fname + self._results['out_file'] = first_fname return runtime im = nb.concat_images([in_files[i] for i in indices]) data = 
im.get_fdata().sum(axis=3) data = np.clip(data, a_min=0.0, a_max=1.0) - out_file = fname_presuffix(first_fname, suffix="_tpmsum", newpath=runtime.cwd) + out_file = fname_presuffix(first_fname, suffix='_tpmsum', newpath=runtime.cwd) newnii = im.__class__(data, im.affine, im.header) newnii.set_data_dtype(np.float32) # Set visualization thresholds - newnii.header["cal_max"] = 1.0 - newnii.header["cal_min"] = 0.0 + newnii.header['cal_max'] = 1.0 + newnii.header['cal_min'] = 0.0 newnii.to_filename(out_file) - self._results["out_file"] = out_file + self._results['out_file'] = out_file return runtime @@ -185,7 +174,7 @@ def _tpm2roi( mask_erosion_prop is not None and mask_erosion_prop < 1 ) if erode_in: - eroded_mask_file = fname_presuffix(in_mask, suffix="_eroded", newpath=newpath) + eroded_mask_file = fname_presuffix(in_mask, suffix='_eroded', newpath=newpath) mask_img = nb.load(in_mask) mask_data = np.asanyarray(mask_img.dataobj).astype(np.uint8) if mask_erosion_mm: @@ -219,7 +208,7 @@ def _tpm2roi( roi_mask = nd.binary_erosion(roi_mask, iterations=1) # Create image to resample - roi_fname = fname_presuffix(in_tpm, suffix="_roi", newpath=newpath) + roi_fname = fname_presuffix(in_tpm, suffix='_roi', newpath=newpath) roi_img = nb.Nifti1Image(roi_mask, tpm_img.affine, tpm_img.header) roi_img.set_data_dtype(np.uint8) roi_img.to_filename(roi_fname) diff --git a/niworkflows/interfaces/reportlets/__init__.py b/niworkflows/interfaces/reportlets/__init__.py index e4a66e15d0b..448e3595d78 100644 --- a/niworkflows/interfaces/reportlets/__init__.py +++ b/niworkflows/interfaces/reportlets/__init__.py @@ -1,7 +1,5 @@ import warnings -msg = ( - 'Niworkflows will be deprecating reporting in favor of a standalone library "nireports".' -) +msg = 'Niworkflows will be deprecating reporting in favor of a standalone library "nireports".' -warnings.warn(msg, PendingDeprecationWarning) +warnings.warn(msg, PendingDeprecationWarning, stacklevel=2) diff --git a/niworkflows/interfaces/reportlets/base.py b/niworkflows/interfaces/reportlets/base.py index 5915cc43789..e59744c1efa 100644 --- a/niworkflows/interfaces/reportlets/base.py +++ b/niworkflows/interfaces/reportlets/base.py @@ -21,24 +21,24 @@ # https://www.nipreps.org/community/licensing/ # """class mixin and utilities for enabling reports for nipype interfaces.""" + from nipype.interfaces.base import File, traits from nipype.interfaces.mixins import reporting + from ... import NIWORKFLOWS_LOG -from ...viz.utils import cuts_from_bbox, compose_view +from ...viz.utils import compose_view, cuts_from_bbox class _SVGReportCapableInputSpec(reporting.ReportCapableInputSpec): - out_report = File( - "report.svg", usedefault=True, desc="filename for the visual report" - ) + out_report = File('report.svg', usedefault=True, desc='filename for the visual report') compress_report = traits.Enum( - "auto", + 'auto', True, False, usedefault=True, - desc="Compress the reportlet using SVGO or" + desc='Compress the reportlet using SVGO or' "WEBP. 
'auto' - compress if relevant " - "software is installed, True = force," + 'software is installed, True = force,' "False - don't attempt to compress", ) @@ -49,18 +49,19 @@ class RegistrationRC(reporting.ReportCapableInterface): _fixed_image = None _moving_image = None _fixed_image_mask = None - _fixed_image_label = "fixed" - _moving_image_label = "moving" + _fixed_image_label = 'fixed' + _moving_image_label = 'moving' _contour = None _dismiss_affine = False def _generate_report(self): """Generate the visual report.""" - from nilearn.image import threshold_img, load_img + from nilearn.image import load_img, threshold_img from nilearn.masking import apply_mask, unmask + from niworkflows.viz.utils import plot_registration - NIWORKFLOWS_LOG.info("Generating visual report") + NIWORKFLOWS_LOG.info('Generating visual report') fixed_image_nii = load_img(self._fixed_image) moving_image_nii = load_img(self._moving_image) @@ -91,7 +92,7 @@ def _generate_report(self): compose_view( plot_registration( fixed_image_nii, - "fixed-image", + 'fixed-image', estimate_brightness=True, cuts=cuts, label=self._fixed_image_label, @@ -101,7 +102,7 @@ def _generate_report(self): ), plot_registration( moving_image_nii, - "moving-image", + 'moving-image', estimate_brightness=True, cuts=cuts, label=self._moving_image_label, @@ -142,11 +143,12 @@ class SurfaceSegmentationRC(reporting.ReportCapableInterface): def _generate_report(self): """Generate the visual report.""" - from nilearn.image import threshold_img, load_img + from nilearn.image import load_img, threshold_img from nilearn.masking import apply_mask, unmask + from niworkflows.viz.utils import plot_registration - NIWORKFLOWS_LOG.info("Generating visual report") + NIWORKFLOWS_LOG.info('Generating visual report') anat = load_img(self._anat_file) contour_nii = load_img(self._contour) if self._contour is not None else None @@ -167,7 +169,7 @@ def _generate_report(self): compose_view( plot_registration( anat, - "fixed-image", + 'fixed-image', estimate_brightness=True, cuts=cuts, contour=contour_nii, @@ -189,9 +191,7 @@ class ReportingInterface(reporting.ReportCapableInterface): output_spec = reporting.ReportCapableOutputSpec def __init__(self, generate_report=True, **kwargs): - super().__init__( - generate_report=generate_report, **kwargs - ) + super().__init__(generate_report=generate_report, **kwargs) def _run_interface(self, runtime): return runtime diff --git a/niworkflows/interfaces/reportlets/masks.py b/niworkflows/interfaces/reportlets/masks.py index bac370fe280..16673e5a823 100644 --- a/niworkflows/interfaces/reportlets/masks.py +++ b/niworkflows/interfaces/reportlets/masks.py @@ -21,20 +21,22 @@ # https://www.nipreps.org/community/licensing/ # """ReportCapableInterfaces for masks tools.""" + import os -import numpy as np -import nibabel as nb -from nipype.interfaces import fsl, ants +import nibabel as nb +import numpy as np +from nipype.algorithms import confounds +from nipype.interfaces import ants, fsl from nipype.interfaces.base import ( File, - traits, - isdefined, InputMultiPath, Str, + isdefined, + traits, ) from nipype.interfaces.mixins import reporting -from nipype.algorithms import confounds + from ... import NIWORKFLOWS_LOG from . 
import base as nrb @@ -43,9 +45,7 @@ class _BETInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.preprocess.BETInputSp pass -class _BETOutputSpecRPT( - reporting.ReportCapableOutputSpec, fsl.preprocess.BETOutputSpec -): +class _BETOutputSpecRPT(reporting.ReportCapableOutputSpec, fsl.preprocess.BETOutputSpec): pass @@ -94,16 +94,11 @@ class BrainExtractionRPT(nrb.SegmentationRC, ants.segmentation.BrainExtraction): output_spec = _BrainExtractionOutputSpecRPT def _post_run_hook(self, runtime): - """ generates a report showing slices from each axis """ + """generates a report showing slices from each axis""" - brain_extraction_mask = self.aggregate_outputs( - runtime=runtime - ).BrainExtractionMask + brain_extraction_mask = self.aggregate_outputs(runtime=runtime).BrainExtractionMask - if ( - isdefined(self.inputs.keep_temporary_files) - and self.inputs.keep_temporary_files == 1 - ): + if isdefined(self.inputs.keep_temporary_files) and self.inputs.keep_temporary_files == 1: self._anat_file = self.aggregate_outputs(runtime=runtime).N4Corrected0 else: self._anat_file = self.inputs.anatomical_image @@ -124,9 +119,7 @@ class _ACompCorInputSpecRPT(nrb._SVGReportCapableInputSpec, confounds.CompCorInp pass -class _ACompCorOutputSpecRPT( - reporting.ReportCapableOutputSpec, confounds.CompCorOutputSpec -): +class _ACompCorOutputSpecRPT(reporting.ReportCapableOutputSpec, confounds.CompCorOutputSpec): pass @@ -135,12 +128,12 @@ class ACompCorRPT(nrb.SegmentationRC, confounds.ACompCor): output_spec = _ACompCorOutputSpecRPT def _post_run_hook(self, runtime): - """ generates a report showing slices from each axis """ + """generates a report showing slices from each axis""" if len(self.inputs.mask_files) != 1: raise ValueError( - "ACompCorRPT only supports a single input mask. " - "A list %s was found." % self.inputs.mask_files + 'ACompCorRPT only supports a single input mask. ' + f'A list {self.inputs.mask_files} was found.' ) self._anat_file = self.inputs.realigned_file self._mask_file = self.inputs.mask_files[0] @@ -156,15 +149,11 @@ def _post_run_hook(self, runtime): return super()._post_run_hook(runtime) -class _TCompCorInputSpecRPT( - nrb._SVGReportCapableInputSpec, confounds.TCompCorInputSpec -): +class _TCompCorInputSpecRPT(nrb._SVGReportCapableInputSpec, confounds.TCompCorInputSpec): pass -class _TCompCorOutputSpecRPT( - reporting.ReportCapableOutputSpec, confounds.TCompCorOutputSpec -): +class _TCompCorOutputSpecRPT(reporting.ReportCapableOutputSpec, confounds.TCompCorOutputSpec): pass @@ -173,16 +162,14 @@ class TCompCorRPT(nrb.SegmentationRC, confounds.TCompCor): output_spec = _TCompCorOutputSpecRPT def _post_run_hook(self, runtime): - """ generates a report showing slices from each axis """ + """generates a report showing slices from each axis""" - high_variance_masks = self.aggregate_outputs( - runtime=runtime - ).high_variance_masks + high_variance_masks = self.aggregate_outputs(runtime=runtime).high_variance_masks if isinstance(high_variance_masks, list): raise ValueError( - "TCompCorRPT only supports a single output high variance mask. " - "A list %s was found." % high_variance_masks + 'TCompCorRPT only supports a single output high variance mask. ' + f'A list {high_variance_masks} was found.' 
) self._anat_file = self.inputs.realigned_file self._mask_file = high_variance_masks @@ -199,8 +186,8 @@ def _post_run_hook(self, runtime): class _SimpleShowMaskInputSpec(nrb._SVGReportCapableInputSpec): - background_file = File(exists=True, mandatory=True, desc="file before") - mask_file = File(exists=True, mandatory=True, desc="file before") + background_file = File(exists=True, mandatory=True, desc='file before') + mask_file = File(exists=True, mandatory=True, desc='file before') class SimpleShowMaskRPT(nrb.SegmentationRC, nrb.ReportingInterface): @@ -216,24 +203,22 @@ def _post_run_hook(self, runtime): class _ROIsPlotInputSpecRPT(nrb._SVGReportCapableInputSpec): - in_file = File( - exists=True, mandatory=True, desc="the volume where ROIs are defined" - ) + in_file = File(exists=True, mandatory=True, desc='the volume where ROIs are defined') in_rois = InputMultiPath( - File(exists=True), mandatory=True, desc="a list of regions to be plotted" + File(exists=True), mandatory=True, desc='a list of regions to be plotted' ) - in_mask = File(exists=True, desc="a special region, eg. the brain mask") - masked = traits.Bool(False, usedefault=True, desc="mask in_file prior plotting") + in_mask = File(exists=True, desc='a special region, eg. the brain mask') + masked = traits.Bool(False, usedefault=True, desc='mask in_file prior plotting') colors = traits.Either( - None, traits.List(Str), usedefault=True, desc="use specific colors for contours" + None, traits.List(Str), usedefault=True, desc='use specific colors for contours' ) levels = traits.Either( None, traits.List(traits.Float), usedefault=True, - desc="pass levels to nilearn.plotting", + desc='pass levels to nilearn.plotting', ) - mask_color = Str("r", usedefault=True, desc="color for mask") + mask_color = Str('r', usedefault=True, desc='color for mask') class ROIsPlot(nrb.ReportingInterface): @@ -241,35 +226,34 @@ class ROIsPlot(nrb.ReportingInterface): def _generate_report(self): from seaborn import color_palette - from niworkflows.viz.utils import plot_segs, compose_view + + from niworkflows.viz.utils import compose_view, plot_segs seg_files = self.inputs.in_rois mask_file = None if not isdefined(self.inputs.in_mask) else self.inputs.in_mask # Remove trait decoration and replace None with [] - levels = [level for level in self.inputs.levels or []] - colors = [c for c in self.inputs.colors or []] + levels = list(self.inputs.levels or []) + colors = list(self.inputs.colors or []) if len(seg_files) == 1: # in_rois is a segmentation nsegs = len(levels) if nsegs == 0: - levels = np.unique( - np.round(nb.load(seg_files[0]).get_fdata(dtype="float32")) - ) + levels = np.unique(np.round(nb.load(seg_files[0]).get_fdata(dtype='float32'))) levels = (levels[levels > 0] - 0.5).tolist() nsegs = len(levels) levels = [levels] missing = nsegs - len(colors) if missing > 0: - colors = colors + color_palette("husl", missing) + colors = colors + color_palette('husl', missing) colors = [colors] else: # in_rois is a list of masks nsegs = len(seg_files) levels = [[0.5]] * nsegs missing = nsegs - len(colors) if missing > 0: - colors = [[c] for c in colors + color_palette("husl", missing)] + colors = [[c] for c in colors + color_palette('husl', missing)] if mask_file: seg_files.insert(0, mask_file) diff --git a/niworkflows/interfaces/reportlets/registration.py b/niworkflows/interfaces/reportlets/registration.py index dde3b6984ba..91d8cc004e5 100644 --- a/niworkflows/interfaces/reportlets/registration.py +++ b/niworkflows/interfaces/reportlets/registration.py @@ 
-21,31 +21,34 @@ # https://www.nipreps.org/community/licensing/ # """ReportCapableInterfaces for registration tools.""" + import os from looseversion import LooseVersion -from nipype.utils.filemanip import fname_presuffix +from nipype.interfaces import freesurfer as fs +from nipype.interfaces import fsl +from nipype.interfaces.ants import registration, resampling from nipype.interfaces.base import ( - traits, - isdefined, File, + isdefined, + traits, ) from nipype.interfaces.mixins import reporting -from nipype.interfaces import freesurfer as fs -from nipype.interfaces import fsl -from nipype.interfaces.ants import registration, resampling +from nipype.utils.filemanip import fname_presuffix from ... import NIWORKFLOWS_LOG -from . import base as nrb -from ..norm import ( - _SpatialNormalizationInputSpec, - _SpatialNormalizationOutputSpec, - SpatialNormalization, -) from ..fixes import ( FixHeaderApplyTransforms as ApplyTransforms, +) +from ..fixes import ( FixHeaderRegistration as Registration, ) +from ..norm import ( + SpatialNormalization, + _SpatialNormalizationInputSpec, + _SpatialNormalizationOutputSpec, +) +from . import base as nrb class _SpatialNormalizationInputSpecRPT( @@ -66,15 +69,15 @@ class SpatialNormalizationRPT(nrb.RegistrationRC, SpatialNormalization): def _post_run_hook(self, runtime): # We need to dig into the internal ants.Registration interface - self._fixed_image = self._get_ants_args()["fixed_image"] + self._fixed_image = self._get_ants_args()['fixed_image'] if isinstance(self._fixed_image, (list, tuple)): self._fixed_image = self._fixed_image[0] # get first item if list - if self._get_ants_args().get("fixed_image_mask") is not None: - self._fixed_image_mask = self._get_ants_args().get("fixed_image_mask") + if self._get_ants_args().get('fixed_image_mask') is not None: + self._fixed_image_mask = self._get_ants_args().get('fixed_image_mask') self._moving_image = self.aggregate_outputs(runtime=runtime).warped_image NIWORKFLOWS_LOG.info( - "Report - setting fixed (%s) and moving (%s) images", + 'Report - setting fixed (%s) and moving (%s) images', self._fixed_image, self._moving_image, ) @@ -102,7 +105,7 @@ def _post_run_hook(self, runtime): self._fixed_image = self.inputs.fixed_image[0] self._moving_image = self.aggregate_outputs(runtime=runtime).warped_image NIWORKFLOWS_LOG.info( - "Report - setting fixed (%s) and moving (%s) images", + 'Report - setting fixed (%s) and moving (%s) images', self._fixed_image, self._moving_image, ) @@ -130,7 +133,7 @@ def _post_run_hook(self, runtime): self._fixed_image = self.inputs.reference_image self._moving_image = self.aggregate_outputs(runtime=runtime).output_image NIWORKFLOWS_LOG.info( - "Report - setting fixed (%s) and moving (%s) images", + 'Report - setting fixed (%s) and moving (%s) images', self._fixed_image, self._moving_image, ) @@ -138,15 +141,11 @@ def _post_run_hook(self, runtime): return super()._post_run_hook(runtime) -class _ApplyTOPUPInputSpecRPT( - nrb._SVGReportCapableInputSpec, fsl.epi.ApplyTOPUPInputSpec -): - wm_seg = File(argstr="-wmseg %s", desc="reference white matter segmentation mask") +class _ApplyTOPUPInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.epi.ApplyTOPUPInputSpec): + wm_seg = File(argstr='-wmseg %s', desc='reference white matter segmentation mask') -class _ApplyTOPUPOutputSpecRPT( - reporting.ReportCapableOutputSpec, fsl.epi.ApplyTOPUPOutputSpec -): +class _ApplyTOPUPOutputSpecRPT(reporting.ReportCapableOutputSpec, fsl.epi.ApplyTOPUPOutputSpec): pass @@ -157,15 +156,13 @@ class 
ApplyTOPUPRPT(nrb.RegistrationRC, fsl.ApplyTOPUP): def _post_run_hook(self, runtime): from nilearn.image import index_img - self._fixed_image_label = "after" - self._moving_image_label = "before" - self._fixed_image = index_img( - self.aggregate_outputs(runtime=runtime).out_corrected, 0 - ) + self._fixed_image_label = 'after' + self._moving_image_label = 'before' + self._fixed_image = index_img(self.aggregate_outputs(runtime=runtime).out_corrected, 0) self._moving_image = index_img(self.inputs.in_files[0], 0) self._contour = self.inputs.wm_seg if isdefined(self.inputs.wm_seg) else None NIWORKFLOWS_LOG.info( - "Report - setting corrected (%s) and warped (%s) images", + 'Report - setting corrected (%s) and warped (%s) images', self._fixed_image, self._moving_image, ) @@ -174,12 +171,10 @@ def _post_run_hook(self, runtime): class _FUGUEInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.preprocess.FUGUEInputSpec): - wm_seg = File(argstr="-wmseg %s", desc="reference white matter segmentation mask") + wm_seg = File(argstr='-wmseg %s', desc='reference white matter segmentation mask') -class _FUGUEOutputSpecRPT( - reporting.ReportCapableOutputSpec, fsl.preprocess.FUGUEOutputSpec -): +class _FUGUEOutputSpecRPT(reporting.ReportCapableOutputSpec, fsl.preprocess.FUGUEOutputSpec): pass @@ -188,13 +183,13 @@ class FUGUERPT(nrb.RegistrationRC, fsl.FUGUE): output_spec = _FUGUEOutputSpecRPT def _post_run_hook(self, runtime): - self._fixed_image_label = "after" - self._moving_image_label = "before" + self._fixed_image_label = 'after' + self._moving_image_label = 'before' self._fixed_image = self.aggregate_outputs(runtime=runtime).unwarped_file self._moving_image = self.inputs.in_file self._contour = self.inputs.wm_seg if isdefined(self.inputs.wm_seg) else None NIWORKFLOWS_LOG.info( - "Report - setting corrected (%s) and warped (%s) images", + 'Report - setting corrected (%s) and warped (%s) images', self._fixed_image, self._moving_image, ) @@ -206,9 +201,7 @@ class _FLIRTInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.preprocess.FLIRTInp pass -class _FLIRTOutputSpecRPT( - reporting.ReportCapableOutputSpec, fsl.preprocess.FLIRTOutputSpec -): +class _FLIRTOutputSpecRPT(reporting.ReportCapableOutputSpec, fsl.preprocess.FLIRTOutputSpec): pass @@ -221,7 +214,7 @@ def _post_run_hook(self, runtime): self._moving_image = self.aggregate_outputs(runtime=runtime).out_file self._contour = self.inputs.wm_seg if isdefined(self.inputs.wm_seg) else None NIWORKFLOWS_LOG.info( - "Report - setting fixed (%s) and moving (%s) images", + 'Report - setting fixed (%s) and moving (%s) images', self._fixed_image, self._moving_image, ) @@ -229,9 +222,7 @@ def _post_run_hook(self, runtime): return super()._post_run_hook(runtime) -class _ApplyXFMInputSpecRPT( - nrb._SVGReportCapableInputSpec, fsl.preprocess.ApplyXFMInputSpec -): +class _ApplyXFMInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.preprocess.ApplyXFMInputSpec): pass @@ -240,7 +231,7 @@ class ApplyXFMRPT(FLIRTRPT, fsl.ApplyXFM): output_spec = _FLIRTOutputSpecRPT -if LooseVersion("0.0.0") < fs.Info.looseversion() < LooseVersion("6.0.0"): +if LooseVersion('0.0.0') < fs.Info.looseversion() < LooseVersion('6.0.0'): _BBRegisterInputSpec = fs.preprocess.BBRegisterInputSpec else: _BBRegisterInputSpec = fs.preprocess.BBRegisterInputSpec6 @@ -253,9 +244,9 @@ class _BBRegisterInputSpecRPT(nrb._SVGReportCapableInputSpec, _BBRegisterInputSp File, default=True, usedefault=True, - argstr="--lta %s", - min_ver="5.2.0", - desc="write the transformation matrix in LTA format", + argstr='--lta 
%s', + min_ver='5.2.0', + desc='write the transformation matrix in LTA format', ) @@ -271,23 +262,23 @@ class BBRegisterRPT(nrb.RegistrationRC, fs.BBRegister): def _post_run_hook(self, runtime): outputs = self.aggregate_outputs(runtime=runtime) - mri_dir = os.path.join(self.inputs.subjects_dir, self.inputs.subject_id, "mri") - target_file = os.path.join(mri_dir, "brainmask.mgz") + mri_dir = os.path.join(self.inputs.subjects_dir, self.inputs.subject_id, 'mri') + target_file = os.path.join(mri_dir, 'brainmask.mgz') # Apply transform for simplicity mri_vol2vol = fs.ApplyVolTransform( source_file=self.inputs.source_file, target_file=target_file, lta_file=outputs.out_lta_file, - interp="nearest", + interp='nearest', ) res = mri_vol2vol.run() self._fixed_image = target_file self._moving_image = res.outputs.transformed_file - self._contour = os.path.join(mri_dir, "ribbon.mgz") + self._contour = os.path.join(mri_dir, 'ribbon.mgz') NIWORKFLOWS_LOG.info( - "Report - setting fixed (%s) and moving (%s) images", + 'Report - setting fixed (%s) and moving (%s) images', self._fixed_image, self._moving_image, ) @@ -295,9 +286,7 @@ def _post_run_hook(self, runtime): return super()._post_run_hook(runtime) -class _MRICoregInputSpecRPT( - nrb._SVGReportCapableInputSpec, fs.registration.MRICoregInputSpec -): +class _MRICoregInputSpecRPT(nrb._SVGReportCapableInputSpec, fs.registration.MRICoregInputSpec): pass @@ -315,30 +304,28 @@ def _post_run_hook(self, runtime): outputs = self.aggregate_outputs(runtime=runtime) mri_dir = None if isdefined(self.inputs.subject_id): - mri_dir = os.path.join( - self.inputs.subjects_dir, self.inputs.subject_id, "mri" - ) + mri_dir = os.path.join(self.inputs.subjects_dir, self.inputs.subject_id, 'mri') if isdefined(self.inputs.reference_file): target_file = self.inputs.reference_file else: - target_file = os.path.join(mri_dir, "brainmask.mgz") + target_file = os.path.join(mri_dir, 'brainmask.mgz') # Apply transform for simplicity mri_vol2vol = fs.ApplyVolTransform( source_file=self.inputs.source_file, target_file=target_file, lta_file=outputs.out_lta_file, - interp="nearest", + interp='nearest', ) res = mri_vol2vol.run() self._fixed_image = target_file self._moving_image = res.outputs.transformed_file if mri_dir is not None: - self._contour = os.path.join(mri_dir, "ribbon.mgz") + self._contour = os.path.join(mri_dir, 'ribbon.mgz') NIWORKFLOWS_LOG.info( - "Report - setting fixed (%s) and moving (%s) images", + 'Report - setting fixed (%s) and moving (%s) images', self._fixed_image, self._moving_image, ) @@ -347,21 +334,19 @@ def _post_run_hook(self, runtime): class _SimpleBeforeAfterInputSpecRPT(nrb._SVGReportCapableInputSpec): - before = File(exists=True, mandatory=True, desc="file before") - after = File(exists=True, mandatory=True, desc="file after") - wm_seg = File(desc="reference white matter segmentation mask") - before_label = traits.Str("before", usedefault=True) - after_label = traits.Str("after", usedefault=True) - dismiss_affine = traits.Bool( - False, usedefault=True, desc="rotate image(s) to cardinal axes" - ) + before = File(exists=True, mandatory=True, desc='file before') + after = File(exists=True, mandatory=True, desc='file after') + wm_seg = File(desc='reference white matter segmentation mask') + before_label = traits.Str('before', usedefault=True) + after_label = traits.Str('after', usedefault=True) + dismiss_affine = traits.Bool(False, usedefault=True, desc='rotate image(s) to cardinal axes') class SimpleBeforeAfterRPT(nrb.RegistrationRC, nrb.ReportingInterface): 
input_spec = _SimpleBeforeAfterInputSpecRPT def _post_run_hook(self, runtime): - """ there is not inner interface to run """ + """there is not inner interface to run""" self._fixed_image_label = self.inputs.after_label self._moving_image_label = self.inputs.before_label self._fixed_image = self.inputs.after @@ -369,7 +354,7 @@ def _post_run_hook(self, runtime): self._contour = self.inputs.wm_seg if isdefined(self.inputs.wm_seg) else None self._dismiss_affine = self.inputs.dismiss_affine NIWORKFLOWS_LOG.info( - "Report - setting before (%s) and after (%s) images", + 'Report - setting before (%s) and after (%s) images', self._fixed_image, self._moving_image, ) @@ -378,7 +363,7 @@ def _post_run_hook(self, runtime): class _ResampleBeforeAfterInputSpecRPT(_SimpleBeforeAfterInputSpecRPT): - base = traits.Enum("before", "after", usedefault=True, mandatory=True) + base = traits.Enum('before', 'after', usedefault=True, mandatory=True) class ResampleBeforeAfterRPT(SimpleBeforeAfterRPT): @@ -389,31 +374,25 @@ def _post_run_hook(self, runtime): self._fixed_image = self.inputs.after self._moving_image = self.inputs.before - if self.inputs.base == "before": + if self.inputs.base == 'before': resampled_after = nli.resample_to_img(self._fixed_image, self._moving_image) - fname = fname_presuffix( - self._fixed_image, suffix="_resampled", newpath=runtime.cwd - ) + fname = fname_presuffix(self._fixed_image, suffix='_resampled', newpath=runtime.cwd) resampled_after.to_filename(fname) self._fixed_image = fname else: - resampled_before = nli.resample_to_img( - self._moving_image, self._fixed_image - ) - fname = fname_presuffix( - self._moving_image, suffix="_resampled", newpath=runtime.cwd - ) + resampled_before = nli.resample_to_img(self._moving_image, self._fixed_image) + fname = fname_presuffix(self._moving_image, suffix='_resampled', newpath=runtime.cwd) resampled_before.to_filename(fname) self._moving_image = fname self._contour = self.inputs.wm_seg if isdefined(self.inputs.wm_seg) else None NIWORKFLOWS_LOG.info( - "Report - setting before (%s) and after (%s) images", + 'Report - setting before (%s) and after (%s) images', self._fixed_image, self._moving_image, ) runtime = super()._post_run_hook(runtime) - NIWORKFLOWS_LOG.info("Successfully created report (%s)", self._out_report) + NIWORKFLOWS_LOG.info('Successfully created report (%s)', self._out_report) os.unlink(fname) return runtime diff --git a/niworkflows/interfaces/reportlets/segmentation.py b/niworkflows/interfaces/reportlets/segmentation.py index 918372771e0..642dea987c7 100644 --- a/niworkflows/interfaces/reportlets/segmentation.py +++ b/niworkflows/interfaces/reportlets/segmentation.py @@ -21,11 +21,13 @@ # https://www.nipreps.org/community/licensing/ # """ReportCapableInterfaces for segmentation tools.""" + import os +from nipype.interfaces import freesurfer, fsl from nipype.interfaces.base import File, isdefined -from nipype.interfaces import fsl, freesurfer from nipype.interfaces.mixins import reporting + from ... import NIWORKFLOWS_LOG from . 
import base as nrb @@ -34,9 +36,7 @@ class _FASTInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.preprocess.FASTInput pass -class _FASTOutputSpecRPT( - reporting.ReportCapableOutputSpec, fsl.preprocess.FASTOutputSpec -): +class _FASTOutputSpecRPT(reporting.ReportCapableOutputSpec, fsl.preprocess.FASTOutputSpec): pass @@ -63,8 +63,8 @@ def _post_run_hook(self, runtime): self._masked = False NIWORKFLOWS_LOG.info( - "Generating report for FAST (in_files %s, " - "segmentation %s, individual tissue classes %s).", + 'Generating report for FAST (in_files %s, ' + 'segmentation %s, individual tissue classes %s).', self.inputs.in_files, outputs.tissue_class_map, outputs.tissue_class_files, @@ -95,35 +95,29 @@ def _post_run_hook(self, runtime): overlaid""" outputs = self.aggregate_outputs(runtime=runtime) self._anat_file = os.path.join( - outputs.subjects_dir, outputs.subject_id, "mri", "brain.mgz" - ) - self._contour = os.path.join( - outputs.subjects_dir, outputs.subject_id, "mri", "ribbon.mgz" + outputs.subjects_dir, outputs.subject_id, 'mri', 'brain.mgz' ) + self._contour = os.path.join(outputs.subjects_dir, outputs.subject_id, 'mri', 'ribbon.mgz') self._masked = False - NIWORKFLOWS_LOG.info( - "Generating report for ReconAll (subject %s)", outputs.subject_id - ) + NIWORKFLOWS_LOG.info('Generating report for ReconAll (subject %s)', outputs.subject_id) return super()._post_run_hook(runtime) class _MELODICInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.model.MELODICInputSpec): out_report = File( - "melodic_reportlet.svg", + 'melodic_reportlet.svg', usedefault=True, - desc="Filename for the visual report generated by Nipype.", + desc='Filename for the visual report generated by Nipype.', ) report_mask = File( - desc="Mask used to draw the outline on the reportlet. " - "If not set the mask will be derived from the data." + desc='Mask used to draw the outline on the reportlet. ' + 'If not set the mask will be derived from the data.' ) -class _MELODICOutputSpecRPT( - reporting.ReportCapableOutputSpec, fsl.model.MELODICOutputSpec -): +class _MELODICOutputSpecRPT(reporting.ReportCapableOutputSpec, fsl.model.MELODICOutputSpec): pass @@ -143,7 +137,7 @@ def _post_run_hook(self, runtime): if not self.generate_report: return runtime - NIWORKFLOWS_LOG.info("Generating report for MELODIC.") + NIWORKFLOWS_LOG.info('Generating report for MELODIC.') _melodic_dir = runtime.cwd if isdefined(self.inputs.out_dir): _melodic_dir = self.inputs.out_dir @@ -151,18 +145,14 @@ def _post_run_hook(self, runtime): self._out_report = self.inputs.out_report if not os.path.isabs(self._out_report): - self._out_report = os.path.abspath( - os.path.join(runtime.cwd, self._out_report) - ) + self._out_report = os.path.abspath(os.path.join(runtime.cwd, self._out_report)) - mix = os.path.join(self._melodic_dir, "melodic_mix") + mix = os.path.join(self._melodic_dir, 'melodic_mix') if not os.path.exists(mix): - NIWORKFLOWS_LOG.warning( - "MELODIC outputs not found, assuming it didn't converge." - ) - self._out_report = self._out_report.replace(".svg", ".html") - snippet = "

<h4>MELODIC did not converge, no output</h4>" - with open(self._out_report, "w") as fobj: + NIWORKFLOWS_LOG.warning("MELODIC outputs not found, assuming it didn't converge.") + self._out_report = self._out_report.replace('.svg', '.html') + snippet = '<h4>MELODIC did not converge, no output</h4>
' + with open(self._out_report, 'w') as fobj: fobj.write(snippet) return runtime @@ -175,7 +165,7 @@ def _list_outputs(self): except NotImplementedError: outputs = {} if self._out_report is not None: - outputs["out_report"] = self._out_report + outputs['out_report'] = self._out_report return outputs def _generate_report(self): @@ -191,23 +181,19 @@ def _generate_report(self): ) -class _ICA_AROMAInputSpecRPT( - nrb._SVGReportCapableInputSpec, fsl.aroma.ICA_AROMAInputSpec -): +class _ICA_AROMAInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.aroma.ICA_AROMAInputSpec): out_report = File( - "ica_aroma_reportlet.svg", + 'ica_aroma_reportlet.svg', usedefault=True, - desc="Filename for the visual report generated by Nipype.", + desc='Filename for the visual report generated by Nipype.', ) report_mask = File( - desc="Mask used to draw the outline on the reportlet. " - "If not set the mask will be derived from the data." + desc='Mask used to draw the outline on the reportlet. ' + 'If not set the mask will be derived from the data.' ) -class _ICA_AROMAOutputSpecRPT( - reporting.ReportCapableOutputSpec, fsl.aroma.ICA_AROMAOutputSpec -): +class _ICA_AROMAOutputSpecRPT(reporting.ReportCapableOutputSpec, fsl.aroma.ICA_AROMAOutputSpec): pass @@ -229,10 +215,8 @@ def _generate_report(self): def _post_run_hook(self, runtime): outputs = self.aggregate_outputs(runtime=runtime) - self._noise_components_file = os.path.join( - outputs.out_dir, "classified_motion_ICs.txt" - ) + self._noise_components_file = os.path.join(outputs.out_dir, 'classified_motion_ICs.txt') - NIWORKFLOWS_LOG.info("Generating report for ICA AROMA") + NIWORKFLOWS_LOG.info('Generating report for ICA AROMA') return super()._post_run_hook(runtime) diff --git a/niworkflows/interfaces/space.py b/niworkflows/interfaces/space.py index 9ef19c53430..a7569c26ed1 100644 --- a/niworkflows/interfaces/space.py +++ b/niworkflows/interfaces/space.py @@ -21,26 +21,25 @@ # https://www.nipreps.org/community/licensing/ # """Interfaces for handling spaces.""" + from nipype.interfaces.base import ( - traits, - TraitedSpec, BaseInterfaceInputSpec, SimpleInterface, + TraitedSpec, + traits, ) class _SpaceDataSourceInputSpec(BaseInterfaceInputSpec): - in_tuple = traits.Tuple( - (traits.Str, traits.Dict), mandatory=True, desc="a space declaration" - ) + in_tuple = traits.Tuple((traits.Str, traits.Dict), mandatory=True, desc='a space declaration') class _SpaceDataSourceOutputSpec(TraitedSpec): - space = traits.Str(desc="the space identifier, after dropping the cohort modifier.") - cohort = traits.Str(desc="a cohort specifier") - resolution = traits.Str(desc="a resolution specifier") - density = traits.Str(desc="a density specifier") - uid = traits.Str(desc="a unique identifier combining space specifications") + space = traits.Str(desc='the space identifier, after dropping the cohort modifier.') + cohort = traits.Str(desc='a cohort specifier') + resolution = traits.Str(desc='a resolution specifier') + density = traits.Str(desc='a density specifier') + uid = traits.Str(desc='a unique identifier combining space specifications') class SpaceDataSource(SimpleInterface): @@ -78,5 +77,5 @@ def _run_interface(self, runtime): from ..utils.spaces import format_reference, reference2dict self._results = reference2dict(self.inputs.in_tuple) - self._results["uid"] = format_reference(self.inputs.in_tuple) + self._results['uid'] = format_reference(self.inputs.in_tuple) return runtime diff --git a/niworkflows/interfaces/surf.py b/niworkflows/interfaces/surf.py index 
0606805bf0e..2a1fd76da21 100644 --- a/niworkflows/interfaces/surf.py +++ b/niworkflows/interfaces/surf.py @@ -21,46 +21,45 @@ # https://www.nipreps.org/community/licensing/ # """Handling surfaces.""" + import os import re -from pathlib import Path from collections import defaultdict +from pathlib import Path -import numpy as np import nibabel as nb - -from nipype.utils.filemanip import fname_presuffix +import numpy as np from nipype.interfaces.base import ( BaseInterfaceInputSpec, - TraitedSpec, - DynamicTraitedSpec, - SimpleInterface, CommandLine, CommandLineInputSpec, + DynamicTraitedSpec, File, - traits, - isdefined, InputMultiPath, OutputMultiPath, + SimpleInterface, + TraitedSpec, Undefined, + isdefined, + traits, ) - +from nipype.utils.filemanip import fname_presuffix SECONDARY_ANAT_STRUC = { - "smoothwm": "GrayWhite", - "white": "GrayWhite", - "pial": "Pial", - "midthickness": "GrayMid", + 'smoothwm': 'GrayWhite', + 'white': 'GrayWhite', + 'pial': 'Pial', + 'midthickness': 'GrayMid', } class _NormalizeSurfInputSpec(BaseInterfaceInputSpec): - in_file = File(mandatory=True, exists=True, desc="Freesurfer-generated GIFTI file") - transform_file = File(exists=True, desc="FSL or LTA affine transform file") + in_file = File(mandatory=True, exists=True, desc='Freesurfer-generated GIFTI file') + transform_file = File(exists=True, desc='FSL or LTA affine transform file') class _NormalizeSurfOutputSpec(TraitedSpec): - out_file = File(desc="output file with re-centered GIFTI coordinates") + out_file = File(desc='output file with re-centered GIFTI coordinates') class NormalizeSurf(SimpleInterface): @@ -107,14 +106,14 @@ def _run_interface(self, runtime): transform_file = self.inputs.transform_file if not isdefined(transform_file): transform_file = None - self._results["out_file"] = normalize_surfs( + self._results['out_file'] = normalize_surfs( self.inputs.in_file, transform_file, newpath=runtime.cwd ) return runtime class _Path2BIDSInputSpec(BaseInterfaceInputSpec): - in_file = File(mandatory=True, desc="input GIFTI file") + in_file = File(mandatory=True, desc='input GIFTI file') class _Path2BIDSOutputSpec(DynamicTraitedSpec): @@ -166,10 +165,10 @@ class Path2BIDS(SimpleInterface): input_spec = _Path2BIDSInputSpec output_spec = _Path2BIDSOutputSpec _pattern = re.compile( - r"(?P<hemi>[lr])h.(?P<suffix>(white|smoothwm|pial|midthickness|" - r"inflated|vinflated|sphere|flat|sulc|curv|thickness))[\w\d_-]*(?P<extprefix>\.\w+)?" + r'(?P<hemi>[lr])h.(?P<suffix>(white|smoothwm|pial|midthickness|' + r'inflated|vinflated|sphere|flat|sulc|curv|thickness))[\w\d_-]*(?P<extprefix>\.\w+)?'
) - _excluded = ("extprefix",) + _excluded = ('extprefix',) def __init__(self, pattern=None, **inputs): """Initialize the interface.""" @@ -191,26 +190,26 @@ def _outputs(self): def _run_interface(self, runtime): in_file = Path(self.inputs.in_file) - extension = "".join(in_file.suffixes[-((in_file.suffixes[-1] == ".gz") + 1):]) + extension = ''.join(in_file.suffixes[-((in_file.suffixes[-1] == '.gz') + 1) :]) info = self._pattern.match(in_file.name[: -len(extension)]).groupdict() - self._results["extension"] = f"{info.pop('extprefix', None) or ''}{extension}" + self._results['extension'] = f"{info.pop('extprefix', None) or ''}{extension}" self._results.update(info) - if "hemi" in self._results: - self._results["hemi"] = self._results["hemi"].upper() + if 'hemi' in self._results: + self._results['hemi'] = self._results['hemi'].upper() return runtime class _GiftiNameSourceInputSpec(BaseInterfaceInputSpec): - in_file = File(mandatory=True, exists=True, desc="input GIFTI file") + in_file = File(mandatory=True, exists=True, desc='input GIFTI file') pattern = traits.Str( mandatory=True, desc='input file name pattern (must capture named group "LR")' ) - template = traits.Str(mandatory=True, desc="output file name template") - template_kwargs = traits.Dict(desc="additional template keyword value pairs") + template = traits.Str(mandatory=True, desc='output file name template') + template_kwargs = traits.Dict(desc='additional template keyword value pairs') class _GiftiNameSourceOutputSpec(TraitedSpec): - out_name = traits.Str(desc="(partial) filename formatted according to template") + out_name = traits.Str(desc='(partial) filename formatted according to template') class GiftiNameSource(SimpleInterface): @@ -267,6 +266,7 @@ class GiftiNameSource(SimpleInterface): .. _GIFTI Standard: https://www.nitrc.org/frs/download.php/2871/GIFTI_Surface_Format.pdf """ + input_spec = _GiftiNameSourceInputSpec output_spec = _GiftiNameSourceOutputSpec @@ -274,22 +274,20 @@ def _run_interface(self, runtime): in_format = re.compile(self.inputs.pattern) in_file = os.path.basename(self.inputs.in_file) info = in_format.match(in_file).groupdict() - info["LR"] = info["LR"].upper() + info['LR'] = info['LR'].upper() if self.inputs.template_kwargs: info.update(self.inputs.template_kwargs) filefmt = self.inputs.template - self._results["out_name"] = filefmt.format(**info) + self._results['out_name'] = filefmt.format(**info) return runtime class _GiftiSetAnatomicalStructureInputSpec(BaseInterfaceInputSpec): - in_file = File( - mandatory=True, exists=True, desc='GIFTI file beginning with "lh." or "rh."' - ) + in_file = File(mandatory=True, exists=True, desc='GIFTI file beginning with "lh." 
or "rh."') class _GiftiSetAnatomicalStructureOutputSpec(TraitedSpec): - out_file = File(desc="output file with updated AnatomicalStructurePrimary entry") + out_file = File(desc='output file with updated AnatomicalStructurePrimary entry') class GiftiSetAnatomicalStructure(SimpleInterface): @@ -313,32 +311,28 @@ class GiftiSetAnatomicalStructure(SimpleInterface): def _run_interface(self, runtime): img = nb.load(self.inputs.in_file) - if any(nvpair.name == "AnatomicalStruturePrimary" for nvpair in img.meta.data): + if any(nvpair.name == 'AnatomicalStruturePrimary' for nvpair in img.meta.data): out_file = self.inputs.in_file else: fname = os.path.basename(self.inputs.in_file) - if fname[:3] in ("lh.", "rh."): - asp = "CortexLeft" if fname[0] == "l" else "CortexRight" + if fname[:3] in ('lh.', 'rh.'): + asp = 'CortexLeft' if fname[0] == 'l' else 'CortexRight' else: - raise ValueError( - "AnatomicalStructurePrimary cannot be derived from filename" - ) - img.meta.data.insert( - 0, nb.gifti.GiftiNVPairs("AnatomicalStructurePrimary", asp) - ) + raise ValueError('AnatomicalStructurePrimary cannot be derived from filename') + img.meta.data.insert(0, nb.gifti.GiftiNVPairs('AnatomicalStructurePrimary', asp)) out_file = os.path.join(runtime.cwd, fname) img.to_filename(out_file) - self._results["out_file"] = out_file + self._results['out_file'] = out_file return runtime class _GiftiToCSVInputSpec(BaseInterfaceInputSpec): - in_file = File(mandatory=True, exists=True, desc="GIFTI file") - itk_lps = traits.Bool(False, usedefault=True, desc="flip XY axes") + in_file = File(mandatory=True, exists=True, desc='GIFTI file') + itk_lps = traits.Bool(False, usedefault=True, desc='flip XY axes') class _GiftiToCSVOutputSpec(TraitedSpec): - out_file = File(desc="output csv file") + out_file = File(desc='output csv file') class GiftiToCSV(SimpleInterface): @@ -359,27 +353,27 @@ def _run_interface(self, runtime): csvdata = np.hstack((data, np.zeros((data.shape[0], 3)))) out_file = fname_presuffix( - self.inputs.in_file, newpath=runtime.cwd, use_ext=False, suffix="points.csv" + self.inputs.in_file, newpath=runtime.cwd, use_ext=False, suffix='points.csv' ) np.savetxt( out_file, csvdata, - delimiter=",", - header="x,y,z,t,label,comment", - fmt=["%.5f"] * 4 + ["%d"] * 2, + delimiter=',', + header='x,y,z,t,label,comment', + fmt=['%.5f'] * 4 + ['%d'] * 2, ) - self._results["out_file"] = out_file + self._results['out_file'] = out_file return runtime class _CSVToGiftiInputSpec(BaseInterfaceInputSpec): - in_file = File(mandatory=True, exists=True, desc="CSV file") - gii_file = File(mandatory=True, exists=True, desc="reference GIfTI file") - itk_lps = traits.Bool(False, usedefault=True, desc="flip XY axes") + in_file = File(mandatory=True, exists=True, desc='CSV file') + gii_file = File(mandatory=True, exists=True, desc='reference GIfTI file') + itk_lps = traits.Bool(False, usedefault=True, desc='flip XY axes') class _CSVToGiftiOutputSpec(TraitedSpec): - out_file = File(desc="output GIfTI file") + out_file = File(desc='output GIfTI file') class CSVToGifti(SimpleInterface): @@ -391,31 +385,27 @@ class CSVToGifti(SimpleInterface): def _run_interface(self, runtime): gii = nb.load(self.inputs.gii_file) - data = np.loadtxt( - self.inputs.in_file, delimiter=",", skiprows=1, usecols=(0, 1, 2) - ) + data = np.loadtxt(self.inputs.in_file, delimiter=',', skiprows=1, usecols=(0, 1, 2)) if self.inputs.itk_lps: # ITK: flip X and Y around 0 data[:, :2] *= -1 gii.darrays[0].data = data[:, :3].astype(gii.darrays[0].data.dtype) out_file = 
fname_presuffix( - self.inputs.gii_file, newpath=runtime.cwd, suffix=".transformed" + self.inputs.gii_file, newpath=runtime.cwd, suffix='.transformed' ) gii.to_filename(out_file) - self._results["out_file"] = out_file + self._results['out_file'] = out_file return runtime class _SurfacesToPointCloudInputSpec(BaseInterfaceInputSpec): - in_files = InputMultiPath( - File(exists=True), mandatory=True, desc="input GIfTI files" - ) - out_file = File("pointcloud.ply", usedefault=True, desc="output file name") + in_files = InputMultiPath(File(exists=True), mandatory=True, desc='input GIfTI files') + out_file = File('pointcloud.ply', usedefault=True, desc='output file name') class _SurfacesToPointCloudOutputSpec(TraitedSpec): - out_file = File(desc="output pointcloud in PLY format") + out_file = File(desc='output pointcloud in PLY format') class SurfacesToPointCloud(SimpleInterface): @@ -430,12 +420,10 @@ def _run_interface(self, runtime): giis = [nb.load(g) for g in self.inputs.in_files] vertices = np.vstack([g.darrays[0].data for g in giis]) - norms = np.vstack( - [vertex_normals(g.darrays[0].data, g.darrays[1].data) for g in giis] - ) + norms = np.vstack([vertex_normals(g.darrays[0].data, g.darrays[1].data) for g in giis]) out_file = Path(self.inputs.out_file).resolve() pointcloud2ply(vertices, norms, out_file=out_file) - self._results["out_file"] = str(out_file) + self._results['out_file'] = str(out_file) return runtime @@ -443,20 +431,20 @@ class _PoissonReconInputSpec(CommandLineInputSpec): in_file = File( exists=True, mandatory=True, - argstr="--in %s", - desc="input PLY pointcloud (vertices + normals)", + argstr='--in %s', + desc='input PLY pointcloud (vertices + normals)', ) out_file = File( - argstr="--out %s", + argstr='--out %s', keep_extension=True, - name_source=["in_file"], - name_template="%s_avg", - desc="output PLY triangular mesh", + name_source=['in_file'], + name_template='%s_avg', + desc='output PLY triangular mesh', ) class _PoissonReconOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="output PLY triangular mesh") + out_file = File(exists=True, desc='output PLY triangular mesh') class PoissonRecon(CommandLine): @@ -467,16 +455,16 @@ class PoissonRecon(CommandLine): input_spec = _PoissonReconInputSpec output_spec = _PoissonReconOutputSpec - _cmd = "PoissonRecon" + _cmd = 'PoissonRecon' class _PLYtoGiftiInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="input PLY file") - surf_key = traits.Str(mandatory=True, desc="reference GIfTI file") + in_file = File(exists=True, mandatory=True, desc='input PLY file') + surf_key = traits.Str(mandatory=True, desc='reference GIfTI file') class _PLYtoGiftiOutputSpec(TraitedSpec): - out_file = File(desc="output GIfTI file") + out_file = File(desc='output GIfTI file') class PLYtoGifti(SimpleInterface): @@ -489,51 +477,51 @@ def _run_interface(self, runtime): from pathlib import Path meta = { - "GeometricType": "Anatomical", - "VolGeomWidth": "256", - "VolGeomHeight": "256", - "VolGeomDepth": "256", - "VolGeomXsize": "1.0", - "VolGeomYsize": "1.0", - "VolGeomZsize": "1.0", - "VolGeomX_R": "-1.0", - "VolGeomX_A": "0.0", - "VolGeomX_S": "0.0", - "VolGeomY_R": "0.0", - "VolGeomY_A": "0.0", - "VolGeomY_S": "-1.0", - "VolGeomZ_R": "0.0", - "VolGeomZ_A": "1.0", - "VolGeomZ_S": "0.0", - "VolGeomC_R": "0.0", - "VolGeomC_A": "0.0", - "VolGeomC_S": "0.0", + 'GeometricType': 'Anatomical', + 'VolGeomWidth': '256', + 'VolGeomHeight': '256', + 'VolGeomDepth': '256', + 'VolGeomXsize': '1.0', + 'VolGeomYsize': 
'1.0', + 'VolGeomZsize': '1.0', + 'VolGeomX_R': '-1.0', + 'VolGeomX_A': '0.0', + 'VolGeomX_S': '0.0', + 'VolGeomY_R': '0.0', + 'VolGeomY_A': '0.0', + 'VolGeomY_S': '-1.0', + 'VolGeomZ_R': '0.0', + 'VolGeomZ_A': '1.0', + 'VolGeomZ_S': '0.0', + 'VolGeomC_R': '0.0', + 'VolGeomC_A': '0.0', + 'VolGeomC_S': '0.0', } - meta["AnatomicalStructurePrimary"] = "Cortex%s" % ( - "Left" if self.inputs.surf_key.startswith("lh") else "Right" + meta['AnatomicalStructurePrimary'] = 'Cortex%s' % ( + 'Left' if self.inputs.surf_key.startswith('lh') else 'Right' ) - meta["AnatomicalStructureSecondary"] = SECONDARY_ANAT_STRUC[ - self.inputs.surf_key.split(".")[-1] + meta['AnatomicalStructureSecondary'] = SECONDARY_ANAT_STRUC[ + self.inputs.surf_key.split('.')[-1] ] - meta["Name"] = "%s_average.gii" % self.inputs.surf_key + meta['Name'] = f'{self.inputs.surf_key}_average.gii' - out_file = Path(runtime.cwd) / meta["Name"] + out_file = Path(runtime.cwd) / meta['Name'] out_file = ply2gii(self.inputs.in_file, meta, out_file=out_file) - self._results["out_file"] = str(out_file) + self._results['out_file'] = str(out_file) return runtime class _UnzipJoinedSurfacesInputSpec(BaseInterfaceInputSpec): in_files = traits.List( - InputMultiPath(File(exists=True), mandatory=True, desc="input GIfTI files") + InputMultiPath(File(exists=True), mandatory=True, desc='input GIfTI files') ) class _UnzipJoinedSurfacesOutputSpec(TraitedSpec): out_files = traits.List( - OutputMultiPath(File(exists=True), desc="output pointcloud in PLY format") + OutputMultiPath(File(exists=True), desc='output pointcloud in PLY format') ) - surf_keys = traits.List(traits.Str, desc="surface identifier keys") + surf_keys = traits.List(traits.Str, desc='surface identifier keys') class UnzipJoinedSurfaces(SimpleInterface): @@ -550,10 +538,10 @@ def _run_interface(self, runtime): for f in in_files: bname = Path(f).name - groups[bname.split("_")[0]].append(f) + groups[bname.split('_')[0]].append(f) - self._results["out_files"] = [sorted(els) for els in groups.values()] - self._results["surf_keys"] = list(groups.keys()) + self._results['out_files'] = [sorted(els) for els in groups.values()] + self._results['surf_keys'] = list(groups.keys()) return runtime @@ -561,8 +549,8 @@ def _run_interface(self, runtime): class CreateSurfaceROIInputSpec(TraitedSpec): subject_id = traits.Str(desc='subject ID') hemisphere = traits.Enum( - "L", - "R", + 'L', + 'R', mandatory=True, desc='hemisphere', ) @@ -590,11 +578,11 @@ def _run_interface(self, runtime): subject = 'sub-XYZ' img = nb.GiftiImage.from_filename(self.inputs.thickness_file) # wb_command -set-structure (L282) - img.meta["AnatomicalStructurePrimary"] = {'L': 'CortexLeft', 'R': 'CortexRight'}[hemi] + img.meta['AnatomicalStructurePrimary'] = {'L': 'CortexLeft', 'R': 'CortexRight'}[hemi] darray = img.darrays[0] # wb_command -set-map-names (L284) meta = darray.meta - meta['Name'] = f"{subject}_{hemi}_ROI" + meta['Name'] = f'{subject}_{hemi}_ROI' # wb_command -metric-palette calls (L285, L289) have no effect on ROI files # Compiling an odd sequence of math operations (L283, L288, L290) that work out to: @@ -612,9 +600,9 @@ def _run_interface(self, runtime): meta=meta, ) - out_filename = os.path.join(runtime.cwd, f"{subject}.{hemi}.roi.native.shape.gii") + out_filename = os.path.join(runtime.cwd, f'{subject}.{hemi}.roi.native.shape.gii') img.to_filename(out_filename) - self._results["roi_file"] = out_filename + self._results['roi_file'] = out_filename return runtime @@ -632,23 +620,21 @@ def normalize_surfs(in_file, 
transform_file, newpath=None): img = nb.load(in_file) transform = load_transform(transform_file) - pointset = img.get_arrays_from_intent("NIFTI_INTENT_POINTSET")[0] + pointset = img.get_arrays_from_intent('NIFTI_INTENT_POINTSET')[0] coords = pointset.data.T - c_ras_keys = ("VolGeomC_R", "VolGeomC_A", "VolGeomC_S") + c_ras_keys = ('VolGeomC_R', 'VolGeomC_A', 'VolGeomC_S') ras = np.array([[float(pointset.metadata[key])] for key in c_ras_keys]) ones = np.ones((1, coords.shape[1]), dtype=coords.dtype) # Apply C_RAS translation to coordinates, then transform - pointset.data = transform.dot(np.vstack((coords + ras, ones)))[:3].T.astype( - coords.dtype - ) + pointset.data = transform.dot(np.vstack((coords + ras, ones)))[:3].T.astype(coords.dtype) - secondary = nb.gifti.GiftiNVPairs("AnatomicalStructureSecondary", "MidThickness") - geom_type = nb.gifti.GiftiNVPairs("GeometricType", "Anatomical") + secondary = nb.gifti.GiftiNVPairs('AnatomicalStructureSecondary', 'MidThickness') + geom_type = nb.gifti.GiftiNVPairs('GeometricType', 'Anatomical') has_ass = has_geo = False for nvpair in pointset.meta.data: # Remove C_RAS translation from metadata to avoid double-dipping in FreeSurfer if nvpair.name in c_ras_keys: - nvpair.value = "0.000000" + nvpair.value = '0.000000' # Check for missing metadata elif nvpair.name == secondary.name: has_ass = True @@ -656,7 +642,7 @@ def normalize_surfs(in_file, transform_file, newpath=None): has_geo = True fname = os.path.basename(in_file) # Update metadata for MidThickness/graymid surfaces - if "midthickness" in fname.lower() or "graymid" in fname.lower(): + if 'midthickness' in fname.lower() or 'graymid' in fname.lower(): if not has_ass: pointset.meta.data.insert(1, secondary) if not has_geo: @@ -685,24 +671,24 @@ def load_transform(fname): if fname is None: return np.eye(4) - if fname.endswith(".mat"): + if fname.endswith('.mat'): return np.loadtxt(fname) - elif fname.endswith(".lta"): - with open(fname, "rb") as fobj: + elif fname.endswith('.lta'): + with open(fname, 'rb') as fobj: for line in fobj: - if line.startswith(b"1 4 4"): + if line.startswith(b'1 4 4'): break lines = fobj.readlines()[:4] return np.genfromtxt(lines) - raise ValueError("Unknown transform type; pass FSL (.mat) or LTA (.lta)") + raise ValueError('Unknown transform type; pass FSL (.mat) or LTA (.lta)') def vertex_normals(vertices, faces): """Calculates the normals of a triangular mesh""" def normalize_v3(arr): - """ Normalize a numpy array of 3 component vectors shape=(n,3) """ + """Normalize a numpy array of 3 component vectors shape=(n,3)""" lens = np.sqrt(arr[:, 0] ** 2 + arr[:, 1] ** 2 + arr[:, 2] ** 2) arr /= lens[:, np.newaxis] @@ -721,15 +707,16 @@ def normalize_v3(arr): def pointcloud2ply(vertices, normals, out_file=None): """Converts the file to PLY format""" from pathlib import Path + import pandas as pd from pyntcloud import PyntCloud df = pd.DataFrame(np.hstack((vertices, normals))) - df.columns = ["x", "y", "z", "nx", "ny", "nz"] + df.columns = ['x', 'y', 'z', 'nx', 'ny', 'nz'] cloud = PyntCloud(df) if out_file is None: - out_file = Path("pointcloud.ply").resolve() + out_file = Path('pointcloud.ply').resolve() cloud.to_file(str(out_file)) return out_file @@ -738,13 +725,14 @@ def pointcloud2ply(vertices, normals, out_file=None): def ply2gii(in_file, metadata, out_file=None): """Convert from ply to GIfTI""" from pathlib import Path - from numpy import eye + from nibabel.gifti import ( - GiftiMetaData, GiftiCoordSystem, - GiftiImage, GiftiDataArray, + GiftiImage, + GiftiMetaData, ) 
+ from numpy import eye from pyntcloud import PyntCloud in_file = Path(in_file) @@ -753,24 +741,24 @@ def ply2gii(in_file, metadata, out_file=None): # Update centroid metadata metadata.update( zip( - ("SurfaceCenterX", "SurfaceCenterY", "SurfaceCenterZ"), - ["%.4f" % c for c in surf.centroid], + ('SurfaceCenterX', 'SurfaceCenterY', 'SurfaceCenterZ'), + [f'{c:.4f}' for c in surf.centroid], ) ) # Prepare data arrays da = ( GiftiDataArray( - data=surf.xyz.astype("float32"), - datatype="NIFTI_TYPE_FLOAT32", - intent="NIFTI_INTENT_POINTSET", + data=surf.xyz.astype('float32'), + datatype='NIFTI_TYPE_FLOAT32', + intent='NIFTI_INTENT_POINTSET', meta=GiftiMetaData.from_dict(metadata), coordsys=GiftiCoordSystem(xform=eye(4), xformspace=3), ), GiftiDataArray( data=surf.mesh.values, - datatype="NIFTI_TYPE_INT32", - intent="NIFTI_INTENT_TRIANGLE", + datatype='NIFTI_TYPE_INT32', + intent='NIFTI_INTENT_TRIANGLE', coordsys=None, ), ) @@ -778,7 +766,7 @@ def ply2gii(in_file, metadata, out_file=None): if out_file is None: out_file = fname_presuffix( - in_file.name, suffix=".gii", use_ext=False, newpath=str(Path.cwd()) + in_file.name, suffix='.gii', use_ext=False, newpath=str(Path.cwd()) ) surfgii.to_filename(str(out_file)) diff --git a/niworkflows/interfaces/tests/data/__init__.py b/niworkflows/interfaces/tests/data/__init__.py index 0eb4f637ab0..f3a8363d212 100644 --- a/niworkflows/interfaces/tests/data/__init__.py +++ b/niworkflows/interfaces/tests/data/__init__.py @@ -2,6 +2,7 @@ .. autofunction:: load_test_data """ + from acres import Loader load_test_data = Loader(__package__) diff --git a/niworkflows/interfaces/tests/test_bids.py b/niworkflows/interfaces/tests/test_bids.py index c983ea0d769..d531d0a8e6d 100644 --- a/niworkflows/interfaces/tests/test_bids.py +++ b/niworkflows/interfaces/tests/test_bids.py @@ -21,31 +21,31 @@ # https://www.nipreps.org/community/licensing/ # """Tests on BIDS compliance.""" -import sys -import os -from pathlib import Path + import json +import os from hashlib import sha1 +from pathlib import Path -import numpy as np import nibabel as nb +import numpy as np import pytest from nipype.interfaces.base import Undefined from packaging.version import Version -from .. import bids as bintfs from niworkflows.testing import needs_data_dir +from .. 
import bids as bintfs XFORM_CODES = { - "MNI152Lin": 4, - "T1w": 2, - "boldref": 2, + 'MNI152Lin': 4, + 'T1w': 2, + 'boldref': 2, None: 1, } -T1W_PATH = "ds054/sub-100185/anat/sub-100185_T1w.nii.gz" -BOLD_PATH = "ds054/sub-100185/func/sub-100185_task-machinegame_run-01_bold.nii.gz" +T1W_PATH = 'ds054/sub-100185/anat/sub-100185_T1w.nii.gz' +BOLD_PATH = 'ds054/sub-100185/func/sub-100185_task-machinegame_run-01_bold.nii.gz' def make_prep_and_save( @@ -76,171 +76,169 @@ def connect_and_run_save(prep_result, save): return save.run() -@pytest.mark.parametrize("interface", [bintfs.DerivativesDataSink, bintfs.PrepareDerivative]) -@pytest.mark.parametrize("out_path_base", [None, "fmriprep"]) +@pytest.mark.parametrize('interface', [bintfs.DerivativesDataSink, bintfs.PrepareDerivative]) +@pytest.mark.parametrize('out_path_base', [None, 'fmriprep']) @pytest.mark.parametrize( - "source,input_files,entities,expectation,checksum", + ('source', 'input_files', 'entities', 'expectation', 'checksum'), [ ( T1W_PATH, - ["anat.nii.gz"], - {"desc": "preproc"}, - "sub-100185/anat/sub-100185_desc-preproc_T1w.nii.gz", - "7c047921def32da260df4a985019b9f5231659fa", + ['anat.nii.gz'], + {'desc': 'preproc'}, + 'sub-100185/anat/sub-100185_desc-preproc_T1w.nii.gz', + '7c047921def32da260df4a985019b9f5231659fa', ), ( T1W_PATH, - ["anat.nii.gz"], - {"desc": "preproc", "space": "MNI"}, - "sub-100185/anat/sub-100185_space-MNI_desc-preproc_T1w.nii.gz", - "b22399f50ce454049d5d074457a92ab13e7fdf8c", + ['anat.nii.gz'], + {'desc': 'preproc', 'space': 'MNI'}, + 'sub-100185/anat/sub-100185_space-MNI_desc-preproc_T1w.nii.gz', + 'b22399f50ce454049d5d074457a92ab13e7fdf8c', ), ( T1W_PATH, - ["anat.nii.gz"], - {"desc": "preproc", "space": "MNI", "resolution": "native"}, - "sub-100185/anat/sub-100185_space-MNI_desc-preproc_T1w.nii.gz", - "b22399f50ce454049d5d074457a92ab13e7fdf8c", + ['anat.nii.gz'], + {'desc': 'preproc', 'space': 'MNI', 'resolution': 'native'}, + 'sub-100185/anat/sub-100185_space-MNI_desc-preproc_T1w.nii.gz', + 'b22399f50ce454049d5d074457a92ab13e7fdf8c', ), ( T1W_PATH, - ["anat.nii.gz"], - {"desc": "preproc", "space": "MNI", "resolution": "high"}, - "sub-100185/anat/sub-100185_space-MNI_res-high_desc-preproc_T1w.nii.gz", - "b22399f50ce454049d5d074457a92ab13e7fdf8c", + ['anat.nii.gz'], + {'desc': 'preproc', 'space': 'MNI', 'resolution': 'high'}, + 'sub-100185/anat/sub-100185_space-MNI_res-high_desc-preproc_T1w.nii.gz', + 'b22399f50ce454049d5d074457a92ab13e7fdf8c', ), ( T1W_PATH, - ["tfm.txt"], - {"from": "fsnative", "to": "T1w", "suffix": "xfm"}, - "sub-100185/anat/sub-100185_from-fsnative_to-T1w_mode-image_xfm.txt", - "da39a3ee5e6b4b0d3255bfef95601890afd80709", + ['tfm.txt'], + {'from': 'fsnative', 'to': 'T1w', 'suffix': 'xfm'}, + 'sub-100185/anat/sub-100185_from-fsnative_to-T1w_mode-image_xfm.txt', + 'da39a3ee5e6b4b0d3255bfef95601890afd80709', ), ( T1W_PATH, - ["tfm.h5"], - {"from": "MNI152NLin2009cAsym", "to": "T1w", "suffix": "xfm"}, - "sub-100185/anat/sub-100185_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5", - "da39a3ee5e6b4b0d3255bfef95601890afd80709", + ['tfm.h5'], + {'from': 'MNI152NLin2009cAsym', 'to': 'T1w', 'suffix': 'xfm'}, + 'sub-100185/anat/sub-100185_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5', + 'da39a3ee5e6b4b0d3255bfef95601890afd80709', ), ( T1W_PATH, - ["anat.nii.gz"], - {"desc": "brain", "suffix": "mask"}, - "sub-100185/anat/sub-100185_desc-brain_mask.nii.gz", - "7af86a1f6806a41078e4d2699d680dbe2b9f6ae2", + ['anat.nii.gz'], + {'desc': 'brain', 'suffix': 'mask'}, + 
'sub-100185/anat/sub-100185_desc-brain_mask.nii.gz', + '7af86a1f6806a41078e4d2699d680dbe2b9f6ae2', ), ( T1W_PATH, - ["anat.nii.gz"], - {"desc": "brain", "suffix": "mask", "space": "MNI"}, - "sub-100185/anat/sub-100185_space-MNI_desc-brain_mask.nii.gz", - "1591f90e0da2a624c972784dda6a01b5572add15", + ['anat.nii.gz'], + {'desc': 'brain', 'suffix': 'mask', 'space': 'MNI'}, + 'sub-100185/anat/sub-100185_space-MNI_desc-brain_mask.nii.gz', + '1591f90e0da2a624c972784dda6a01b5572add15', ), ( T1W_PATH, - ["anat.surf.gii"], - {"suffix": "pial", "hemi": "L"}, - "sub-100185/anat/sub-100185_hemi-L_pial.surf.gii", - "da39a3ee5e6b4b0d3255bfef95601890afd80709", + ['anat.surf.gii'], + {'suffix': 'pial', 'hemi': 'L'}, + 'sub-100185/anat/sub-100185_hemi-L_pial.surf.gii', + 'da39a3ee5e6b4b0d3255bfef95601890afd80709', ), ( T1W_PATH, - ["aseg.nii", "aparc.nii"], - {"desc": ["aseg", "aparcaseg"], "suffix": "dseg"}, + ['aseg.nii', 'aparc.nii'], + {'desc': ['aseg', 'aparcaseg'], 'suffix': 'dseg'}, + [f'sub-100185/anat/sub-100185_desc-{s}_dseg.nii' for s in ('aseg', 'aparcaseg')], [ - f"sub-100185/anat/sub-100185_desc-{s}_dseg.nii" - for s in ("aseg", "aparcaseg") + '5543a788bf3383d7a2fc41f5cff4e0bbb8f5f282', + '5543a788bf3383d7a2fc41f5cff4e0bbb8f5f282', ], - ["5543a788bf3383d7a2fc41f5cff4e0bbb8f5f282", - "5543a788bf3383d7a2fc41f5cff4e0bbb8f5f282"], ), ( T1W_PATH, - ["anat.nii", "anat.json"], - {"desc": "preproc"}, + ['anat.nii', 'anat.json'], + {'desc': 'preproc'}, + [f'sub-100185/anat/sub-100185_desc-preproc_T1w.{ext}' for ext in ('nii', 'json')], [ - f"sub-100185/anat/sub-100185_desc-preproc_T1w.{ext}" - for ext in ("nii", "json") + '25c107d4a3e6f98e48aa752c5bbd88ab8e8d069f', + 'da39a3ee5e6b4b0d3255bfef95601890afd80709', ], - ["25c107d4a3e6f98e48aa752c5bbd88ab8e8d069f", - "da39a3ee5e6b4b0d3255bfef95601890afd80709"], ), ( T1W_PATH, - ["anat.nii.gz"] * 3, - {"label": ["GM", "WM", "CSF"], "suffix": "probseg"}, + ['anat.nii.gz'] * 3, + {'label': ['GM', 'WM', 'CSF'], 'suffix': 'probseg'}, [ - f"sub-100185/anat/sub-100185_label-{lab}_probseg.nii.gz" - for lab in ("GM", "WM", "CSF") + f'sub-100185/anat/sub-100185_label-{lab}_probseg.nii.gz' + for lab in ('GM', 'WM', 'CSF') ], - ["7c047921def32da260df4a985019b9f5231659fa"] * 3, + ['7c047921def32da260df4a985019b9f5231659fa'] * 3, ), # BOLD data ( BOLD_PATH, - ["aroma.csv"], - {"suffix": "AROMAnoiseICs"}, - "sub-100185/func/sub-100185_task-machinegame_run-01_AROMAnoiseICs.csv", - "da39a3ee5e6b4b0d3255bfef95601890afd80709", + ['aroma.csv'], + {'suffix': 'AROMAnoiseICs'}, + 'sub-100185/func/sub-100185_task-machinegame_run-01_AROMAnoiseICs.csv', + 'da39a3ee5e6b4b0d3255bfef95601890afd80709', ), ( BOLD_PATH, - ["confounds.tsv"], - {"suffix": "regressors", "desc": "confounds"}, - "sub-100185/func/sub-100185_task-machinegame_run-01_desc-confounds_regressors.tsv", - "da39a3ee5e6b4b0d3255bfef95601890afd80709", + ['confounds.tsv'], + {'suffix': 'regressors', 'desc': 'confounds'}, + 'sub-100185/func/sub-100185_task-machinegame_run-01_desc-confounds_regressors.tsv', + 'da39a3ee5e6b4b0d3255bfef95601890afd80709', ), ( BOLD_PATH, - ["mixing.tsv"], - {"suffix": "mixing", "desc": "MELODIC"}, - "sub-100185/func/sub-100185_task-machinegame_run-01_desc-MELODIC_mixing.tsv", - "da39a3ee5e6b4b0d3255bfef95601890afd80709", + ['mixing.tsv'], + {'suffix': 'mixing', 'desc': 'MELODIC'}, + 'sub-100185/func/sub-100185_task-machinegame_run-01_desc-MELODIC_mixing.tsv', + 'da39a3ee5e6b4b0d3255bfef95601890afd80709', ), ( BOLD_PATH, - ["lh.func.gii"], - {"space": "fsaverage", "density": "10k", "hemi": "L"}, 
- "sub-100185/func/sub-100185_task-machinegame_run-01_" - "hemi-L_space-fsaverage_den-10k_bold.func.gii", - "da39a3ee5e6b4b0d3255bfef95601890afd80709", + ['lh.func.gii'], + {'space': 'fsaverage', 'density': '10k', 'hemi': 'L'}, + 'sub-100185/func/sub-100185_task-machinegame_run-01_' + 'hemi-L_space-fsaverage_den-10k_bold.func.gii', + 'da39a3ee5e6b4b0d3255bfef95601890afd80709', ), ( BOLD_PATH, - ["hcp.dtseries.nii"], - {"space": "fsLR", "density": "91k"}, - "sub-100185/func/sub-100185_task-machinegame_run-01_" - "space-fsLR_den-91k_bold.dtseries.nii", - "335f1394ce90b58bbf27026b6eeec4d2124c11da", + ['hcp.dtseries.nii'], + {'space': 'fsLR', 'density': '91k'}, + 'sub-100185/func/sub-100185_task-machinegame_run-01_' + 'space-fsLR_den-91k_bold.dtseries.nii', + '335f1394ce90b58bbf27026b6eeec4d2124c11da', ), ( BOLD_PATH, - ["ref.nii"], - {"space": "MNI", "suffix": "boldref"}, - "sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_boldref.nii", - "53d9b486d08fec5a952f68fcbcddb38a72818d4c", + ['ref.nii'], + {'space': 'MNI', 'suffix': 'boldref'}, + 'sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_boldref.nii', + '53d9b486d08fec5a952f68fcbcddb38a72818d4c', ), ( BOLD_PATH, - ["dseg.nii"], - {"space": "MNI", "suffix": "dseg", "desc": "aseg"}, - "sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_desc-aseg_dseg.nii", - "ddadc9be8224eebe0177a65bf87300f275e17e96", + ['dseg.nii'], + {'space': 'MNI', 'suffix': 'dseg', 'desc': 'aseg'}, + 'sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_desc-aseg_dseg.nii', + 'ddadc9be8224eebe0177a65bf87300f275e17e96', ), ( BOLD_PATH, - ["mask.nii"], - {"space": "MNI", "suffix": "mask", "desc": "brain"}, - "sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_desc-brain_mask.nii", - "f97a1877508139b42ea9fc476bdba367b001ab00", + ['mask.nii'], + {'space': 'MNI', 'suffix': 'mask', 'desc': 'brain'}, + 'sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_desc-brain_mask.nii', + 'f97a1877508139b42ea9fc476bdba367b001ab00', ), ( BOLD_PATH, - ["bold.nii"], - {"space": "MNI", "desc": "preproc"}, - "sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_desc-preproc_bold.nii", - "aa1eed935e6a8dcca646b0c78ee57218e30e2974", + ['bold.nii'], + {'space': 'MNI', 'desc': 'preproc'}, + 'sub-100185/func/sub-100185_task-machinegame_run-01_space-MNI_desc-preproc_bold.nii', + 'aa1eed935e6a8dcca646b0c78ee57218e30e2974', ), # Nondeterministic order - do we really need this to work, or we can stay safe with # MapNodes? 
@@ -250,42 +248,42 @@ def connect_and_run_save(prep_result, save): # for s in ("MNIa", "MNIb") for l in ("GM", "WM", "CSF")]), ( T1W_PATH, - ["anat.html"], - {"desc": "conform", "datatype": "figures"}, - "sub-100185/figures/sub-100185_desc-conform_T1w.html", - "da39a3ee5e6b4b0d3255bfef95601890afd80709", + ['anat.html'], + {'desc': 'conform', 'datatype': 'figures'}, + 'sub-100185/figures/sub-100185_desc-conform_T1w.html', + 'da39a3ee5e6b4b0d3255bfef95601890afd80709', ), ( BOLD_PATH, - ["aroma.csv"], - {"suffix": "AROMAnoiseICs", "extension": "h5"}, + ['aroma.csv'], + {'suffix': 'AROMAnoiseICs', 'extension': 'h5'}, ValueError, None, ), ( T1W_PATH, - ["anat.nii.gz"] * 3, - {"desc": "preproc", "space": "MNI"}, + ['anat.nii.gz'] * 3, + {'desc': 'preproc', 'space': 'MNI'}, ValueError, None, ), ( - "sub-07/ses-preop/anat/sub-07_ses-preop_T1w.nii.gz", - ["tfm.h5"], - {"from": "orig", "to": "target", "suffix": "xfm"}, - "sub-07/ses-preop/anat/sub-07_ses-preop_from-orig_to-target_mode-image_xfm.h5", - "da39a3ee5e6b4b0d3255bfef95601890afd80709", + 'sub-07/ses-preop/anat/sub-07_ses-preop_T1w.nii.gz', + ['tfm.h5'], + {'from': 'orig', 'to': 'target', 'suffix': 'xfm'}, + 'sub-07/ses-preop/anat/sub-07_ses-preop_from-orig_to-target_mode-image_xfm.h5', + 'da39a3ee5e6b4b0d3255bfef95601890afd80709', ), ( - "sub-07/ses-preop/anat/sub-07_ses-preop_run-01_T1w.nii.gz", - ["tfm.txt"], - {"from": "orig", "to": "T1w", "suffix": "xfm"}, - "sub-07/ses-preop/anat/sub-07_ses-preop_run-01_from-orig_to-T1w_mode-image_xfm.txt", - "da39a3ee5e6b4b0d3255bfef95601890afd80709", + 'sub-07/ses-preop/anat/sub-07_ses-preop_run-01_T1w.nii.gz', + ['tfm.txt'], + {'from': 'orig', 'to': 'T1w', 'suffix': 'xfm'}, + 'sub-07/ses-preop/anat/sub-07_ses-preop_run-01_from-orig_to-T1w_mode-image_xfm.txt', + 'da39a3ee5e6b4b0d3255bfef95601890afd80709', ), ], ) -@pytest.mark.parametrize("dismiss_entities", [None, ("run", "session")]) +@pytest.mark.parametrize('dismiss_entities', [None, ('run', 'session')]) def test_DerivativesDataSink_build_path( tmp_path, interface, @@ -299,32 +297,35 @@ def test_DerivativesDataSink_build_path( ): """Check a few common derivatives generated by NiPreps.""" if interface is bintfs.PrepareDerivative and out_path_base is not None: - pytest.skip("PrepareDerivative does not support out_path_base") + pytest.skip('PrepareDerivative does not support out_path_base') ds_inputs = [] for input_file in input_files: fname = tmp_path / input_file - if fname.name.endswith(".dtseries.nii"): - axes = (nb.cifti2.SeriesAxis(start=0, step=2, size=20), - nb.cifti2.BrainModelAxis.from_mask(np.ones((5, 5, 5)))) + if fname.name.endswith('.dtseries.nii'): + axes = ( + nb.cifti2.SeriesAxis(start=0, step=2, size=20), + nb.cifti2.BrainModelAxis.from_mask(np.ones((5, 5, 5))), + ) hdr = nb.cifti2.cifti2_axes.to_header(axes) - cifti = nb.Cifti2Image(np.zeros(hdr.matrix.get_data_shape(), dtype=np.float32), - header=hdr) - cifti.nifti_header.set_intent("ConnDenseSeries") + cifti = nb.Cifti2Image( + np.zeros(hdr.matrix.get_data_shape(), dtype=np.float32), header=hdr + ) + cifti.nifti_header.set_intent('ConnDenseSeries') cifti.to_filename(fname) - elif fname.name.rstrip(".gz").endswith(".nii"): + elif fname.name.rstrip('.gz').endswith('.nii'): hdr = nb.Nifti1Header() hdr.set_qform(np.eye(4), code=2) hdr.set_sform(np.eye(4), code=2) - units = ("mm", "sec") if "bold" in input_file else ("mm",) - size = (10, 10, 10, 10) if "bold" in input_file else (10, 10, 10) + units = ('mm', 'sec') if 'bold' in input_file else ('mm',) + size = (10, 10, 10, 10) if 
'bold' in input_file else (10, 10, 10) hdr.set_xyzt_units(*units) nb.Nifti1Image(np.zeros(size), np.eye(4), hdr).to_filename(fname) else: - fname.write_text("") + fname.write_text('') ds_inputs.append(str(fname)) - base_directory = tmp_path / "output" + base_directory = tmp_path / 'output' base_directory.mkdir() prep, save = make_prep_and_save( @@ -351,67 +352,63 @@ def test_DerivativesDataSink_build_path( checksum = [checksum] if dismiss_entities: - if "run" in dismiss_entities: - expectation = [e.replace("_run-01", "") for e in expectation] + if 'run' in dismiss_entities: + expectation = [e.replace('_run-01', '') for e in expectation] - if "session" in dismiss_entities: + if 'session' in dismiss_entities: expectation = [ - e.replace("_ses-preop", "").replace("ses-preop/", "") - for e in expectation + e.replace('_ses-preop', '').replace('ses-preop/', '') for e in expectation ] - base = (out_path_base or "niworkflows") if interface == bintfs.DerivativesDataSink else "" + base = (out_path_base or 'niworkflows') if interface == bintfs.DerivativesDataSink else '' for out, exp in zip(output, expectation): assert Path(out).relative_to(base_directory) == Path(base) / exp for out, exp in zip(output, expectation): assert Path(out).relative_to(base_directory) == Path(base) / exp # Regression - some images were given nan scale factors - if out.endswith(".nii") or out.endswith(".nii.gz"): + if out.endswith('.nii') or out.endswith('.nii.gz'): img = nb.load(out) if isinstance(img, nb.Nifti1Image): with nb.openers.ImageOpener(out) as fobj: hdr = img.header.from_fileobj(fobj) - assert not np.isnan(hdr["scl_slope"]) - assert not np.isnan(hdr["scl_inter"]) + assert not np.isnan(hdr['scl_slope']) + assert not np.isnan(hdr['scl_inter']) for out, chksum in zip(output, checksum): - if chksum == "335f1394ce90b58bbf27026b6eeec4d2124c11da": - if sys.version_info < (3, 8): - # Python 3.8 began preserving insertion order of attributes in XML - # Therefore we get a different checksum before/after - chksum = "a37ffb1188dd9a7b708de5b8daef46dac56ef8d4" - elif Version(nb.__version__) < Version('5.3'): + if chksum == '335f1394ce90b58bbf27026b6eeec4d2124c11da': + if Version(nb.__version__) < Version('5.3'): # Nibabel 5.3 avoids unnecessary roundtrips for Cifti2Headers # Older versions transformed a `SeriesStep="2"` into `SeriesStep="2.0"` - chksum = "f7b8755c6ad0d8dcdb60676331b52a23ce288b61" - assert sha1(Path(out).read_bytes()).hexdigest() == chksum + chksum = 'f7b8755c6ad0d8dcdb60676331b52a23ce288b61' + assert sha1(Path(out).read_bytes()).hexdigest() == chksum # noqa: S324 -@pytest.mark.parametrize("interface", [bintfs.DerivativesDataSink, bintfs.PrepareDerivative]) +@pytest.mark.parametrize('interface', [bintfs.DerivativesDataSink, bintfs.PrepareDerivative]) def test_DerivativesDataSink_dtseries_json(tmp_path, interface): - cifti_fname = str(tmp_path / "test.dtseries.nii") + cifti_fname = str(tmp_path / 'test.dtseries.nii') - axes = (nb.cifti2.SeriesAxis(start=0, step=2, size=20), - nb.cifti2.BrainModelAxis.from_mask(np.ones((5, 5, 5)))) + axes = ( + nb.cifti2.SeriesAxis(start=0, step=2, size=20), + nb.cifti2.BrainModelAxis.from_mask(np.ones((5, 5, 5))), + ) hdr = nb.cifti2.cifti2_axes.to_header(axes) - cifti = nb.Cifti2Image(np.zeros(hdr.matrix.get_data_shape(), dtype=np.float32), - header=hdr) - cifti.nifti_header.set_intent("ConnDenseSeries") + cifti = nb.Cifti2Image(np.zeros(hdr.matrix.get_data_shape(), dtype=np.float32), header=hdr) + cifti.nifti_header.set_intent('ConnDenseSeries') cifti.to_filename(cifti_fname) 
- source_file = tmp_path / "bids" / "sub-01" / "func" / "sub-01_task-rest_bold.nii.gz" + source_file = tmp_path / 'bids' / 'sub-01' / 'func' / 'sub-01_task-rest_bold.nii.gz' source_file.parent.mkdir(parents=True) source_file.touch() prep, save = make_prep_and_save( interface, base_directory=str(tmp_path), - out_path_base="", + out_path_base='', in_file=cifti_fname, source_file=str(source_file), compress=False, - space="fsLR", - grayordinates="91k", + space='fsLR', + grayordinates='91k', RepetitionTime=2.0, ) @@ -420,44 +417,44 @@ def test_DerivativesDataSink_dtseries_json(tmp_path, interface): out_path = Path(save_result.outputs.out_file) - assert out_path.name == "sub-01_task-rest_space-fsLR_bold.dtseries.nii" - old_sidecar = out_path.with_name("sub-01_task-rest_space-fsLR_bold.dtseries.json") - new_sidecar = out_path.with_name("sub-01_task-rest_space-fsLR_bold.json") + assert out_path.name == 'sub-01_task-rest_space-fsLR_bold.dtseries.nii' + old_sidecar = out_path.with_name('sub-01_task-rest_space-fsLR_bold.dtseries.json') + new_sidecar = out_path.with_name('sub-01_task-rest_space-fsLR_bold.json') assert not old_sidecar.exists() assert new_sidecar.exists() - assert "RepetitionTime" in json.loads(new_sidecar.read_text()) + assert 'RepetitionTime' in json.loads(new_sidecar.read_text()) -@pytest.mark.parametrize("interface", [bintfs.DerivativesDataSink, bintfs.PrepareDerivative]) +@pytest.mark.parametrize('interface', [bintfs.DerivativesDataSink, bintfs.PrepareDerivative]) @pytest.mark.parametrize( - "space, size, units, xcodes, zipped, fixed, data_dtype", + ('space', 'size', 'units', 'xcodes', 'zipped', 'fixed', 'data_dtype'), [ - ("T1w", (30, 30, 30, 10), ("mm", "sec"), (2, 2), True, [False], None), - ("T1w", (30, 30, 30, 10), ("mm", "sec"), (0, 2), True, [True], "float64"), - ("T1w", (30, 30, 30, 10), ("mm", "sec"), (0, 0), True, [True], " 1: data *= np.linspace(0.6, 1.0, num=10)[::-1] t_mask = np.zeros(shape[3], dtype=bool) t_mask[:3] = True - fname = str(tmpdir.join("file1.nii.gz")) + fname = str(tmpdir.join('file1.nii.gz')) nb.Nifti1Image(data, np.eye(4), None).to_filename(fname) avg = im.RobustAverage(in_file=fname, t_mask=list(t_mask)).run() @@ -194,24 +196,24 @@ def test_TemplateDimensions(tmp_path): ] for i, (shape, zoom) in enumerate(zip(shapes, zooms)): - img = nb.Nifti1Image(np.ones(shape, dtype="float32"), np.eye(4)) + img = nb.Nifti1Image(np.ones(shape, dtype='float32'), np.eye(4)) img.header.set_zooms(zoom) - img.to_filename(tmp_path / f"test{i}.nii") + img.to_filename(tmp_path / f'test{i}.nii') - anat_list = [str(tmp_path / f"test{i}.nii") for i in range(2)] + anat_list = [str(tmp_path / f'test{i}.nii') for i in range(2)] td = im.TemplateDimensions(anat_list=anat_list) res = td.run() report = Path(res.outputs.out_report).read_text() - assert "Input T1w images: 2" in report - assert "Output dimensions: 11x11x11" in report - assert "Output voxel size: 0.9mm x 0.9mm x 0.9mm" in report - assert "Discarded images: 0" in report + assert 'Input T1w images: 2' in report + assert 'Output dimensions: 11x11x11' in report + assert 'Output voxel size: 0.9mm x 0.9mm x 0.9mm' in report + assert 'Discarded images: 0' in report assert res.outputs.t1w_valid_list == anat_list assert res.outputs.anat_valid_list == anat_list assert np.allclose(res.outputs.target_zooms, (0.9, 0.9, 0.9)) assert res.outputs.target_shape == (11, 11, 11) - with pytest.warns(UserWarning, match="t1w_list .* is deprecated"): + with pytest.warns(UserWarning, match='t1w_list .* is deprecated'): 
im.TemplateDimensions(t1w_list=anat_list) diff --git a/niworkflows/interfaces/tests/test_itk.py b/niworkflows/interfaces/tests/test_itk.py index 7fd015f5b73..7648ab02d3f 100644 --- a/niworkflows/interfaces/tests/test_itk.py +++ b/niworkflows/interfaces/tests/test_itk.py @@ -33,26 +33,26 @@ from .data import load_test_data -@pytest.mark.skipif(Info.version() is None, reason="Missing ANTs") -@pytest.mark.parametrize("ext", (".nii", ".nii.gz")) -@pytest.mark.parametrize("copy_dtype", (True, False)) -@pytest.mark.parametrize("in_dtype", ("i2", "f4")) +@pytest.mark.skipif(Info.version() is None, reason='Missing ANTs') +@pytest.mark.parametrize('ext', ['.nii', '.nii.gz']) +@pytest.mark.parametrize('copy_dtype', [True, False]) +@pytest.mark.parametrize('in_dtype', ['i2', 'f4']) def test_applytfms(tmpdir, ext, copy_dtype, in_dtype): import nibabel as nb import numpy as np - in_file = str(tmpdir / ("src" + ext)) + in_file = str(tmpdir / ('src' + ext)) nii = nb.Nifti1Image(np.zeros((5, 5, 5), dtype=np.float32), np.eye(4)) nii.set_data_dtype(in_dtype) nii.to_filename(in_file) - in_xform = data.load("itkIdentityTransform.txt") + in_xform = data.load('itkIdentityTransform.txt') - ifargs = {"copy_dtype": copy_dtype, "reference_image": in_file} + ifargs = {'copy_dtype': copy_dtype, 'reference_image': in_file} args = (in_file, in_xform, ifargs, 0, str(tmpdir)) out_file, cmdline = _applytfms(args) - assert out_file == str(tmpdir / ("src_xform-%05d%s" % (0, ext))) + assert out_file == str(tmpdir / ('src_xform-%05d%s' % (0, ext))) out_nii = nb.load(out_file) assert np.allclose(nii.affine, out_nii.affine) @@ -67,11 +67,11 @@ def test_MCFLIRT2ITK(tmp_path): fsl2itk = pe.Node( MCFLIRT2ITK( - in_files=[str(test_data / "MAT_0098"), str(test_data / "MAT_0099")], - in_reference=str(test_data / "boldref.nii"), - in_source=str(test_data / "boldref.nii"), + in_files=[str(test_data / 'MAT_0098'), str(test_data / 'MAT_0099')], + in_reference=str(test_data / 'boldref.nii'), + in_source=str(test_data / 'boldref.nii'), ), - name="fsl2itk", + name='fsl2itk', base_dir=str(tmp_path), ) @@ -82,22 +82,22 @@ def test_MCFLIRT2ITK(tmp_path): lines = out_file.read_text().splitlines() assert lines[:2] == [ - "#Insight Transform File V1.0", - "#Transform 0", + '#Insight Transform File V1.0', + '#Transform 0', ] assert re.match( - r"Transform: (MatrixOffsetTransformBase|AffineTransform)_(float|double)_3_3", + r'Transform: (MatrixOffsetTransformBase|AffineTransform)_(float|double)_3_3', lines[2], ) - assert lines[3].startswith("Parameters: ") - assert lines[4] == "FixedParameters: 0 0 0" - offset = 1 if lines[5] == "" else 0 - assert lines[5 + offset] == "#Transform 1" + assert lines[3].startswith('Parameters: ') + assert lines[4] == 'FixedParameters: 0 0 0' + offset = 1 if lines[5] == '' else 0 + assert lines[5 + offset] == '#Transform 1' assert lines[6 + offset] == lines[2] - assert lines[7 + offset].startswith("Parameters: ") + assert lines[7 + offset].startswith('Parameters: ') - params0 = np.array([float(p) for p in lines[3].split(" ")[1:]]) - params1 = np.array([float(p) for p in lines[7 + offset].split(" ")[1:]]) + params0 = np.array([float(p) for p in lines[3].split(' ')[1:]]) + params1 = np.array([float(p) for p in lines[7 + offset].split(' ')[1:]]) # Empirically determined assert np.allclose( params0, diff --git a/niworkflows/interfaces/tests/test_morphology.py b/niworkflows/interfaces/tests/test_morphology.py index bb457d6f1b2..cd1ab71b9c8 100644 --- a/niworkflows/interfaces/tests/test_morphology.py +++ 
b/niworkflows/interfaces/tests/test_morphology.py @@ -21,10 +21,12 @@ # https://www.nipreps.org/community/licensing/ # """Test morphology module.""" -from pathlib import Path + import shutil -import numpy as np +from pathlib import Path + import nibabel as nb +import numpy as np from niworkflows.interfaces.morphology import ( BinaryDilation, @@ -35,40 +37,40 @@ def test_BinaryDilation_interface(tmpdir): """Check the dilation interface.""" - data = np.zeros((80, 80, 80), dtype="uint8") + data = np.zeros((80, 80, 80), dtype='uint8') data[30:-30, 35:-35, 20:-20] = 1 - nb.Nifti1Image(data, np.eye(4), None).to_filename("mask.nii.gz") + nb.Nifti1Image(data, np.eye(4), None).to_filename('mask.nii.gz') out1 = ( BinaryDilation( - in_mask=str(Path("mask.nii.gz").absolute()), + in_mask=str(Path('mask.nii.gz').absolute()), radius=4, ) .run() .outputs.out_mask ) - shutil.move(out1, "large_radius.nii.gz") + shutil.move(out1, 'large_radius.nii.gz') out2 = ( BinaryDilation( - in_mask=str(Path("mask.nii.gz").absolute()), + in_mask=str(Path('mask.nii.gz').absolute()), radius=1, ) .run() .outputs.out_mask ) - shutil.move(out2, "small_radius.nii.gz") + shutil.move(out2, 'small_radius.nii.gz') out_final = ( BinarySubtraction( - in_base=str(Path("large_radius.nii.gz").absolute()), - in_subtract=str(Path("small_radius.nii.gz").absolute()), + in_base=str(Path('large_radius.nii.gz').absolute()), + in_subtract=str(Path('small_radius.nii.gz').absolute()), ) .run() .outputs.out_mask ) - out_data = np.asanyarray(nb.load(out_final).dataobj, dtype="uint8") + out_data = np.asanyarray(nb.load(out_final).dataobj, dtype='uint8') assert np.all(out_data[data] == 0) diff --git a/niworkflows/interfaces/tests/test_nibabel.py b/niworkflows/interfaces/tests/test_nibabel.py index bd61ee22fae..7207502a344 100644 --- a/niworkflows/interfaces/tests/test_nibabel.py +++ b/niworkflows/interfaces/tests/test_nibabel.py @@ -21,16 +21,24 @@ # https://www.nipreps.org/community/licensing/ # """test nibabel interfaces.""" + import json import os -from pathlib import Path import uuid -import numpy as np +from pathlib import Path + import nibabel as nb +import numpy as np import pytest from ..nibabel import ( - Binarize, ApplyMask, SplitSeries, MergeSeries, MergeROIs, MapLabels, ReorientImage + ApplyMask, + Binarize, + MapLabels, + MergeROIs, + MergeSeries, + ReorientImage, + SplitSeries, ) @@ -41,10 +49,11 @@ def create_roi(tmp_path): def _create_roi(affine, img_data, roi_index): img_data[tuple(roi_index)] = 1 nii = nb.Nifti1Image(img_data, affine) - filename = tmp_path / f"{str(uuid.uuid4())}.nii.gz" + filename = tmp_path / f'{str(uuid.uuid4())}.nii.gz' files.append(filename) nii.to_filename(filename) return filename + yield _create_roi # cleanup files for f in files: @@ -62,7 +71,7 @@ def create_image(data, filename): @pytest.mark.parametrize( - "affine, data, roi_index, error, err_message", + ('affine', 'data', 'roi_index', 'error', 'err_message'), [ (np.eye(4), np.zeros((2, 2, 2, 2), dtype=np.uint16), [1, 0], None, None), ( @@ -70,21 +79,21 @@ def create_image(data, filename): np.zeros((2, 2, 3, 2), dtype=np.uint16), [1, 0], True, - "Mismatch in image shape", + 'Mismatch in image shape', ), ( bad_affine, np.zeros((2, 2, 2, 2), dtype=np.uint16), [1, 0], True, - "Mismatch in affine", + 'Mismatch in affine', ), ( np.eye(4), np.zeros((2, 2, 2, 2), dtype=np.uint16), [0, 0, 0], True, - "Overlapping ROIs", + 'Overlapping ROIs', ), ], ) @@ -99,7 +108,7 @@ def test_merge_rois(tmpdir, create_roi, affine, data, roi_index, error, err_mess merge.run() 
return # otherwise check expected exceptions - with pytest.raises(AssertionError) as err: + with pytest.raises(ValueError, match=r'Mismatch|Overlapping') as err: merge.run() assert err_message in str(err.value) @@ -111,10 +120,10 @@ def test_Binarize(tmp_path): mask = np.zeros((20, 20, 20), dtype=bool) mask[5:15, 5:15, 5:15] = bool - data = np.zeros_like(mask, dtype="float32") + data = np.zeros_like(mask, dtype='float32') data[mask] = np.random.gamma(2, size=mask.sum()) - in_file = tmp_path / "input.nii.gz" + in_file = tmp_path / 'input.nii.gz' nb.Nifti1Image(data, np.eye(4), None).to_filename(str(in_file)) binif = Binarize(thresh_low=0.0, in_file=str(in_file)).run() @@ -133,47 +142,43 @@ def test_ApplyMask(tmp_path): mask[8:11, 8:11, 8:11] = 1.0 # Test the 3D - in_file = tmp_path / "input3D.nii.gz" + in_file = tmp_path / 'input3D.nii.gz' nb.Nifti1Image(data, np.eye(4), None).to_filename(str(in_file)) - in_mask = tmp_path / "mask.nii.gz" + in_mask = tmp_path / 'mask.nii.gz' nb.Nifti1Image(mask, np.eye(4), None).to_filename(str(in_mask)) masked1 = ApplyMask(in_file=str(in_file), in_mask=str(in_mask), threshold=0.4).run() - assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 5 ** 3 + assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 5**3 masked1 = ApplyMask(in_file=str(in_file), in_mask=str(in_mask), threshold=0.6).run() - assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 3 ** 3 + assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 3**3 data4d = np.stack((data, 2 * data, 3 * data), axis=-1) # Test the 4D case - in_file4d = tmp_path / "input4D.nii.gz" + in_file4d = tmp_path / 'input4D.nii.gz' nb.Nifti1Image(data4d, np.eye(4), None).to_filename(str(in_file4d)) - masked1 = ApplyMask( - in_file=str(in_file4d), in_mask=str(in_mask), threshold=0.4 - ).run() - assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 5 ** 3 * 6 + masked1 = ApplyMask(in_file=str(in_file4d), in_mask=str(in_mask), threshold=0.4).run() + assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 5**3 * 6 - masked1 = ApplyMask( - in_file=str(in_file4d), in_mask=str(in_mask), threshold=0.6 - ).run() - assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 3 ** 3 * 6 + masked1 = ApplyMask(in_file=str(in_file4d), in_mask=str(in_mask), threshold=0.6).run() + assert nb.load(masked1.outputs.out_file).get_fdata().sum() == 3**3 * 6 # Test errors nb.Nifti1Image(mask, 2 * np.eye(4), None).to_filename(str(in_mask)) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=r'affines are not similar'): ApplyMask(in_file=str(in_file), in_mask=str(in_mask), threshold=0.4).run() nb.Nifti1Image(mask[:-1, ...], np.eye(4), None).to_filename(str(in_mask)) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=r'sizes do not match'): ApplyMask(in_file=str(in_file), in_mask=str(in_mask), threshold=0.4).run() - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=r'sizes do not match'): ApplyMask(in_file=str(in_file4d), in_mask=str(in_mask), threshold=0.4).run() @pytest.mark.parametrize( - "shape,exp_n", + ('shape', 'exp_n'), [ ((20, 20, 20, 15), 15), ((20, 20, 20), 1), @@ -193,18 +198,16 @@ def test_SplitSeries(tmp_path, shape, exp_n): """Test 4-to-3 NIfTI split interface.""" os.chdir(tmp_path) - in_file = str(tmp_path / "input.nii.gz") + in_file = str(tmp_path / 'input.nii.gz') nb.Nifti1Image(np.ones(shape, dtype=float), np.eye(4), None).to_filename(in_file) _interface = SplitSeries(in_file=in_file) if exp_n > 0: split = 
_interface.run() - n = int(isinstance(split.outputs.out_files, str)) or len( - split.outputs.out_files - ) + n = int(isinstance(split.outputs.out_files, str)) or len(split.outputs.out_files) assert n == exp_n else: - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=r'Invalid shape'): _interface.run() @@ -212,23 +215,19 @@ def test_MergeSeries(tmp_path): """Test 3-to-4 NIfTI concatenation interface.""" os.chdir(str(tmp_path)) - in_file = tmp_path / "input3D.nii.gz" - nb.Nifti1Image(np.ones((20, 20, 20), dtype=float), np.eye(4), None).to_filename( - str(in_file) - ) + in_file = tmp_path / 'input3D.nii.gz' + nb.Nifti1Image(np.ones((20, 20, 20), dtype=float), np.eye(4), None).to_filename(str(in_file)) merge = MergeSeries(in_files=[str(in_file)] * 5).run() assert nb.load(merge.outputs.out_file).dataobj.shape == (20, 20, 20, 5) - in_4D = tmp_path / "input4D.nii.gz" - nb.Nifti1Image(np.ones((20, 20, 20, 4), dtype=float), np.eye(4), None).to_filename( - str(in_4D) - ) + in_4D = tmp_path / 'input4D.nii.gz' + nb.Nifti1Image(np.ones((20, 20, 20, 4), dtype=float), np.eye(4), None).to_filename(str(in_4D)) merge = MergeSeries(in_files=[str(in_file)] + [str(in_4D)]).run() assert nb.load(merge.outputs.out_file).dataobj.shape == (20, 20, 20, 5) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=r'incorrect number of dimensions'): MergeSeries(in_files=[str(in_file)] + [str(in_4D)], allow_4D=False).run() @@ -244,7 +243,7 @@ def test_MergeSeries_affines(tmp_path): nb.Nifti1Image(data, aff, None).to_filename(files[1]) # affine mismatch will cause this to fail - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=r'does not match affine'): MergeSeries(in_files=files).run() # but works if we set a tolerance MergeSeries(in_files=files, affine_tolerance=1e-04).run() @@ -256,7 +255,7 @@ def test_MergeSeries_affines(tmp_path): @pytest.mark.parametrize( - "data,mapping,tojson,expected", + ('data', 'mapping', 'tojson', 'expected'), [ (LABEL_INPUT, LABEL_MAPPINGS, False, LABEL_OUTPUT), (LABEL_INPUT, LABEL_MAPPINGS, True, LABEL_OUTPUT), @@ -264,7 +263,7 @@ def test_MergeSeries_affines(tmp_path): ) def test_map_labels(tmpdir, data, mapping, tojson, expected): tmpdir.chdir() - in_file = create_image(data, Path("test.nii.gz")) + in_file = create_image(data, Path('test.nii.gz')) maplbl = MapLabels(in_file=in_file) if tojson: map_file = Path('mapping.json') @@ -288,7 +287,7 @@ def create_save_img(ornt: str): data = np.random.rand(2, 2, 2) img = nb.Nifti1Image(data, affine=np.eye(4)) # img will always be in RAS at the start - ras = nb.orientations.axcodes2ornt("RAS") + ras = nb.orientations.axcodes2ornt('RAS') if ornt != 'RAS': new = nb.orientations.axcodes2ornt(ornt) xfm = nb.orientations.ornt_transform(ras, new) @@ -299,13 +298,13 @@ def create_save_img(ornt: str): @pytest.mark.parametrize( - "in_ornt,out_ornt", + ('in_ornt', 'out_ornt'), [ - ("RAS", "RAS"), - ("RAS", "LAS"), - ("LAS", "RAS"), - ("RAS", "RPI"), - ("LPI", "RAS"), + ('RAS', 'RAS'), + ('RAS', 'LAS'), + ('LAS', 'RAS'), + ('RAS', 'RPI'), + ('LPI', 'RAS'), ], ) def test_reorient_image(tmpdir, in_ornt, out_ornt): diff --git a/niworkflows/interfaces/tests/test_plotting.py b/niworkflows/interfaces/tests/test_plotting.py index e6743393ddc..e5fc0a10934 100644 --- a/niworkflows/interfaces/tests/test_plotting.py +++ b/niworkflows/interfaces/tests/test_plotting.py @@ -21,20 +21,23 @@ # https://www.nipreps.org/community/licensing/ # """Tests plotting interfaces.""" + import os + import nibabel as nb + from 
niworkflows import viz -from niworkflows.utils.timeseries import _cifti_timeseries, _nifti_timeseries from niworkflows.interfaces.plotting import _get_tr from niworkflows.tests.data import load_test_data +from niworkflows.utils.timeseries import _cifti_timeseries, _nifti_timeseries def test_cifti_carpetplot(): """Exercise extraction of timeseries from CIFTI2.""" - save_artifacts = os.getenv("SAVE_CIRCLE_ARTIFACTS", False) + save_artifacts = os.getenv('SAVE_CIRCLE_ARTIFACTS', False) cifti_file = load_test_data( - "sub-01_task-mixedgamblestask_run-02_space-fsLR_den-91k_bold.dtseries.nii" + 'sub-01_task-mixedgamblestask_run-02_space-fsLR_den-91k_bold.dtseries.nii' ) data, segments = _cifti_timeseries(str(cifti_file)) viz.plot_carpet( @@ -42,34 +45,26 @@ def test_cifti_carpetplot(): segments, tr=_get_tr(nb.load(cifti_file)), output_file=( - os.path.join( - save_artifacts, "carpetplot_cifti.svg" - ) if save_artifacts else None + os.path.join(save_artifacts, 'carpetplot_cifti.svg') if save_artifacts else None ), drop_trs=0, - cmap="paired", + cmap='paired', ) def test_nifti_carpetplot(): """Exercise extraction of timeseries from CIFTI2.""" - save_artifacts = os.getenv("SAVE_CIRCLE_ARTIFACTS", False) + save_artifacts = os.getenv('SAVE_CIRCLE_ARTIFACTS', False) - nifti_file = load_test_data( - "sub-ds205s03_task-functionallocalizer_run-01_bold_volreg.nii.gz" - ) - seg_file = load_test_data( - "sub-ds205s03_task-functionallocalizer_run-01_bold_parc.nii.gz" - ) + nifti_file = load_test_data('sub-ds205s03_task-functionallocalizer_run-01_bold_volreg.nii.gz') + seg_file = load_test_data('sub-ds205s03_task-functionallocalizer_run-01_bold_parc.nii.gz') data, segments = _nifti_timeseries(str(nifti_file), str(seg_file)) viz.plot_carpet( data, segments, tr=_get_tr(nb.load(nifti_file)), output_file=( - os.path.join( - save_artifacts, "carpetplot_nifti.svg" - ) if save_artifacts else None + os.path.join(save_artifacts, 'carpetplot_nifti.svg') if save_artifacts else None ), drop_trs=0, ) diff --git a/niworkflows/interfaces/tests/test_utility.py b/niworkflows/interfaces/tests/test_utility.py index 0b18b55b2c2..5cb3be767a3 100644 --- a/niworkflows/interfaces/tests/test_utility.py +++ b/niworkflows/interfaces/tests/test_utility.py @@ -21,22 +21,25 @@ # https://www.nipreps.org/community/licensing/ # """KeySelect tests.""" + from pathlib import Path + import pytest + from ..utility import KeySelect, _tsv2json def test_KeySelect(): """Test KeySelect.""" - with pytest.raises(ValueError): - KeySelect(fields="field1", keys=["a", "b", "c", "a"]) + with pytest.raises(ValueError, match=r'duplicated entries'): + KeySelect(fields='field1', keys=['a', 'b', 'c', 'a']) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=r'list or .* must be provided'): KeySelect(fields=[]) def test_tsv2json(tmp_path): - Path.write_bytes(tmp_path / 'empty.tsv', bytes()) + Path.write_bytes(tmp_path / 'empty.tsv', b'') res = _tsv2json(tmp_path / 'empty.tsv', None, 'any_column') assert res == {} res = _tsv2json(tmp_path / 'empty.tsv', None, 'any_column', additional_metadata={'a': 'b'}) diff --git a/niworkflows/interfaces/utility.py b/niworkflows/interfaces/utility.py index 2f0b1843f39..4a3dd60108c 100644 --- a/niworkflows/interfaces/utility.py +++ b/niworkflows/interfaces/utility.py @@ -21,34 +21,35 @@ # https://www.nipreps.org/community/licensing/ # """Interfaces under evaluation before upstreaming to nipype.interfaces.utility.""" -import numpy as np -import re + import json +import re from collections import OrderedDict 
-from nipype.utils.filemanip import fname_presuffix -from nipype.interfaces.io import add_traits +import numpy as np from nipype.interfaces.base import ( BaseInterface, BaseInterfaceInputSpec, DynamicTraitedSpec, File, InputMultiObject, - isdefined, SimpleInterface, Str, TraitedSpec, + isdefined, traits, ) +from nipype.interfaces.io import add_traits +from nipype.utils.filemanip import fname_presuffix class _KeySelectInputSpec(DynamicTraitedSpec): - key = Str(mandatory=True, desc="selective key") - keys = InputMultiObject(Str, mandatory=True, min=1, desc="index of keys") + key = Str(mandatory=True, desc='selective key') + keys = InputMultiObject(Str, mandatory=True, min=1, desc='index of keys') class _KeySelectOutputSpec(DynamicTraitedSpec): - key = Str(desc="propagates selected key") + key = Str(desc='propagates selected key') class KeySelect(BaseInterface): @@ -161,15 +162,14 @@ def __init__(self, keys=None, fields=None, **inputs): # Handle and initiate fields if not fields: raise ValueError( - "A list or multiplexed fields must be provided at " - "instantiation time." + 'A list or multiplexed fields must be provided at instantiation time.' ) if isinstance(fields, str): fields = [fields] _invalid = set(self.input_spec.class_editable_traits()).intersection(fields) if _invalid: - raise ValueError("Some fields are invalid (%s)." % ", ".join(_invalid)) + raise ValueError(f'Some fields are invalid ({", ".join(_invalid)}).') self._fields = fields @@ -185,29 +185,25 @@ def __init__(self, keys=None, fields=None, **inputs): setattr(self.inputs, in_field, inputs[in_field]) def _check_len(self, name, new): - if name == "keys": + if name == 'keys': nitems = len(new) if len(set(new)) != nitems: - raise ValueError( - "Found duplicated entries in the index of ordered keys" - ) + raise ValueError('Found duplicated entries in the index of ordered keys') if not isdefined(self.inputs.keys): return - if name == "key" and new not in self.inputs.keys: - raise ValueError('Selected key "%s" not found in the index' % new) + if name == 'key' and new not in self.inputs.keys: + raise ValueError(f'Selected key "{new}" not found in the index') if name in self._fields: if isinstance(new, str) or len(new) < 1: - raise ValueError( - 'Trying to set an invalid value (%s) for input "%s"' % (new, name) - ) + raise ValueError(f'Trying to set an invalid value ({new}) for input "{name}"') if len(new) != len(self.inputs.keys): raise ValueError( - 'Length of value (%s) for input field "%s" does not match ' - "the length of the indexing list." % (new, name) + f'Length of value ({new}) for input field "{name}" does not match ' + 'the length of the indexing list.' 
) def _run_interface(self, runtime): @@ -218,7 +214,7 @@ def _list_outputs(self): outputs = {k: getattr(self.inputs, k)[index] for k in self._fields} - outputs["key"] = self.inputs.key + outputs['key'] = self.inputs.key return outputs def _outputs(self): @@ -228,12 +224,12 @@ def _outputs(self): class _AddTSVHeaderInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="input file") - columns = traits.List(traits.Str, mandatory=True, desc="header for columns") + in_file = File(exists=True, mandatory=True, desc='input file') + columns = traits.List(traits.Str, mandatory=True, desc='header for columns') class _AddTSVHeaderOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="output average file") + out_file = File(exists=True, desc='output average file') class AddTSVHeader(SimpleInterface): @@ -259,13 +255,14 @@ class AddTSVHeader(SimpleInterface): True """ + input_spec = _AddTSVHeaderInputSpec output_spec = _AddTSVHeaderOutputSpec def _run_interface(self, runtime): out_file = fname_presuffix( self.inputs.in_file, - suffix="_motion.tsv", + suffix='_motion.tsv', newpath=runtime.cwd, use_ext=False, ) @@ -273,24 +270,24 @@ def _run_interface(self, runtime): np.savetxt( out_file, data, - delimiter="\t", - header="\t".join(self.inputs.columns), - comments="", + delimiter='\t', + header='\t'.join(self.inputs.columns), + comments='', ) - self._results["out_file"] = out_file + self._results['out_file'] = out_file return runtime class _JoinTSVColumnsInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="input file") - join_file = File(exists=True, mandatory=True, desc="file to be adjoined") - side = traits.Enum("right", "left", usedefault=True, desc="where to join") - columns = traits.List(traits.Str, desc="header for columns") + in_file = File(exists=True, mandatory=True, desc='input file') + join_file = File(exists=True, mandatory=True, desc='file to be adjoined') + side = traits.Enum('right', 'left', usedefault=True, desc='where to join') + columns = traits.List(traits.Str, desc='header for columns') class _JoinTSVColumnsOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="output TSV file") + out_file = File(exists=True, desc='output TSV file') class JoinTSVColumns(SimpleInterface): @@ -348,20 +345,21 @@ class JoinTSVColumns(SimpleInterface): True """ + input_spec = _JoinTSVColumnsInputSpec output_spec = _JoinTSVColumnsOutputSpec def _run_interface(self, runtime): out_file = fname_presuffix( self.inputs.in_file, - suffix="_joined.tsv", + suffix='_joined.tsv', newpath=runtime.cwd, use_ext=False, ) - header = "" + header = '' if isdefined(self.inputs.columns) and self.inputs.columns: - header = "\t".join(self.inputs.columns) + header = '\t'.join(self.inputs.columns) with open(self.inputs.in_file) as ifh: data = ifh.read().splitlines(keepends=False) @@ -370,33 +368,33 @@ def _run_interface(self, runtime): join = ifh.read().splitlines(keepends=False) if len(data) != len(join): - raise ValueError("Number of columns in datasets do not match") + raise ValueError('Number of columns in datasets do not match') merged = [] for d, j in zip(data, join): - line = "%s\t%s" % ((j, d) if self.inputs.side == "left" else (d, j)) + line = '%s\t%s' % ((j, d) if self.inputs.side == 'left' else (d, j)) merged.append(line) if header: merged.insert(0, header) - with open(out_file, "w") as ofh: - ofh.write("\n".join(merged)) + with open(out_file, 'w') as ofh: + ofh.write('\n'.join(merged)) - self._results["out_file"] = out_file + 
self._results['out_file'] = out_file return runtime class _DictMergeInputSpec(BaseInterfaceInputSpec): in_dicts = traits.List( traits.Either(traits.Dict, traits.Instance(OrderedDict)), - desc="Dictionaries to be merged. In the event of a collision, values " - "from dictionaries later in the list receive precedence.", + desc='Dictionaries to be merged. In the event of a collision, values ' + 'from dictionaries later in the list receive precedence.', ) class _DictMergeOutputSpec(TraitedSpec): - out_dict = traits.Dict(desc="Merged dictionary") + out_dict = traits.Dict(desc='Merged dictionary') class DictMerge(SimpleInterface): @@ -409,45 +407,43 @@ def _run_interface(self, runtime): out_dict = {} for in_dict in self.inputs.in_dicts: out_dict.update(in_dict) - self._results["out_dict"] = out_dict + self._results['out_dict'] = out_dict return runtime class _TSV2JSONInputSpec(BaseInterfaceInputSpec): - in_file = File(exists=True, mandatory=True, desc="Input TSV file") + in_file = File(exists=True, mandatory=True, desc='Input TSV file') index_column = traits.Str( mandatory=True, - desc="Name of the column in the TSV to be used " - "as the top-level key in the JSON. All " - "remaining columns will be assigned as " - "nested keys.", + desc='Name of the column in the TSV to be used ' + 'as the top-level key in the JSON. All ' + 'remaining columns will be assigned as ' + 'nested keys.', ) output = traits.Either( None, File, - desc="Path where the output file is to be saved. " - "If this is `None`, then a JSON-compatible " - "dictionary is returned instead.", + desc='Path where the output file is to be saved. ' + 'If this is `None`, then a JSON-compatible ' + 'dictionary is returned instead.', ) additional_metadata = traits.Either( None, traits.Dict, traits.Instance(OrderedDict), usedefault=True, - desc="Any additional metadata that " - "should be applied to all " - "entries in the JSON.", + desc='Any additional metadata that should be applied to all entries in the JSON.', ) drop_columns = traits.Either( None, traits.List(), usedefault=True, - desc="List of columns in the TSV to be dropped from the JSON.", + desc='List of columns in the TSV to be dropped from the JSON.', ) enforce_case = traits.Bool( True, usedefault=True, - desc="Enforce snake case for top-level keys and camel case for nested keys", + desc='Enforce snake case for top-level keys and camel case for nested keys', ) @@ -456,7 +452,7 @@ class _TSV2JSONOutputSpec(TraitedSpec): traits.Dict, File(exists=True), traits.Instance(OrderedDict), - desc="Output dictionary or JSON file", + desc='Output dictionary or JSON file', ) @@ -469,12 +465,12 @@ class TSV2JSON(SimpleInterface): def _run_interface(self, runtime): if not isdefined(self.inputs.output): output = fname_presuffix( - self.inputs.in_file, suffix=".json", newpath=runtime.cwd, use_ext=False + self.inputs.in_file, suffix='.json', newpath=runtime.cwd, use_ext=False ) else: output = self.inputs.output - self._results["output"] = _tsv2json( + self._results['output'] = _tsv2json( in_tsv=self.inputs.in_file, out_json=output, index_column=self.inputs.index_column, @@ -525,45 +521,42 @@ def _tsv2json( # Adapted from https://dev.to/rrampage/snake-case-to-camel-case-and- ... # back-using-regular-expressions-and-python-m9j - re_to_camel = r"(.*?)_([a-zA-Z0-9])" - re_to_snake = r"(^.+?|.*?)((? 
and" - " has the desired output mesh", + desc='A sphere surface that is in register with and' + ' has the desired output mesh', ) method = traits.Enum( - "ADAP_BARY_AREA", - "BARYCENTRIC", - argstr="%s", + 'ADAP_BARY_AREA', + 'BARYCENTRIC', + argstr='%s', mandatory=True, position=3, - desc="The method name - ADAP_BARY_AREA method is recommended for" - " ordinary metric data, because it should use all data while" - " downsampling, unlike BARYCENTRIC. If ADAP_BARY_AREA is used," - " exactly one of area_surfs or area_metrics must be specified", + desc='The method name - ADAP_BARY_AREA method is recommended for' + ' ordinary metric data, because it should use all data while' + ' downsampling, unlike BARYCENTRIC. If ADAP_BARY_AREA is used,' + ' exactly one of area_surfs or area_metrics must be specified', ) out_file = File( - name_source=["new_sphere"], - name_template="%s.out", + name_source=['new_sphere'], + name_template='%s.out', keep_extension=True, - argstr="%s", + argstr='%s', position=4, - desc="The output metric", + desc='The output metric', ) area_surfs = traits.Bool( position=5, - argstr="-area-surfs", - xor=["area_metrics"], - desc="Specify surfaces to do vertex area correction based on", + argstr='-area-surfs', + xor=['area_metrics'], + desc='Specify surfaces to do vertex area correction based on', ) area_metrics = traits.Bool( position=5, - argstr="-area-metrics", - xor=["area_surfs"], - desc="Specify vertex area metrics to do area correction based on", + argstr='-area-metrics', + xor=['area_surfs'], + desc='Specify vertex area metrics to do area correction based on', ) current_area = File( exists=True, position=6, - argstr="%s", - desc="A relevant anatomical surface with mesh OR" - " a metric file with vertex areas for mesh", + argstr='%s', + desc='A relevant anatomical surface with mesh OR' + ' a metric file with vertex areas for mesh', ) new_area = File( exists=True, position=7, - argstr="%s", - desc="A relevant anatomical surface with mesh OR" - " a metric file with vertex areas for mesh", + argstr='%s', + desc='A relevant anatomical surface with mesh OR' + ' a metric file with vertex areas for mesh', ) roi_metric = File( exists=True, position=8, - argstr="-current-roi %s", - desc="Input roi on the current mesh used to exclude non-data vertices", + argstr='-current-roi %s', + desc='Input roi on the current mesh used to exclude non-data vertices', ) valid_roi_out = traits.Bool( position=9, - argstr="-valid-roi-out", - desc="Output the ROI of vertices that got data from valid source vertices", + argstr='-valid-roi-out', + desc='Output the ROI of vertices that got data from valid source vertices', ) largest = traits.Bool( position=10, - argstr="-largest", - desc="Use only the value of the vertex with the largest weight", + argstr='-largest', + desc='Use only the value of the vertex with the largest weight', ) class MetricResampleOutputSpec(TraitedSpec): - out_file = File(exists=True, desc="the output metric") - roi_file = File(desc="ROI of vertices that got data from valid source vertices") + out_file = File(exists=True, desc='the output metric') + roi_file = File(desc='ROI of vertices that got data from valid source vertices') class MetricResample(WBCommand, OpenMPCommandMixin): @@ -276,184 +277,182 @@ class MetricResample(WBCommand, OpenMPCommandMixin): input_spec = MetricResampleInputSpec output_spec = MetricResampleOutputSpec - _cmd = "wb_command -metric-resample" + _cmd = 'wb_command -metric-resample' def _format_arg(self, opt, spec, val): - if opt in ["current_area", 
"new_area"]: + if opt in ['current_area', 'new_area']: if not self.inputs.area_surfs and not self.inputs.area_metrics: - raise ValueError( - "{} was set but neither area_surfs or area_metrics were set".format(opt) - ) - if opt == "method": + raise ValueError(f'{opt} was set but neither area_surfs or area_metrics were set') + if opt == 'method': if ( - val == "ADAP_BARY_AREA" + val == 'ADAP_BARY_AREA' and not self.inputs.area_surfs and not self.inputs.area_metrics ): - raise ValueError("Exactly one of area_surfs or area_metrics must be specified") - if opt == "valid_roi_out" and val: + raise ValueError('Exactly one of area_surfs or area_metrics must be specified') + if opt == 'valid_roi_out' and val: # generate a filename and add it to argstr - roi_out = self._gen_filename(self.inputs.in_file, suffix="_roi") - iflogger.info("Setting roi output file as", roi_out) - spec.argstr += " " + roi_out + roi_out = self._gen_filename(self.inputs.in_file, suffix='_roi') + iflogger.info('Setting roi output file as', roi_out) + spec.argstr += ' ' + roi_out return super()._format_arg(opt, spec, val) def _list_outputs(self): outputs = super()._list_outputs() if self.inputs.valid_roi_out: - roi_file = self._gen_filename(self.inputs.in_file, suffix="_roi") - outputs["roi_file"] = os.path.abspath(roi_file) + roi_file = self._gen_filename(self.inputs.in_file, suffix='_roi') + outputs['roi_file'] = os.path.abspath(roi_file) return outputs class VolumeToSurfaceMappingInputSpec(OpenMPTraitedSpec): volume_file = File( exists=True, - argstr="%s", + argstr='%s', mandatory=True, position=1, - desc="the volume to map data from", + desc='the volume to map data from', ) surface_file = File( exists=True, - argstr="%s", + argstr='%s', mandatory=True, position=2, - desc="the surface to map the data onto", + desc='the surface to map the data onto', ) out_file = File( - name_source=["surface_file"], - name_template="%s_mapped.func.gii", + name_source=['surface_file'], + name_template='%s_mapped.func.gii', keep_extension=False, - argstr="%s", + argstr='%s', position=3, - desc="the output metric file", + desc='the output metric file', ) method = traits.Enum( - "trilinear", - "enclosing", - "cubic", - "ribbon-constrained", - "myelin-style", - argstr="-%s", + 'trilinear', + 'enclosing', + 'cubic', + 'ribbon-constrained', + 'myelin-style', + argstr='-%s', position=4, - desc="the interpolation method to use", + desc='the interpolation method to use', ) _ribbon_constrained = [ - "inner_surface", - "outer_surface", - "volume_roi", - "weighted", - "voxel_subdiv", - "gaussian", - "interpolate", - "bad_vertices_out", - "output_weights", - "output_weights_text", + 'inner_surface', + 'outer_surface', + 'volume_roi', + 'weighted', + 'voxel_subdiv', + 'gaussian', + 'interpolate', + 'bad_vertices_out', + 'output_weights', + 'output_weights_text', ] _myelin_style = [ - "ribbon_roi", - "thickness", - "sigma", - "legacy_bug", + 'ribbon_roi', + 'thickness', + 'sigma', + 'legacy_bug', ] inner_surface = File( exists=True, - argstr="%s", + argstr='%s', position=5, - desc="the inner surface of the ribbon [-ribbon-constrained]", + desc='the inner surface of the ribbon [-ribbon-constrained]', xor=_myelin_style, ) outer_surface = File( exists=True, - argstr="%s", + argstr='%s', position=6, - desc="the outer surface of the ribbon [-ribbon-constrained]", + desc='the outer surface of the ribbon [-ribbon-constrained]', xor=_myelin_style, ) volume_roi = File( exists=True, - argstr="-volume-roi %s", + argstr='-volume-roi %s', position=7, - desc="use a 
volume roi [-ribbon-constrained]", + desc='use a volume roi [-ribbon-constrained]', xor=_myelin_style, ) weighted = traits.Bool( - argstr="-weighted", + argstr='-weighted', position=8, - desc="treat the roi values as weightings rather than binary [-ribbon-constrained]", - requires=["volume_roi"], + desc='treat the roi values as weightings rather than binary [-ribbon-constrained]', + requires=['volume_roi'], xor=_myelin_style, ) voxel_subdiv = traits.Int( default_value=3, - argstr="-voxel-subdiv %d", - desc="voxel divisions while estimating voxel weights [-ribbon-constrained]", + argstr='-voxel-subdiv %d', + desc='voxel divisions while estimating voxel weights [-ribbon-constrained]', xor=_myelin_style, ) thin_columns = traits.Bool( - argstr="-thin-columns", - desc="use non-overlapping polyhedra [-ribbon-constrained]", + argstr='-thin-columns', + desc='use non-overlapping polyhedra [-ribbon-constrained]', xor=_myelin_style, ) gaussian = traits.Float( - argstr="-gaussian %g", + argstr='-gaussian %g', desc="reduce weight to voxels that aren't near [-ribbon-constrained]", xor=_myelin_style, ) interpolate = traits.Enum( - "CUBIC", - "TRILINEAR", - "ENCLOSING_VOXEL", - argstr="-interpolate %s", - desc="instead of a weighted average of voxels, " - "interpolate at subpoints inside the ribbon [-ribbon-constrained]", + 'CUBIC', + 'TRILINEAR', + 'ENCLOSING_VOXEL', + argstr='-interpolate %s', + desc='instead of a weighted average of voxels, ' + 'interpolate at subpoints inside the ribbon [-ribbon-constrained]', xor=_myelin_style, ) bad_vertices_out = File( - argstr="-bad-vertices-out %s", + argstr='-bad-vertices-out %s', desc="output an ROI of which vertices didn't intersect any valid voxels", xor=_myelin_style, ) output_weights = traits.Int( - argstr="-output-weights %(0)d output_weights.nii.gz", - desc="write the voxel weights for a vertex to a volume file", + argstr='-output-weights %(0)d output_weights.nii.gz', + desc='write the voxel weights for a vertex to a volume file', xor=_myelin_style, ) output_weights_text = traits.File( - argstr="-output-weights-text %s", - desc="write the voxel weights for all vertices to a text file", + argstr='-output-weights-text %s', + desc='write the voxel weights for all vertices to a text file', xor=_myelin_style, ) ribbon_roi = File( exists=True, - argstr="%s", + argstr='%s', position=5, - desc="an roi volume of the cortical ribbon for this hemisphere [-myelin-style]", + desc='an roi volume of the cortical ribbon for this hemisphere [-myelin-style]', xor=_ribbon_constrained, ) thickness = File( exists=True, - argstr="%s", + argstr='%s', position=6, - desc="the thickness metric file for this hemisphere [-myelin-style]", + desc='the thickness metric file for this hemisphere [-myelin-style]', xor=_ribbon_constrained, ) sigma = traits.Float( - argstr="%g", + argstr='%g', position=7, - desc="gaussian kernel in mm for weighting voxels within range [-myelin-style]", + desc='gaussian kernel in mm for weighting voxels within range [-myelin-style]', xor=_ribbon_constrained, ) legacy_bug = traits.Bool( - argstr="-legacy-bug", + argstr='-legacy-bug', position=8, - desc="use the old bug in the myelin-style algorithm [-myelin-style]", + desc='use the old bug in the myelin-style algorithm [-myelin-style]', xor=_ribbon_constrained, ) subvol_select = traits.Int( - argstr="-subvol-select %d", - desc="select a single subvolume to map", + argstr='-subvol-select %d', + desc='select a single subvolume to map', ) """\ @@ -521,10 +520,10 @@ class 
VolumeToSurfaceMappingInputSpec(OpenMPTraitedSpec): class VolumeToSurfaceMappingOutputSpec(TraitedSpec): - out_file = File(desc="the output metric file") - bad_vertices_file = File(desc="the output metric file of vertices that have no data") - weights_file = File(desc="volume to write the weights to") - weights_text_file = File(desc="the output text filename") + out_file = File(desc='the output metric file') + bad_vertices_file = File(desc='the output metric file of vertices that have no data') + weights_file = File(desc='volume to write the weights to') + weights_text_file = File(desc='the output text filename') class VolumeToSurfaceMapping(WBCommand, OpenMPCommandMixin): @@ -585,25 +584,25 @@ class VolumeToSurfaceMapping(WBCommand, OpenMPCommandMixin): input_spec = VolumeToSurfaceMappingInputSpec output_spec = VolumeToSurfaceMappingOutputSpec - _cmd = "wb_command -volume-to-surface-mapping" + _cmd = 'wb_command -volume-to-surface-mapping' def _format_arg(self, opt, spec, val): if opt in self.input_spec._ribbon_constrained: - if self.inputs.method != "ribbon-constrained": - return "" + if self.inputs.method != 'ribbon-constrained': + return '' elif opt in self.input_spec._myelin_style: - if self.inputs.method != "myelin-style": - return "" + if self.inputs.method != 'myelin-style': + return '' return super()._format_arg(opt, spec, val) def _list_outputs(self): outputs = super()._list_outputs() if isdefined(self.inputs.bad_vertices_out): - outputs["bad_vertices_file"] = os.path.abspath(self.inputs.bad_vertices_out) + outputs['bad_vertices_file'] = os.path.abspath(self.inputs.bad_vertices_out) if isdefined(self.inputs.output_weights): - outputs["weights_file"] = os.path.abspath(self.inputs.output_weights) + outputs['weights_file'] = os.path.abspath(self.inputs.output_weights) if isdefined(self.inputs.output_weights_text): - outputs["weights_text_file"] = os.path.abspath(self.inputs.output_weights_text) + outputs['weights_text_file'] = os.path.abspath(self.inputs.output_weights_text) return outputs @@ -624,36 +623,36 @@ class MetricMaskInputSpec(CommandLineInputSpec): in_file = File( exists=True, - argstr="%s", + argstr='%s', position=1, mandatory=True, - desc="input metric file", + desc='input metric file', ) mask = File( exists=True, - argstr="%s", + argstr='%s', position=2, mandatory=True, - desc="mask metric file", + desc='mask metric file', ) out_file = File( - name_template="%s_masked.func.gii", - name_source=["in_file"], + name_template='%s_masked.func.gii', + name_source=['in_file'], keep_extension=False, - argstr="%s", + argstr='%s', position=3, - desc="output metric file", + desc='output metric file', ) column = traits.Either( traits.Int, traits.String, - argstr="-column %s", - desc="select a single column by number or name", + argstr='-column %s', + desc='select a single column by number or name', ) class MetricMaskOutputSpec(TraitedSpec): - out_file = File(desc="output metric file") + out_file = File(desc='output metric file') class MetricMask(WBCommand): @@ -671,7 +670,7 @@ class MetricMask(WBCommand): input_spec = MetricMaskInputSpec output_spec = MetricMaskOutputSpec - _cmd = "wb_command -metric-mask" + _cmd = 'wb_command -metric-mask' class MetricFillHolesInputSpec(TraitedSpec): @@ -692,34 +691,34 @@ class MetricFillHolesInputSpec(TraitedSpec): surface_file = File( mandatory=True, exists=True, - argstr="%s", + argstr='%s', position=1, - desc="surface to use for neighbor information", + desc='surface to use for neighbor information', ) metric_file = File( mandatory=True, 
exists=True, - argstr="%s", + argstr='%s', position=2, - desc="input ROI metric", + desc='input ROI metric', ) out_file = File( - name_template="%s_filled.shape.gii", - name_source="metric_file", + name_template='%s_filled.shape.gii', + name_source='metric_file', keep_extension=False, - argstr="%s", + argstr='%s', position=3, - desc="output ROI metric", + desc='output ROI metric', ) corrected_areas = File( exists=True, - argstr="-corrected-areas %s", - desc="vertex areas to use instead of computing them from the surface", + argstr='-corrected-areas %s', + desc='vertex areas to use instead of computing them from the surface', ) class MetricFillHolesOutputSpec(TraitedSpec): - out_file = File(desc="output ROI metric") + out_file = File(desc='output ROI metric') class MetricFillHoles(WBCommand): @@ -738,7 +737,7 @@ class MetricFillHoles(WBCommand): input_spec = MetricFillHolesInputSpec output_spec = MetricFillHolesOutputSpec - _cmd = "wb_command -metric-fill-holes" + _cmd = 'wb_command -metric-fill-holes' class MetricRemoveIslandsInputSpec(TraitedSpec): @@ -759,34 +758,34 @@ class MetricRemoveIslandsInputSpec(TraitedSpec): surface_file = File( mandatory=True, exists=True, - argstr="%s", + argstr='%s', position=1, - desc="surface to use for neighbor information", + desc='surface to use for neighbor information', ) metric_file = File( mandatory=True, exists=True, - argstr="%s", + argstr='%s', position=2, - desc="input ROI metric", + desc='input ROI metric', ) out_file = File( - name_template="%s_noislands.shape.gii", - name_source="metric_file", + name_template='%s_noislands.shape.gii', + name_source='metric_file', keep_extension=False, - argstr="%s", + argstr='%s', position=3, - desc="output ROI metric", + desc='output ROI metric', ) corrected_areas = File( exists=True, - argstr="-corrected-areas %s", - desc="vertex areas to use instead of computing them from the surface", + argstr='-corrected-areas %s', + desc='vertex areas to use instead of computing them from the surface', ) class MetricRemoveIslandsOutputSpec(TraitedSpec): - out_file = File(desc="output ROI metric") + out_file = File(desc='output ROI metric') class MetricRemoveIslands(WBCommand): @@ -805,4 +804,4 @@ class MetricRemoveIslands(WBCommand): input_spec = MetricRemoveIslandsInputSpec output_spec = MetricRemoveIslandsOutputSpec - _cmd = "wb_command -metric-remove-islands" + _cmd = 'wb_command -metric-remove-islands' diff --git a/niworkflows/reports/__init__.py b/niworkflows/reports/__init__.py index 961d5ab1a8b..107316216bc 100644 --- a/niworkflows/reports/__init__.py +++ b/niworkflows/reports/__init__.py @@ -3,4 +3,4 @@ from .core import generate_reports -__all__ = ["generate_reports"] +__all__ = ['generate_reports'] diff --git a/niworkflows/reports/core.py b/niworkflows/reports/core.py index 81c021bada7..90348f39c1b 100644 --- a/niworkflows/reports/core.py +++ b/niworkflows/reports/core.py @@ -26,12 +26,14 @@ Generalizes report generation across BIDS-Apps """ -from pathlib import Path + import re -from itertools import compress from collections import defaultdict -from bids.layout import BIDSLayout, add_config_paths +from itertools import compress +from pathlib import Path + import jinja2 +from bids.layout import BIDSLayout, add_config_paths from nipype.utils.filemanip import copyfile from .. 
import data, load_resource @@ -43,7 +45,7 @@ if "Configuration 'figures' already exists" != str(e): raise -PLURAL_SUFFIX = defaultdict(str("s").format, [("echo", "es")]) +PLURAL_SUFFIX = defaultdict('s'.format, [('echo', 'es')]) SVG_SNIPPET = [ """\ @@ -74,6 +76,7 @@ class Smallest: >>> sorted([1, None, 2], key=lambda x: x if x is not None else Smallest()) [None, 1, 2] """ + def __lt__(self, other): return not isinstance(other, Smallest) @@ -166,28 +169,28 @@ class Reportlet(Element): def __init__(self, layout, out_dir, config=None): if not config: - raise RuntimeError("Reportlet must have a config object") + raise RuntimeError('Reportlet must have a config object') self.name = config.get( - "name", "_".join("%s-%s" % i for i in sorted(config["bids"].items())) + 'name', '_'.join(f'{k}-{v}' for k, v in sorted(config['bids'].items())) ) - self.title = config.get("title") - self.subtitle = config.get("subtitle") - self.description = config.get("description") + self.title = config.get('title') + self.subtitle = config.get('subtitle') + self.description = config.get('description') # Query the BIDS layout of reportlets - files = layout.get(**config["bids"]) + files = layout.get(**config['bids']) self.components = [] for bidsfile in files: src = Path(bidsfile.path) - ext = "".join(src.suffixes) - desc_text = config.get("caption") + ext = ''.join(src.suffixes) + desc_text = config.get('caption') contents = None - if ext == ".html": + if ext == '.html': contents = src.read_text().strip() - elif ext == ".svg": + elif ext == '.svg': entities = dict(bidsfile.entities) if desc_text: desc_text = desc_text.format(**entities) @@ -200,7 +203,7 @@ def __init__(self, layout, out_dir, config=None): dst.parent.mkdir(parents=True, exist_ok=True) copyfile(src, dst, copy=True, use_hardlink=True) - contents = SVG_SNIPPET[config.get("static", True)].format(html_anchor) + contents = SVG_SNIPPET[config.get('static', True)].format(html_anchor) # Our current implementations of dynamic reportlets do this themselves, # however I'll leave the code here since this is potentially something we @@ -227,7 +230,7 @@ def is_empty(self): class SubReport(Element): """SubReports are sections within a Report.""" - def __init__(self, name, isnested=False, reportlets=None, title=""): + def __init__(self, name, isnested=False, reportlets=None, title=''): self.name = name self.title = title self.reportlets = reportlets or [] @@ -266,7 +269,7 @@ def __init__( out_dir, run_uuid, config=None, - out_filename="report.html", + out_filename='report.html', packagename=None, reportlets_dir=None, subject_id=None, @@ -282,36 +285,35 @@ def __init__( self.packagename = packagename self.subject_id = subject_id if subject_id is not None: - self.subject_id = ( - subject_id[4:] if subject_id.startswith("sub-") else subject_id - ) - self.out_filename = f"sub-{self.subject_id}.html" + self.subject_id = subject_id[4:] if subject_id.startswith('sub-') else subject_id + self.out_filename = f'sub-{self.subject_id}.html' # Default template from niworkflows self.template_path = load_resource('reports') / 'report.tpl' + if not self.template_path.exists(): + raise RuntimeError('Could not find report template. 
Corrupted installation.') self._load_config(Path(config or load_resource('reports') / 'default.yml')) - assert self.template_path.exists() def _load_config(self, config): from yaml import safe_load as load settings = load(config.read_text()) - self.packagename = self.packagename or settings.get("package", None) + self.packagename = self.packagename or settings.get('package', None) if self.packagename is not None: self.root = self.root / self.packagename self.out_dir = self.out_dir / self.packagename if self.subject_id is not None: - self.root = self.root / "sub-{}".format(self.subject_id) + self.root = self.root / f'sub-{self.subject_id}' - if "template_path" in settings: - self.template_path = config.parent / settings["template_path"] + if 'template_path' in settings: + self.template_path = config.parent / settings['template_path'] - self.index(settings["sections"]) + self.index(settings['sections']) def init_layout(self): - self.layout = BIDSLayout(self.root, config="figures", validate=False) + self.layout = BIDSLayout(self.root, config='figures', validate=False) def index(self, config): """ @@ -324,15 +326,13 @@ def index(self, config): for subrep_cfg in config: # First determine whether we need to split by some ordering # (ie. sessions / tasks / runs), which are separated by commas. - orderings = [ - s for s in subrep_cfg.get("ordering", "").strip().split(",") if s - ] + orderings = [s for s in subrep_cfg.get('ordering', '').strip().split(',') if s] entities, list_combos = self._process_orderings(orderings, self.layout) if not list_combos: # E.g. this is an anatomical reportlet reportlets = [ Reportlet(self.layout, self.out_dir, config=cfg) - for cfg in subrep_cfg["reportlets"] + for cfg in subrep_cfg['reportlets'] ] else: # Do not use dictionary for queries, as we need to preserve ordering @@ -343,15 +343,14 @@ def index(self, config): c_filt = list(filter(None, c)) ent_filt = list(compress(entities, c)) # Set a common title for this particular combination c - title = "Reports for: %s." 
% ", ".join( - [ - '%s %s' - % (ent_filt[i], c_filt[i]) + title = 'Reports for: {}.'.format( + ', '.join( + f'{ent_filt[i]} {c_filt[i]}' for i in range(len(c_filt)) - ] + ) ) - for cfg in subrep_cfg["reportlets"]: - cfg["bids"].update({entities[i]: c[i] for i in range(len(c))}) + for cfg in subrep_cfg['reportlets']: + cfg['bids'].update({entities[i]: c[i] for i in range(len(c))}) rlet = Reportlet(self.layout, self.out_dir, config=cfg) if not rlet.is_empty(): rlet.title = title @@ -362,59 +361,53 @@ def index(self, config): reportlets = [r for r in reportlets if not r.is_empty()] if reportlets: sub_report = SubReport( - subrep_cfg["name"], + subrep_cfg['name'], isnested=bool(list_combos), reportlets=reportlets, - title=subrep_cfg.get("title"), + title=subrep_cfg.get('title'), ) self.sections.append(sub_report) # Populate errors section - error_dir = ( - self.out_dir / "sub-{}".format(self.subject_id) / "log" / self.run_uuid - ) + error_dir = self.out_dir / f'sub-{self.subject_id}' / 'log' / self.run_uuid if error_dir.is_dir(): from ..utils.misc import read_crashfile - self.errors = [read_crashfile(str(f)) for f in error_dir.glob("crash*.*")] + self.errors = [read_crashfile(str(f)) for f in error_dir.glob('crash*.*')] def generate_report(self): """Once the Report has been indexed, the final HTML can be generated""" - logs_path = self.out_dir / "logs" + logs_path = self.out_dir / 'logs' boilerplate = [] boiler_idx = 0 - if (logs_path / "CITATION.html").exists(): + if (logs_path / 'CITATION.html').exists(): text = ( - re.compile("(.*?)", re.DOTALL | re.IGNORECASE) - .findall((logs_path / "CITATION.html").read_text())[0] + re.compile('(.*?)', re.DOTALL | re.IGNORECASE) + .findall((logs_path / 'CITATION.html').read_text())[0] .strip() ) - boilerplate.append( - (boiler_idx, "HTML", f'
{text}
') - ) + boilerplate.append((boiler_idx, 'HTML', f'
{text}
')) boiler_idx += 1 - if (logs_path / "CITATION.md").exists(): - text = (logs_path / "CITATION.md").read_text() - boilerplate.append((boiler_idx, "Markdown", f"
{text}
\n")) + if (logs_path / 'CITATION.md').exists(): + text = (logs_path / 'CITATION.md').read_text() + boilerplate.append((boiler_idx, 'Markdown', f'
{text}
\n')) boiler_idx += 1 - if (logs_path / "CITATION.tex").exists(): + if (logs_path / 'CITATION.tex').exists(): text = ( - re.compile( - r"\\begin{document}(.*?)\\end{document}", re.DOTALL | re.IGNORECASE - ) - .findall((logs_path / "CITATION.tex").read_text())[0] + re.compile(r'\\begin{document}(.*?)\\end{document}', re.DOTALL | re.IGNORECASE) + .findall((logs_path / 'CITATION.tex').read_text())[0] .strip() ) - bib = data.Loader(self.packagename).readable("data/boilerplate.bib") + bib = data.Loader(self.packagename).readable('data/boilerplate.bib') boilerplate.append( ( boiler_idx, - "LaTeX", - f"
{text}
\n

Bibliography

\n
{bib.read_text()}
\n", + 'LaTeX', + f'
{text}
\n

Bibliography

\n
{bib.read_text()}
\n', ) ) boiler_idx += 1 @@ -423,7 +416,7 @@ def generate_report(self): loader=jinja2.FileSystemLoader(searchpath=str(self.template_path.parent)), trim_blocks=True, lstrip_blocks=True, - autoescape=False, + autoescape=False, # noqa: S701 XXX Investigate if this is a problem in nireports. ) report_tpl = env.get_template(self.template_path.name) report_render = report_tpl.render( @@ -432,7 +425,7 @@ def generate_report(self): # Write out report self.out_dir.mkdir(parents=True, exist_ok=True) - (self.out_dir / self.out_filename).write_text(report_render, encoding="UTF-8") + (self.out_dir / self.out_filename).write_text(report_render, encoding='UTF-8') return len(self.errors) @staticmethod @@ -476,14 +469,10 @@ def _process_orderings(orderings, layout): # the "kept" entities entities = list(compress(orderings, keep_idx)) # the "kept" value combinations - value_combos = [ - tuple(compress(value_combo, keep_idx)) for value_combo in all_value_combos - ] + value_combos = [tuple(compress(value_combo, keep_idx)) for value_combo in all_value_combos] # sort the value combinations alphabetically from the first entity to the last entity value_combos.sort( - key=lambda entry: tuple( - value if value is not None else Smallest() for value in entry - ) + key=lambda entry: tuple(value if value is not None else Smallest() for value in entry) ) return entities, value_combos @@ -529,7 +518,7 @@ def generate_reports( """Execute run_reports on a list of subjects.""" reportlets_dir = None if work_dir is not None: - reportlets_dir = Path(work_dir) / "reportlets" + reportlets_dir = Path(work_dir) / 'reportlets' report_errors = [ run_reports( output_dir, @@ -546,15 +535,13 @@ def generate_reports( if errno: import logging - logger = logging.getLogger("cli") - error_list = ", ".join( - "%s (%d)" % (subid, err) - for subid, err in zip(subject_list, report_errors) - if err + logger = logging.getLogger('cli') + error_list = ', '.join( + '%s (%d)' % (subid, err) for subid, err in zip(subject_list, report_errors) if err ) logger.error( - "Preprocessing did not finish successfully. Errors occurred while processing " - "data from participants: %s. Check the HTML reports for details.", + 'Preprocessing did not finish successfully. Errors occurred while processing ' + 'data from participants: %s. Check the HTML reports for details.', error_list, ) return errno diff --git a/niworkflows/reports/tests/test_core.py b/niworkflows/reports/tests/test_core.py index 2006659076f..2c8c9200f58 100644 --- a/niworkflows/reports/tests/test_core.py +++ b/niworkflows/reports/tests/test_core.py @@ -20,59 +20,58 @@ # # https://www.nipreps.org/community/licensing/ # -""" Testing module for niworkflows.reports.core """ +"""Testing module for niworkflows.reports.core""" -from pathlib import Path import tempfile from itertools import product -from yaml import safe_load as load +from pathlib import Path import matplotlib.pyplot as plt -from bids.layout.writing import build_path -from bids.layout import BIDSLayout - import pytest +from bids.layout import BIDSLayout +from bids.layout.writing import build_path +from yaml import safe_load as load from ... 
import load_resource from ..core import Report -@pytest.fixture() +@pytest.fixture def bids_sessions(tmpdir_factory): f, _ = plt.subplots() - svg_dir = tmpdir_factory.mktemp("work") / "fmriprep" + svg_dir = tmpdir_factory.mktemp('work') / 'fmriprep' svg_dir.ensure_dir() pattern = ( - "sub-{subject}[/ses-{session}]/{datatype}/" - "sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}]" - "[_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}]" - "[_mod-{modality}][_run-{run}][_echo-{echo}][_space-{space}]" - "[_desc-{desc}]_{suffix}{extension<.svg>}" + 'sub-{subject}[/ses-{session}]/{datatype}/' + 'sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}]' + '[_ce-{ceagent}][_dir-{direction}][_rec-{reconstruction}]' + '[_mod-{modality}][_run-{run}][_echo-{echo}][_space-{space}]' + '[_desc-{desc}]_{suffix}{extension<.svg>}' ) - subjects = ["01"] - tasks = ["t1", "t2", "t3"] - runs = ["01", "02", None] - ces = ["none", "Gd"] - descs = ["aroma", "bbregister", "carpetplot", "rois"] + subjects = ['01'] + tasks = ['t1', 't2', 't3'] + runs = ['01', '02', None] + ces = ['none', 'Gd'] + descs = ['aroma', 'bbregister', 'carpetplot', 'rois'] # create functional data for both sessions - ses1_combos = product(subjects, ["1"], tasks, [None], runs, descs) - ses2_combos = product(subjects, ["2"], tasks, ces, [None], descs) + ses1_combos = product(subjects, ['1'], tasks, [None], runs, descs) + ses2_combos = product(subjects, ['2'], tasks, ces, [None], descs) # have no runs in the second session (ex: dmriprep test data) # https://github.com/nipreps/dmriprep/pull/59 all_combos = list(ses1_combos) + list(ses2_combos) for subject, session, task, ce, run, desc in all_combos: entities = { - "subject": subject, - "session": session, - "task": task, - "ceagent": ce, - "run": run, - "desc": desc, - "extension": ".svg", - "suffix": "bold", - "datatype": "figures", + 'subject': subject, + 'session': session, + 'task': task, + 'ceagent': ce, + 'run': run, + 'desc': desc, + 'extension': '.svg', + 'suffix': 'bold', + 'datatype': 'figures', } bids_path = build_path(entities, pattern) file_path = svg_dir / bids_path @@ -81,17 +80,17 @@ def bids_sessions(tmpdir_factory): # create anatomical data anat_opts = [ - {"desc": "brain"}, - {"desc": "conform"}, - {"desc": "reconall"}, - {"desc": "rois"}, - {"suffix": "dseg"}, - {"space": "MNI152NLin6Asym"}, - {"space": "MNI152NLin2009cAsym"}, + {'desc': 'brain'}, + {'desc': 'conform'}, + {'desc': 'reconall'}, + {'desc': 'rois'}, + {'suffix': 'dseg'}, + {'space': 'MNI152NLin6Asym'}, + {'space': 'MNI152NLin2009cAsym'}, ] anat_combos = product(subjects, anat_opts) for subject, anat_opt in anat_combos: - anat_entities = {"subject": subject, "datatype": "anat", "suffix": "t1w"} + anat_entities = {'subject': subject, 'datatype': 'anat', 'suffix': 't1w'} anat_entities.update(**anat_opt) bids_path = build_path(entities, pattern) file_path = svg_dir / bids_path @@ -101,70 +100,71 @@ def bids_sessions(tmpdir_factory): return svg_dir.dirname -@pytest.fixture() +@pytest.fixture def example_workdir(): from ... 
import data - workdir = data.load("tests/work") + + workdir = data.load('tests/work') if not workdir.is_dir(): - pytest.skip("Missing example workdir; run this test from a source repository") - yield workdir + pytest.skip('Missing example workdir; run this test from a source repository') + return workdir -@pytest.fixture() +@pytest.fixture def test_report1(tmp_path, example_workdir): - yield Report( + return Report( tmp_path, - "fakeuuid", - reportlets_dir=example_workdir / "reportlets", - subject_id="01", - packagename="fmriprep", + 'fakeuuid', + reportlets_dir=example_workdir / 'reportlets', + subject_id='01', + packagename='fmriprep', ) -@pytest.fixture() +@pytest.fixture def test_report2(tmp_path, bids_sessions): - yield Report( + return Report( tmp_path, - "fakeuuid", + 'fakeuuid', reportlets_dir=Path(bids_sessions), - subject_id="01", - packagename="fmriprep", + subject_id='01', + packagename='fmriprep', ) @pytest.mark.parametrize( - "orderings,expected_entities,expected_value_combos", + ('orderings', 'expected_entities', 'expected_value_combos'), [ ( - ["session", "task", "run"], - ["task", "run"], + ['session', 'task', 'run'], + ['task', 'run'], [ - ("faketask", None), - ("faketask2", None), - ("faketaskwithruns", 1), - ("faketaskwithruns", 2), - ("mixedgamblestask", 1), - ("mixedgamblestask", 2), - ("mixedgamblestask", 3), + ('faketask', None), + ('faketask2', None), + ('faketaskwithruns', 1), + ('faketaskwithruns', 2), + ('mixedgamblestask', 1), + ('mixedgamblestask', 2), + ('mixedgamblestask', 3), ], ), ( - ["run", "task", "session"], - ["run", "task"], + ['run', 'task', 'session'], + ['run', 'task'], [ - (None, "faketask"), - (None, "faketask2"), - (1, "faketaskwithruns"), - (1, "mixedgamblestask"), - (2, "faketaskwithruns"), - (2, "mixedgamblestask"), - (3, "mixedgamblestask"), + (None, 'faketask'), + (None, 'faketask2'), + (1, 'faketaskwithruns'), + (1, 'mixedgamblestask'), + (2, 'faketaskwithruns'), + (2, 'mixedgamblestask'), + (3, 'mixedgamblestask'), ], ), - ([""], [], []), - (["session"], [], []), + ([''], [], []), + (['session'], [], []), ([], [], []), - (["madeupentity"], [], []), + (['madeupentity'], [], []), ], ) def test_process_orderings_small( @@ -179,24 +179,24 @@ def test_process_orderings_small( @pytest.mark.parametrize( - "orderings,expected_entities,first_value_combo,last_value_combo", + ('orderings', 'expected_entities', 'first_value_combo', 'last_value_combo'), [ ( - ["session", "task", "ceagent", "run"], - ["session", "task", "ceagent", "run"], - ("1", "t1", None, None), - ("2", "t3", "none", None), + ['session', 'task', 'ceagent', 'run'], + ['session', 'task', 'ceagent', 'run'], + ('1', 't1', None, None), + ('2', 't3', 'none', None), ), ( - ["run", "task", "session"], - ["run", "task", "session"], - (None, "t1", "1"), - (2, "t3", "1"), + ['run', 'task', 'session'], + ['run', 'task', 'session'], + (None, 't1', '1'), + (2, 't3', '1'), ), - ([""], [], None, None), - (["session"], ["session"], ("1",), ("2",)), + ([''], [], None, None), + (['session'], ['session'], ('1',), ('2',)), ([], [], None, None), - (["madeupentity"], [], None, None), + (['madeupentity'], [], None, None), ], ) def test_process_orderings_large( @@ -215,16 +215,16 @@ def test_process_orderings_large( @pytest.mark.parametrize( - "ordering", + 'ordering', [ - ("session"), - ("task"), - ("run"), - ("session,task"), - ("session,task,run"), - ("session,task,ceagent,run"), - ("session,task,acquisition,ceagent,reconstruction,direction,run,echo"), - ("session,task,run,madeupentity"), + ('session'), 
+ ('task'), + ('run'), + ('session,task'), + ('session,task,run'), + ('session,task,ceagent,run'), + ('session,task,acquisition,ceagent,reconstruction,direction,run,echo'), + ('session,task,run,madeupentity'), ], ) def test_generated_reportlets(bids_sessions, ordering): @@ -232,23 +232,23 @@ def test_generated_reportlets(bids_sessions, ordering): out_dir = tempfile.mkdtemp() report = Report( Path(out_dir), - "fakeuuid", + 'fakeuuid', reportlets_dir=Path(bids_sessions), - subject_id="01", - packagename="fmriprep", + subject_id='01', + packagename='fmriprep', ) - settings = load(load_resource.readable("reports/default.yml").read_text()) + settings = load(load_resource.readable('reports/default.yml').read_text()) # change settings to only include some missing ordering - settings["sections"][3]["ordering"] = ordering - report.index(settings["sections"]) + settings['sections'][3]['ordering'] = ordering + report.index(settings['sections']) # expected number of reportlets - expected_reportlets_num = len(report.layout.get(extension=".svg")) + expected_reportlets_num = len(report.layout.get(extension='.svg')) # bids_session uses these entities - needed_entities = ["session", "task", "ceagent", "run"] + needed_entities = ['session', 'task', 'ceagent', 'run'] # the last section is the most recently run reportlets_num = len(report.sections[-1].reportlets) # get the number of figures in the output directory - out_layout = BIDSLayout(out_dir, config="figures", validate=False) + out_layout = BIDSLayout(out_dir, config='figures', validate=False) out_figs = len(out_layout.get()) # if ordering does not contain all the relevant entities # then there should be fewer reportlets than expected @@ -259,28 +259,28 @@ def test_generated_reportlets(bids_sessions, ordering): @pytest.mark.parametrize( - "subject_id,out_html", + ('subject_id', 'out_html'), [ - ("sub-01", "sub-01.html"), - ("sub-sub1", "sub-sub1.html"), - ("01", "sub-01.html"), - ("sub1", "sub-sub1.html"), + ('sub-01', 'sub-01.html'), + ('sub-sub1', 'sub-sub1.html'), + ('01', 'sub-01.html'), + ('sub1', 'sub-sub1.html'), ], ) def test_subject_id(tmp_path, subject_id, out_html): - reports = tmp_path / "reports" + reports = tmp_path / 'reports' Path( reports - / "fmriprep" - / (subject_id if subject_id.startswith("sub-") else f"sub-{subject_id}") + / 'fmriprep' + / (subject_id if subject_id.startswith('sub-') else f'sub-{subject_id}') ).mkdir(parents=True) report = Report( str(tmp_path), - "myuniqueid", + 'myuniqueid', reportlets_dir=reports, subject_id=subject_id, - packagename="fmriprep", + packagename='fmriprep', ) - assert report.subject_id[:4] != "sub-" + assert report.subject_id[:4] != 'sub-' assert report.out_filename == out_html diff --git a/niworkflows/testing.py b/niworkflows/testing.py index 837f7b5817e..c73f30339ed 100644 --- a/niworkflows/testing.py +++ b/niworkflows/testing.py @@ -1,16 +1,16 @@ -import pytest -from functools import wraps import os +from functools import wraps from pathlib import Path -from nipype.interfaces import fsl, freesurfer as fs, afni -test_data_env = os.getenv( - "TEST_DATA_HOME", str(Path.home() / ".cache" / "stanford-crn") -) -test_output_dir = os.getenv("TEST_OUTPUT_DIR") -test_workdir = os.getenv("TEST_WORK_DIR") +import pytest +from nipype.interfaces import afni, fsl +from nipype.interfaces import freesurfer as fs -data_dir = Path(test_data_env) / "BIDS-examples-1-enh-ds054" +test_data_env = os.getenv('TEST_DATA_HOME', str(Path.home() / '.cache' / 'stanford-crn')) +test_output_dir = os.getenv('TEST_OUTPUT_DIR') 
+test_workdir = os.getenv('TEST_WORK_DIR') + +data_dir = Path(test_data_env) / 'BIDS-examples-1-enh-ds054' def create_canary(predicate, message): @@ -23,6 +23,7 @@ def decorator(f): def wrapper(*args, **kwargs): canary() return f(*args, **kwargs) + return wrapper return canary, decorator @@ -30,13 +31,15 @@ def wrapper(*args, **kwargs): data_env_canary, needs_data_env = create_canary( not os.path.isdir(test_data_env), - "Test data must be made available in ~/.cache/stanford-crn or in a " - "directory referenced by the TEST_DATA_HOME environment variable.") + 'Test data must be made available in ~/.cache/stanford-crn or in a ' + 'directory referenced by the TEST_DATA_HOME environment variable.', +) data_dir_canary, needs_data_dir = create_canary( not os.path.isdir(data_dir), - "Test data must be made available in ~/.cache/stanford-crn or in a " - "directory referenced by the TEST_DATA_HOME environment variable.") + 'Test data must be made available in ~/.cache/stanford-crn or in a ' + 'directory referenced by the TEST_DATA_HOME environment variable.', +) has_fsl = fsl.Info.version() is not None has_freesurfer = fs.Info.version() is not None diff --git a/niworkflows/tests/conftest.py b/niworkflows/tests/conftest.py index 0dc40dd7a16..3bc1d75897e 100644 --- a/niworkflows/tests/conftest.py +++ b/niworkflows/tests/conftest.py @@ -20,13 +20,16 @@ # # https://www.nipreps.org/community/licensing/ # -""" py.test configuration file """ +"""py.test configuration file""" + +import datetime as dt import os from pathlib import Path -import datetime as dt + import pytest from templateflow.api import get as get_template -from niworkflows.testing import test_data_env, data_env_canary + +from niworkflows.testing import data_env_canary, test_data_env from niworkflows.tests.data import load_test_data datadir = load_test_data() @@ -44,18 +47,18 @@ def _run_interface_mock(objekt, runtime): @pytest.fixture def reference(): - return str(get_template("MNI152Lin", resolution=2, desc=None, suffix="T1w")) + return str(get_template('MNI152Lin', resolution=2, desc=None, suffix='T1w')) @pytest.fixture def reference_mask(): - return str(get_template("MNI152Lin", resolution=2, desc="brain", suffix="mask")) + return str(get_template('MNI152Lin', resolution=2, desc='brain', suffix='mask')) @pytest.fixture def moving(): data_env_canary() - return str(Path(test_data_env) / "ds000003/sub-01/anat/sub-01_T1w.nii.gz") + return str(Path(test_data_env) / 'ds000003/sub-01/anat/sub-01_T1w.nii.gz') @pytest.fixture @@ -63,4 +66,4 @@ def nthreads(): from multiprocessing import cpu_count # Tests are linear, so don't worry about leaving space for a control thread - return min(int(os.getenv("CIRCLE_NPROCS", "8")), cpu_count()) + return min(int(os.getenv('CIRCLE_NPROCS', '8')), cpu_count()) diff --git a/niworkflows/tests/data/__init__.py b/niworkflows/tests/data/__init__.py index 0eb4f637ab0..f3a8363d212 100644 --- a/niworkflows/tests/data/__init__.py +++ b/niworkflows/tests/data/__init__.py @@ -2,6 +2,7 @@ .. 
autofunction:: load_test_data """ + from acres import Loader load_test_data = Loader(__package__) diff --git a/niworkflows/tests/generate_data.py b/niworkflows/tests/generate_data.py index f6f57d1b574..32866732beb 100644 --- a/niworkflows/tests/generate_data.py +++ b/niworkflows/tests/generate_data.py @@ -15,7 +15,7 @@ def create_series_map(): series_exponent=0, series_start=0, series_step=1, - series_unit='SECOND' + series_unit='SECOND', ) def create_geometry_map(): @@ -24,13 +24,13 @@ def create_geometry_map(): timeseries = np.zeros((timepoints, 0)) for name, data in models: - if "CORTEX" in name: - model_type = "CIFTI_MODEL_TYPE_SURFACE" - attr = "vertex_indices" + if 'CORTEX' in name: + model_type = 'CIFTI_MODEL_TYPE_SURFACE' + attr = 'vertex_indices' indices = ci.Cifti2VertexIndices(np.arange(len(data))) else: - model_type = "CIFTI_MODEL_TYPE_VOXELS" - attr = "voxel_indices_ijk" + model_type = 'CIFTI_MODEL_TYPE_VOXELS' + attr = 'voxel_indices_ijk' indices = ci.Cifti2VoxelIndicesIJK(np.arange(len(data))) bm = ci.Cifti2BrainModel( index_offset=index_offset, @@ -39,9 +39,9 @@ def create_geometry_map(): brain_structure=name, ) setattr(bm, attr, indices) - if model_type == "CIFTI_MODEL_TYPE_SURFACE": + if model_type == 'CIFTI_MODEL_TYPE_SURFACE': # define total vertices for surface models - setattr(bm, "surface_number_of_vertices", 32492) + bm.surface_number_of_vertices = 32492 index_offset += len(data) brain_models.append(bm) timeseries = np.column_stack((timeseries, data.T)) @@ -55,7 +55,7 @@ def create_geometry_map(): return ci.Cifti2MatrixIndicesMap( (1,), - "CIFTI_INDEX_TYPE_BRAIN_MODELS", + 'CIFTI_INDEX_TYPE_BRAIN_MODELS', maps=brain_models, ), timeseries @@ -66,8 +66,8 @@ def create_geometry_map(): matrix.append(geometry_map) hdr = ci.Cifti2Header(matrix) img = ci.Cifti2Image(dataobj=ts, header=hdr) - img.nifti_header.set_intent("NIFTI_INTENT_CONNECTIVITY_DENSE_SERIES") + img.nifti_header.set_intent('NIFTI_INTENT_CONNECTIVITY_DENSE_SERIES') - out_file = Path("test.dtseries.nii").absolute() + out_file = Path('test.dtseries.nii').absolute() ci.save(img, out_file) return out_file diff --git a/niworkflows/tests/test_confounds.py b/niworkflows/tests/test_confounds.py index e09aac30756..b3862b020b5 100644 --- a/niworkflows/tests/test_confounds.py +++ b/niworkflows/tests/test_confounds.py @@ -20,12 +20,15 @@ # # https://www.nipreps.org/community/licensing/ # -""" Utilities tests """ +"""Utilities tests""" + import os from shutil import copy + import numpy as np import pandas as pd from nipype.pipeline import engine as pe + from ..interfaces.confounds import ExpandModel, SpikeRegressors from ..interfaces.plotting import CompCorVariancePlot, ConfoundsCorrelationPlot from .conftest import datadir @@ -34,27 +37,27 @@ def _smoke_test_report(report_interface, artifact_name): out_report = report_interface.run().outputs.out_file - save_artifacts = os.getenv("SAVE_CIRCLE_ARTIFACTS", False) + save_artifacts = os.getenv('SAVE_CIRCLE_ARTIFACTS', False) if save_artifacts: copy(out_report, os.path.join(save_artifacts, artifact_name)) - assert os.path.isfile(out_report), 'Report "%s" does not exist' % out_report + assert os.path.isfile(out_report), f'Report "{out_report}" does not exist' def _expand_test(model_formula): - orig_data_file = os.path.join(datadir, "confounds_test.tsv") + orig_data_file = os.path.join(datadir, 'confounds_test.tsv') exp_data_file = ( pe.Node( ExpandModel(confounds_file=orig_data_file, model_formula=model_formula), - name="expand_model", + name='expand_model', ) .run() 
.outputs.confounds_file ) - return pd.read_csv(exp_data_file, sep="\t") + return pd.read_csv(exp_data_file, sep='\t') -def _spikes_test(lags=None, mincontig=None, fmt="mask"): - orig_data_file = os.path.join(datadir, "spikes_test.tsv") +def _spikes_test(lags=None, mincontig=None, fmt='mask'): + orig_data_file = os.path.join(datadir, 'spikes_test.tsv') lags = lags or [0] spk_data_file = ( pe.Node( @@ -67,23 +70,23 @@ def _spikes_test(lags=None, mincontig=None, fmt="mask"): output_format=fmt, concatenate=False, ), - name="spike_regressors", + name='spike_regressors', ) .run() .outputs.confounds_file ) - return pd.read_csv(spk_data_file, sep="\t") + return pd.read_csv(spk_data_file, sep='\t') def test_expansion_variable_selection(): """Test model expansion: simple variable selection""" - model_formula = "a + b + c + d" + model_formula = 'a + b + c + d' expected_data = pd.DataFrame( { - "a": [-1, -2, -3, -4, -5], - "b": [2, 2, 2, 2, 2], - "c": [0, 1, 0, 1, 0], - "d": [9, 7, 5, 3, 1], + 'a': [-1, -2, -3, -4, -5], + 'b': [2, 2, 2, 2, 2], + 'c': [0, 1, 0, 1, 0], + 'd': [9, 7, 5, 3, 1], } ) exp_data = _expand_test(model_formula) @@ -92,64 +95,60 @@ def test_expansion_variable_selection(): def test_expansion_derivatives_and_powers(): """Temporal derivatives and quadratics""" - model_formula = "(dd1(a) + d1(b))^^2 + d1-2((c)^2) + d + others" + model_formula = '(dd1(a) + d1(b))^^2 + d1-2((c)^2) + d + others' # b_derivative1_power2 is dropped as an exact duplicate of b_derivative1 expected_data = pd.DataFrame( { - "a": [-1, -2, -3, -4, -5], - "a_power2": [1, 4, 9, 16, 25], - "a_derivative1": [np.nan, -1, -1, -1, -1], - "a_derivative1_power2": [np.nan, 1, 1, 1, 1], - "b_derivative1": [np.nan, 0, 0, 0, 0], - "b_derivative1_power2": [np.nan, 0, 0, 0, 0], - "c_power2_derivative1": [np.nan, 1, -1, 1, -1], - "c_power2_derivative2": [np.nan, np.nan, -2, 2, -2], - "d": [9, 7, 5, 3, 1], - "e": [0, 0, 0, 0, 0], - "f": [np.nan, 6, 4, 2, 0], + 'a': [-1, -2, -3, -4, -5], + 'a_power2': [1, 4, 9, 16, 25], + 'a_derivative1': [np.nan, -1, -1, -1, -1], + 'a_derivative1_power2': [np.nan, 1, 1, 1, 1], + 'b_derivative1': [np.nan, 0, 0, 0, 0], + 'b_derivative1_power2': [np.nan, 0, 0, 0, 0], + 'c_power2_derivative1': [np.nan, 1, -1, 1, -1], + 'c_power2_derivative2': [np.nan, np.nan, -2, 2, -2], + 'd': [9, 7, 5, 3, 1], + 'e': [0, 0, 0, 0, 0], + 'f': [np.nan, 6, 4, 2, 0], } ) exp_data = _expand_test(model_formula) assert set(exp_data.columns) == set(expected_data.columns) for col in expected_data.columns: - pd.testing.assert_series_equal( - expected_data[col], exp_data[col], check_dtype=False - ) + pd.testing.assert_series_equal(expected_data[col], exp_data[col], check_dtype=False) def test_expansion_na_robustness(): """NA robustness""" - model_formula = "(dd1(f))^^2" + model_formula = '(dd1(f))^^2' expected_data = pd.DataFrame( { - "f": [np.nan, 6, 4, 2, 0], - "f_power2": [np.nan, 36, 16, 4, 0], - "f_derivative1": [np.nan, np.nan, -2, -2, -2], - "f_derivative1_power2": [np.nan, np.nan, 4, 4, 4], + 'f': [np.nan, 6, 4, 2, 0], + 'f_power2': [np.nan, 36, 16, 4, 0], + 'f_derivative1': [np.nan, np.nan, -2, -2, -2], + 'f_derivative1_power2': [np.nan, np.nan, 4, 4, 4], } ) exp_data = _expand_test(model_formula) assert set(exp_data.columns) == set(expected_data.columns) for col in expected_data.columns: - pd.testing.assert_series_equal( - expected_data[col], exp_data[col], check_dtype=False - ) + pd.testing.assert_series_equal(expected_data[col], exp_data[col], check_dtype=False) def test_spikes(): """Test outlier flagging""" 
outliers = [1, 1, 0, 0, 1] spk_data = _spikes_test() - assert np.all(np.isclose(outliers, spk_data["motion_outlier"])) + assert np.all(np.isclose(outliers, spk_data['motion_outlier'])) outliers_spikes = pd.DataFrame( { - "motion_outlier00": [1, 0, 0, 0, 0], - "motion_outlier01": [0, 1, 0, 0, 0], - "motion_outlier02": [0, 0, 0, 0, 1], + 'motion_outlier00': [1, 0, 0, 0, 0], + 'motion_outlier01': [0, 1, 0, 0, 0], + 'motion_outlier02': [0, 0, 0, 0, 1], } ) - spk_data = _spikes_test(fmt="spikes") + spk_data = _spikes_test(fmt='spikes') assert set(spk_data.columns) == set(outliers_spikes.columns) for col in outliers_spikes.columns: assert np.all(np.isclose(outliers_spikes[col], spk_data[col])) @@ -157,39 +156,39 @@ def test_spikes(): lags = [0, 1] outliers_lags = [1, 1, 1, 0, 1] spk_data = _spikes_test(lags=lags) - assert np.all(np.isclose(outliers_lags, spk_data["motion_outlier"])) + assert np.all(np.isclose(outliers_lags, spk_data['motion_outlier'])) mincontig = 2 outliers_mc = [1, 1, 1, 1, 1] spk_data = _spikes_test(lags=lags, mincontig=mincontig) - assert np.all(np.isclose(outliers_mc, spk_data["motion_outlier"])) + assert np.all(np.isclose(outliers_mc, spk_data['motion_outlier'])) def test_CompCorVariancePlot(): """CompCor variance report test""" - metadata_file = os.path.join(datadir, "confounds_metadata_test.tsv") - cc_rpt = CompCorVariancePlot( - metadata_files=[metadata_file], metadata_sources=["aCompCor"] - ) - _smoke_test_report(cc_rpt, "compcor_variance.svg") + metadata_file = os.path.join(datadir, 'confounds_metadata_test.tsv') + cc_rpt = CompCorVariancePlot(metadata_files=[metadata_file], metadata_sources=['aCompCor']) + _smoke_test_report(cc_rpt, 'compcor_variance.svg') def test_ConfoundsCorrelationPlot(): """confounds correlation report test""" - confounds_file = os.path.join(datadir, "confounds_test.tsv") + confounds_file = os.path.join(datadir, 'confounds_test.tsv') cc_rpt = ConfoundsCorrelationPlot( - confounds_file=confounds_file, reference_column="a", ignore_initial_volumes=1, + confounds_file=confounds_file, + reference_column='a', + ignore_initial_volumes=1, ) - _smoke_test_report(cc_rpt, "confounds_correlation.svg") + _smoke_test_report(cc_rpt, 'confounds_correlation.svg') def test_ConfoundsCorrelationPlotColumns(): """confounds correlation report test""" - confounds_file = os.path.join(datadir, "confounds_test.tsv") + confounds_file = os.path.join(datadir, 'confounds_test.tsv') cc_rpt = ConfoundsCorrelationPlot( confounds_file=confounds_file, - reference_column="a", - columns=["b", "d", "f"], + reference_column='a', + columns=['b', 'd', 'f'], ignore_initial_volumes=0, ) - _smoke_test_report(cc_rpt, "confounds_correlation_cols.svg") + _smoke_test_report(cc_rpt, 'confounds_correlation_cols.svg') diff --git a/niworkflows/tests/test_registration.py b/niworkflows/tests/test_registration.py index 88346372265..f5f80a8294b 100644 --- a/niworkflows/tests/test_registration.py +++ b/niworkflows/tests/test_registration.py @@ -20,57 +20,59 @@ # # https://www.nipreps.org/community/licensing/ # -""" Registration tests """ +"""Registration tests""" + import os from shutil import copy -import pytest from tempfile import TemporaryDirectory +import pytest from nipype.pipeline import engine as pe + from ..interfaces.reportlets.registration import ( FLIRTRPT, - SpatialNormalizationRPT, ANTSRegistrationRPT, + ApplyXFMRPT, BBRegisterRPT, MRICoregRPT, - ApplyXFMRPT, SimpleBeforeAfterRPT, + SpatialNormalizationRPT, ) -from ..testing import has_fsl, has_freesurfer +from ..testing import 
has_freesurfer, has_fsl from .conftest import _run_interface_mock, datadir def _smoke_test_report(report_interface, artifact_name): with TemporaryDirectory() as tmpdir: - res = pe.Node(report_interface, name="smoke_test", base_dir=tmpdir).run() + res = pe.Node(report_interface, name='smoke_test', base_dir=tmpdir).run() out_report = res.outputs.out_report - save_artifacts = os.getenv("SAVE_CIRCLE_ARTIFACTS", False) + save_artifacts = os.getenv('SAVE_CIRCLE_ARTIFACTS', False) if save_artifacts: copy(out_report, os.path.join(save_artifacts, artifact_name)) - assert os.path.isfile(out_report), "Report does not exist" + assert os.path.isfile(out_report), 'Report does not exist' -@pytest.mark.skipif(not has_fsl, reason="No FSL") +@pytest.mark.skipif(not has_fsl, reason='No FSL') def test_FLIRTRPT(reference, moving): - """ the FLIRT report capable test """ + """the FLIRT report capable test""" flirt_rpt = FLIRTRPT(generate_report=True, in_file=moving, reference=reference) - _smoke_test_report(flirt_rpt, "testFLIRT.svg") + _smoke_test_report(flirt_rpt, 'testFLIRT.svg') -@pytest.mark.skipif(not has_freesurfer, reason="No FreeSurfer") +@pytest.mark.skipif(not has_freesurfer, reason='No FreeSurfer') def test_MRICoregRPT(monkeypatch, reference, moving, nthreads): - """ the MRICoreg report capable test """ + """the MRICoreg report capable test""" def _agg(objekt, runtime): outputs = objekt.output_spec() - outputs.out_lta_file = os.path.join(datadir, "testMRICoregRPT-out_lta_file.lta") + outputs.out_lta_file = os.path.join(datadir, 'testMRICoregRPT-out_lta_file.lta') outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report) return outputs # Patch the _run_interface method - monkeypatch.setattr(MRICoregRPT, "_run_interface", _run_interface_mock) - monkeypatch.setattr(MRICoregRPT, "aggregate_outputs", _agg) + monkeypatch.setattr(MRICoregRPT, '_run_interface', _run_interface_mock) + monkeypatch.setattr(MRICoregRPT, 'aggregate_outputs', _agg) mri_coreg_rpt = MRICoregRPT( generate_report=True, @@ -78,12 +80,12 @@ def _agg(objekt, runtime): reference_file=reference, num_threads=nthreads, ) - _smoke_test_report(mri_coreg_rpt, "testMRICoreg.svg") + _smoke_test_report(mri_coreg_rpt, 'testMRICoreg.svg') -@pytest.mark.skipif(not has_fsl, reason="No FSL") +@pytest.mark.skipif(not has_fsl, reason='No FSL') def test_ApplyXFMRPT(reference, moving): - """ the ApplyXFM report capable test """ + """the ApplyXFM report capable test""" flirt_rpt = FLIRTRPT(generate_report=False, in_file=moving, reference=reference) applyxfm_rpt = ApplyXFMRPT( @@ -93,127 +95,117 @@ def test_ApplyXFMRPT(reference, moving): reference=reference, apply_xfm=True, ) - _smoke_test_report(applyxfm_rpt, "testApplyXFM.svg") + _smoke_test_report(applyxfm_rpt, 'testApplyXFM.svg') -@pytest.mark.skipif(not has_fsl, reason="No FSL") +@pytest.mark.skipif(not has_fsl, reason='No FSL') def test_SimpleBeforeAfterRPT(reference, moving): - """ the SimpleBeforeAfterRPT report capable test """ + """the SimpleBeforeAfterRPT report capable test""" flirt_rpt = FLIRTRPT(generate_report=False, in_file=moving, reference=reference) ba_rpt = SimpleBeforeAfterRPT( generate_report=True, before=reference, after=flirt_rpt.run().outputs.out_file ) - _smoke_test_report(ba_rpt, "test_SimpleBeforeAfterRPT.svg") + _smoke_test_report(ba_rpt, 'test_SimpleBeforeAfterRPT.svg') -@pytest.mark.skipif(not has_fsl, reason="No FSL") +@pytest.mark.skipif(not has_fsl, reason='No FSL') def test_FLIRTRPT_w_BBR(reference, reference_mask, moving): - """ test FLIRTRPT with input 
`wm_seg` set. - For the sake of testing ONLY, `wm_seg` is set to the filename of a brain mask """ + """test FLIRTRPT with input `wm_seg` set. + For the sake of testing ONLY, `wm_seg` is set to the filename of a brain mask""" flirt_rpt = FLIRTRPT( generate_report=True, in_file=moving, reference=reference, wm_seg=reference_mask ) - _smoke_test_report(flirt_rpt, "testFLIRTRPTBBR.svg") + _smoke_test_report(flirt_rpt, 'testFLIRTRPTBBR.svg') -@pytest.mark.skipif(not has_freesurfer, reason="No FreeSurfer") +@pytest.mark.skipif(not has_freesurfer, reason='No FreeSurfer') def test_BBRegisterRPT(monkeypatch, moving): - """ the BBRegister report capable test """ + """the BBRegister report capable test""" def _agg(objekt, runtime): outputs = objekt.output_spec() - outputs.out_lta_file = os.path.join( - datadir, "testBBRegisterRPT-out_lta_file.lta" - ) + outputs.out_lta_file = os.path.join(datadir, 'testBBRegisterRPT-out_lta_file.lta') outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report) return outputs # Patch the _run_interface method - monkeypatch.setattr(BBRegisterRPT, "_run_interface", _run_interface_mock) - monkeypatch.setattr(BBRegisterRPT, "aggregate_outputs", _agg) + monkeypatch.setattr(BBRegisterRPT, '_run_interface', _run_interface_mock) + monkeypatch.setattr(BBRegisterRPT, 'aggregate_outputs', _agg) - subject_id = "fsaverage" + subject_id = 'fsaverage' bbregister_rpt = BBRegisterRPT( generate_report=True, - contrast_type="t1", - init="fsl", + contrast_type='t1', + init='fsl', source_file=moving, subject_id=subject_id, registered_file=True, ) - _smoke_test_report(bbregister_rpt, "testBBRegister.svg") + _smoke_test_report(bbregister_rpt, 'testBBRegister.svg') def test_SpatialNormalizationRPT(monkeypatch, moving): - """ the SpatialNormalizationRPT report capable test """ + """the SpatialNormalizationRPT report capable test""" def _agg(objekt, runtime): outputs = objekt.output_spec() outputs.warped_image = os.path.join( - datadir, "testSpatialNormalizationRPTMovingWarpedImage.nii.gz" + datadir, 'testSpatialNormalizationRPTMovingWarpedImage.nii.gz' ) outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report) return outputs # Patch the _run_interface method - monkeypatch.setattr( - SpatialNormalizationRPT, "_run_interface", _run_interface_mock - ) - monkeypatch.setattr(SpatialNormalizationRPT, "aggregate_outputs", _agg) + monkeypatch.setattr(SpatialNormalizationRPT, '_run_interface', _run_interface_mock) + monkeypatch.setattr(SpatialNormalizationRPT, 'aggregate_outputs', _agg) - ants_rpt = SpatialNormalizationRPT( - generate_report=True, moving_image=moving, flavor="testing" - ) - _smoke_test_report(ants_rpt, "testSpatialNormalizationRPT.svg") + ants_rpt = SpatialNormalizationRPT(generate_report=True, moving_image=moving, flavor='testing') + _smoke_test_report(ants_rpt, 'testSpatialNormalizationRPT.svg') def test_SpatialNormalizationRPT_masked(monkeypatch, moving, reference_mask): - """ the SpatialNormalizationRPT report capable test with masking """ + """the SpatialNormalizationRPT report capable test with masking""" def _agg(objekt, runtime): outputs = objekt.output_spec() outputs.warped_image = os.path.join( - datadir, "testSpatialNormalizationRPTMovingWarpedImage.nii.gz" + datadir, 'testSpatialNormalizationRPTMovingWarpedImage.nii.gz' ) outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report) return outputs # Patch the _run_interface method - monkeypatch.setattr( - SpatialNormalizationRPT, "_run_interface", _run_interface_mock - ) - 
monkeypatch.setattr(SpatialNormalizationRPT, "aggregate_outputs", _agg) + monkeypatch.setattr(SpatialNormalizationRPT, '_run_interface', _run_interface_mock) + monkeypatch.setattr(SpatialNormalizationRPT, 'aggregate_outputs', _agg) ants_rpt = SpatialNormalizationRPT( generate_report=True, moving_image=moving, reference_mask=reference_mask, - flavor="testing", + flavor='testing', ) - _smoke_test_report(ants_rpt, "testSpatialNormalizationRPT_masked.svg") + _smoke_test_report(ants_rpt, 'testSpatialNormalizationRPT_masked.svg') def test_ANTSRegistrationRPT(monkeypatch, reference, moving): - """ the SpatialNormalizationRPT report capable test """ + """the SpatialNormalizationRPT report capable test""" from niworkflows import data def _agg(objekt, runtime): outputs = objekt.output_spec() - outputs.warped_image = os.path.join( - datadir, "testANTSRegistrationRPT-warped_image.nii.gz" - ) + outputs.warped_image = os.path.join(datadir, 'testANTSRegistrationRPT-warped_image.nii.gz') outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report) return outputs # Patch the _run_interface method - monkeypatch.setattr(ANTSRegistrationRPT, "_run_interface", _run_interface_mock) - monkeypatch.setattr(ANTSRegistrationRPT, "aggregate_outputs", _agg) + monkeypatch.setattr(ANTSRegistrationRPT, '_run_interface', _run_interface_mock) + monkeypatch.setattr(ANTSRegistrationRPT, 'aggregate_outputs', _agg) ants_rpt = ANTSRegistrationRPT( generate_report=True, moving_image=moving, fixed_image=reference, - from_file=data.load("t1w-mni_registration_testing_000.json"), + from_file=data.load('t1w-mni_registration_testing_000.json'), ) - _smoke_test_report(ants_rpt, "testANTSRegistrationRPT.svg") + _smoke_test_report(ants_rpt, 'testANTSRegistrationRPT.svg') diff --git a/niworkflows/tests/test_segmentation.py b/niworkflows/tests/test_segmentation.py index 8022452ae55..2f3abac336b 100644 --- a/niworkflows/tests/test_segmentation.py +++ b/niworkflows/tests/test_segmentation.py @@ -20,56 +20,58 @@ # # https://www.nipreps.org/community/licensing/ # -""" Segmentation tests """ +"""Segmentation tests""" + import os from shutil import copy from tempfile import TemporaryDirectory + import pytest +from nipype.pipeline import engine as pe from templateflow.api import get as get_template -from nipype.pipeline import engine as pe -from ..interfaces.reportlets.segmentation import FASTRPT, ReconAllRPT from ..interfaces.reportlets.masks import ( BETRPT, BrainExtractionRPT, - SimpleShowMaskRPT, ROIsPlot, + SimpleShowMaskRPT, ) -from ..testing import has_fsl, has_freesurfer +from ..interfaces.reportlets.segmentation import FASTRPT, ReconAllRPT +from ..testing import has_freesurfer, has_fsl from .conftest import _run_interface_mock, datadir def _smoke_test_report(report_interface, artifact_name): with TemporaryDirectory() as tmpdir: - res = pe.Node(report_interface, name="smoke_test", base_dir=tmpdir).run() + res = pe.Node(report_interface, name='smoke_test', base_dir=tmpdir).run() out_report = res.outputs.out_report - save_artifacts = os.getenv("SAVE_CIRCLE_ARTIFACTS", False) + save_artifacts = os.getenv('SAVE_CIRCLE_ARTIFACTS', False) if save_artifacts: copy(out_report, os.path.join(save_artifacts, artifact_name)) - assert os.path.isfile(out_report), 'Report "%s" does not exist' % out_report + assert os.path.isfile(out_report), f'Report "{out_report}" does not exist' -@pytest.mark.skipif(not has_fsl, reason="No FSL") +@pytest.mark.skipif(not has_fsl, reason='No FSL') def test_BETRPT(moving): - """ the BET report capable test """ 
+ """the BET report capable test""" bet_rpt = BETRPT(generate_report=True, in_file=moving) - _smoke_test_report(bet_rpt, "testBET.svg") + _smoke_test_report(bet_rpt, 'testBET.svg') def test_ROIsPlot(tmp_path): - """ the BET report capable test """ + """the BET report capable test""" import nibabel as nb import numpy as np im = nb.load( str( get_template( - "OASIS30ANTs", + 'OASIS30ANTs', resolution=1, - desc="4", - suffix="dseg", - extension=[".nii", ".nii.gz"], + desc='4', + suffix='dseg', + extension=['.nii', '.nii.gz'], ) ) ) @@ -80,37 +82,35 @@ def test_ROIsPlot(tmp_path): lookup[4] = 3 newdata = lookup[np.round(im.get_fdata()).astype(int)] hdr = im.header.copy() - hdr.set_data_dtype("int16") - hdr["scl_slope"] = 1 - hdr["scl_inter"] = 0 - out_file = str(tmp_path / "segments.nii.gz") + hdr.set_data_dtype('int16') + hdr['scl_slope'] = 1 + hdr['scl_inter'] = 0 + out_file = str(tmp_path / 'segments.nii.gz') nb.Nifti1Image(newdata, im.affine, hdr).to_filename(out_file) roi_rpt = ROIsPlot( generate_report=True, - in_file=str(get_template("OASIS30ANTs", resolution=1, desc=None, suffix="T1w")), - in_mask=str( - get_template("OASIS30ANTs", resolution=1, desc="brain", suffix="mask") - ), + in_file=str(get_template('OASIS30ANTs', resolution=1, desc=None, suffix='T1w')), + in_mask=str(get_template('OASIS30ANTs', resolution=1, desc='brain', suffix='mask')), in_rois=[out_file], levels=[1.5, 2.5, 3.5], - colors=["gold", "magenta", "b"], + colors=['gold', 'magenta', 'b'], ) - _smoke_test_report(roi_rpt, "testROIsPlot.svg") + _smoke_test_report(roi_rpt, 'testROIsPlot.svg') def test_ROIsPlot2(tmp_path): - """ the BET report capable test """ + """the BET report capable test""" import nibabel as nb import numpy as np im = nb.load( str( get_template( - "OASIS30ANTs", + 'OASIS30ANTs', resolution=1, - desc="4", - suffix="dseg", - extension=[".nii", ".nii.gz"], + desc='4', + suffix='dseg', + extension=['.nii', '.nii.gz'], ) ) ) @@ -121,115 +121,107 @@ def test_ROIsPlot2(tmp_path): lookup[4] = 3 newdata = lookup[np.round(im.get_fdata()).astype(int)] hdr = im.header.copy() - hdr.set_data_dtype("int16") - hdr["scl_slope"] = 1 - hdr["scl_inter"] = 0 + hdr.set_data_dtype('int16') + hdr['scl_slope'] = 1 + hdr['scl_inter'] = 0 out_files = [] for i in range(1, 5): - seg = np.zeros_like(newdata, dtype="uint8") + seg = np.zeros_like(newdata, dtype='uint8') seg[(newdata > 0) & (newdata <= i)] = 1 - out_file = str(tmp_path / ("segments%02d.nii.gz" % i)) + out_file = str(tmp_path / ('segments%02d.nii.gz' % i)) nb.Nifti1Image(seg, im.affine, hdr).to_filename(out_file) out_files.append(out_file) roi_rpt = ROIsPlot( generate_report=True, - in_file=str(get_template("OASIS30ANTs", resolution=1, desc=None, suffix="T1w")), - in_mask=str( - get_template("OASIS30ANTs", resolution=1, desc="brain", suffix="mask") - ), + in_file=str(get_template('OASIS30ANTs', resolution=1, desc=None, suffix='T1w')), + in_mask=str(get_template('OASIS30ANTs', resolution=1, desc='brain', suffix='mask')), in_rois=out_files, - colors=["gold", "lightblue", "b", "g"], + colors=['gold', 'lightblue', 'b', 'g'], ) - _smoke_test_report(roi_rpt, "testROIsPlot2.svg") + _smoke_test_report(roi_rpt, 'testROIsPlot2.svg') def test_SimpleShowMaskRPT(): - """ the BET report capable test """ + """the BET report capable test""" msk_rpt = SimpleShowMaskRPT( generate_report=True, - background_file=str( - get_template("OASIS30ANTs", resolution=1, desc=None, suffix="T1w") - ), + background_file=str(get_template('OASIS30ANTs', resolution=1, desc=None, suffix='T1w')), 
mask_file=str( get_template( - "OASIS30ANTs", + 'OASIS30ANTs', resolution=1, - desc="BrainCerebellumRegistration", - suffix="mask", + desc='BrainCerebellumRegistration', + suffix='mask', ) ), ) - _smoke_test_report(msk_rpt, "testSimpleMask.svg") + _smoke_test_report(msk_rpt, 'testSimpleMask.svg') def test_BrainExtractionRPT(monkeypatch, moving, nthreads): - """ test antsBrainExtraction with reports""" + """test antsBrainExtraction with reports""" def _agg(objekt, runtime): outputs = objekt.output_spec() outputs.BrainExtractionMask = os.path.join( - datadir, "testBrainExtractionRPTBrainExtractionMask.nii.gz" + datadir, 'testBrainExtractionRPTBrainExtractionMask.nii.gz' ) outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report) return outputs # Patch the _run_interface method - monkeypatch.setattr(BrainExtractionRPT, "_run_interface", _run_interface_mock) - monkeypatch.setattr(BrainExtractionRPT, "aggregate_outputs", _agg) + monkeypatch.setattr(BrainExtractionRPT, '_run_interface', _run_interface_mock) + monkeypatch.setattr(BrainExtractionRPT, 'aggregate_outputs', _agg) bex_rpt = BrainExtractionRPT( generate_report=True, dimension=3, use_floatingpoint_precision=1, anatomical_image=moving, - brain_template=str( - get_template("OASIS30ANTs", resolution=1, desc=None, suffix="T1w") - ), + brain_template=str(get_template('OASIS30ANTs', resolution=1, desc=None, suffix='T1w')), brain_probability_mask=str( - get_template("OASIS30ANTs", resolution=1, label="brain", suffix="probseg") + get_template('OASIS30ANTs', resolution=1, label='brain', suffix='probseg') ), extraction_registration_mask=str( get_template( - "OASIS30ANTs", + 'OASIS30ANTs', resolution=1, - desc="BrainCerebellumRegistration", - suffix="mask", + desc='BrainCerebellumRegistration', + suffix='mask', ) ), - out_prefix="testBrainExtractionRPT", + out_prefix='testBrainExtractionRPT', debug=True, # run faster for testing purposes num_threads=nthreads, ) - _smoke_test_report(bex_rpt, "testANTSBrainExtraction.svg") + _smoke_test_report(bex_rpt, 'testANTSBrainExtraction.svg') -@pytest.mark.skipif(not has_fsl, reason="No FSL") -@pytest.mark.parametrize("segments", [True, False]) +@pytest.mark.skipif(not has_fsl, reason='No FSL') +@pytest.mark.parametrize('segments', [True, False]) def test_FASTRPT(monkeypatch, segments, reference, reference_mask): - """ test FAST with the two options for segments """ + """test FAST with the two options for segments""" from nipype.interfaces.fsl.maths import ApplyMask def _agg(objekt, runtime): outputs = objekt.output_spec() outputs.out_report = os.path.join(runtime.cwd, objekt.inputs.out_report) - outputs.tissue_class_map = os.path.join( - datadir, "testFASTRPT-tissue_class_map.nii.gz" - ) + outputs.tissue_class_map = os.path.join(datadir, 'testFASTRPT-tissue_class_map.nii.gz') outputs.tissue_class_files = [ - os.path.join(datadir, "testFASTRPT-tissue_class_files0.nii.gz"), - os.path.join(datadir, "testFASTRPT-tissue_class_files1.nii.gz"), - os.path.join(datadir, "testFASTRPT-tissue_class_files2.nii.gz"), + os.path.join(datadir, 'testFASTRPT-tissue_class_files0.nii.gz'), + os.path.join(datadir, 'testFASTRPT-tissue_class_files1.nii.gz'), + os.path.join(datadir, 'testFASTRPT-tissue_class_files2.nii.gz'), ] return outputs # Patch the _run_interface method - monkeypatch.setattr(FASTRPT, "_run_interface", _run_interface_mock) - monkeypatch.setattr(FASTRPT, "aggregate_outputs", _agg) + monkeypatch.setattr(FASTRPT, '_run_interface', _run_interface_mock) + monkeypatch.setattr(FASTRPT, 
'aggregate_outputs', _agg) brain = ( - pe.Node(ApplyMask(in_file=reference, mask_file=reference_mask), name="brain") + pe.Node(ApplyMask(in_file=reference, mask_file=reference_mask), name='brain') .run() .outputs.out_file ) @@ -239,21 +231,21 @@ def _agg(objekt, runtime): no_bias=True, probability_maps=True, segments=segments, - out_basename="test", + out_basename='test', ) - _smoke_test_report(fast_rpt, "testFAST_%ssegments.svg" % ("no" * int(not segments))) + _smoke_test_report(fast_rpt, 'testFAST_%ssegments.svg' % ('no' * int(not segments))) -@pytest.mark.skipif(not has_freesurfer, reason="No FreeSurfer") +@pytest.mark.skipif(not has_freesurfer, reason='No FreeSurfer') def test_ReconAllRPT(monkeypatch): # Patch the _run_interface method - monkeypatch.setattr(ReconAllRPT, "_run_interface", _run_interface_mock) + monkeypatch.setattr(ReconAllRPT, '_run_interface', _run_interface_mock) rall_rpt = ReconAllRPT( - subject_id="fsaverage", - directive="all", - subjects_dir=os.getenv("SUBJECTS_DIR"), + subject_id='fsaverage', + directive='all', + subjects_dir=os.getenv('SUBJECTS_DIR'), generate_report=True, ) - _smoke_test_report(rall_rpt, "testReconAll.svg") + _smoke_test_report(rall_rpt, 'testReconAll.svg') diff --git a/niworkflows/tests/test_utils.py b/niworkflows/tests/test_utils.py index 5f69bfaa534..06810604648 100644 --- a/niworkflows/tests/test_utils.py +++ b/niworkflows/tests/test_utils.py @@ -20,41 +20,42 @@ # # https://www.nipreps.org/community/licensing/ # -""" Utilities tests """ +"""Utilities tests""" import os -from templateflow.api import get as get_template -from niworkflows.interfaces.reportlets.masks import SimpleShowMaskRPT -from nipype.pipeline import engine as pe +from shutil import which import pytest -from shutil import which +from nipype.pipeline import engine as pe +from templateflow.api import get as get_template + +from niworkflows.interfaces.reportlets.masks import SimpleShowMaskRPT @pytest.mark.skipif( - which("svgo") is None or which("cwebp") is None, reason="svgo or cwebp missing" + which('svgo') is None or which('cwebp') is None, reason='svgo or cwebp missing' ) def test_compression(tmp_path): - """ the BET report capable test """ + """the BET report capable test""" uncompressed = ( pe.Node( SimpleShowMaskRPT( generate_report=True, background_file=str( - get_template("OASIS30ANTs", resolution=1, desc=None, suffix="T1w") + get_template('OASIS30ANTs', resolution=1, desc=None, suffix='T1w') ), mask_file=str( get_template( - "OASIS30ANTs", + 'OASIS30ANTs', resolution=1, - desc="BrainCerebellumRegistration", - suffix="mask", + desc='BrainCerebellumRegistration', + suffix='mask', ) ), compress_report=False, ), - name="uncompressed", + name='uncompressed', base_dir=str(tmp_path), ) .run() @@ -66,19 +67,19 @@ def test_compression(tmp_path): SimpleShowMaskRPT( generate_report=True, background_file=str( - get_template("OASIS30ANTs", resolution=1, desc=None, suffix="T1w") + get_template('OASIS30ANTs', resolution=1, desc=None, suffix='T1w') ), mask_file=str( get_template( - "OASIS30ANTs", + 'OASIS30ANTs', resolution=1, - desc="BrainCerebellumRegistration", - suffix="mask", + desc='BrainCerebellumRegistration', + suffix='mask', ) ), compress_report=True, ), - name="compressed", + name='compressed', base_dir=str(tmp_path), ) .run() @@ -88,6 +89,6 @@ def test_compression(tmp_path): size = int(os.stat(uncompressed).st_size) size_compress = int(os.stat(compressed).st_size) assert size >= size_compress, ( - "The uncompressed report is smaller (%d)" - "than the compressed report 
(%d)" % (size, size_compress) + 'The uncompressed report is smaller (%d)' + 'than the compressed report (%d)' % (size, size_compress) ) diff --git a/niworkflows/tests/test_viz.py b/niworkflows/tests/test_viz.py index 8bb1cdce84f..fe9d6a796c3 100644 --- a/niworkflows/tests/test_viz.py +++ b/niworkflows/tests/test_viz.py @@ -21,46 +21,50 @@ # https://www.nipreps.org/community/licensing/ # """Test viz module""" + import os from pathlib import Path -import numpy as np import nibabel as nb +import numpy as np import pandas as pd import pytest +from niworkflows.interfaces.plotting import _get_tr +from niworkflows.utils.timeseries import _cifti_timeseries, _nifti_timeseries +from niworkflows.viz.plots import fMRIPlot + +from .. import viz from .conftest import datadir from .generate_data import _create_dtseries_cifti -from .. import viz -from niworkflows.viz.plots import fMRIPlot -from niworkflows.utils.timeseries import _cifti_timeseries, _nifti_timeseries -from niworkflows.interfaces.plotting import _get_tr -@pytest.mark.parametrize("tr", (None, 0.7)) -@pytest.mark.parametrize("sorting", (None, "ward", "linkage")) +@pytest.mark.parametrize('tr', [None, 0.7]) +@pytest.mark.parametrize('sorting', [None, 'ward', 'linkage']) def test_carpetplot(tr, sorting): """Write a carpetplot""" - save_artifacts = os.getenv("SAVE_CIRCLE_ARTIFACTS", False) + save_artifacts = os.getenv('SAVE_CIRCLE_ARTIFACTS', False) rng = np.random.default_rng(2010) viz.plot_carpet( rng.normal(100, 20, size=(18000, 1900)), - title="carpetplot with title", + title='carpetplot with title', tr=tr, output_file=( os.path.join( save_artifacts, f"carpet_nosegs_{'index' if tr is None else 'time'}_" - f"{'nosort' if sorting is None else sorting}.svg" - ) if save_artifacts else None + f"{'nosort' if sorting is None else sorting}.svg", + ) + if save_artifacts + else None ), sort_rows=sorting, drop_trs=15, ) - labels = ("Ctx GM", "Subctx GM", "WM+CSF", "Cereb.", "Edge") + labels = ('Ctx GM', 'Subctx GM', 'WM+CSF', 'Cereb.', 'Edge') sizes = (200, 100, 50, 100, 50) total_size = np.sum(sizes) data = np.zeros((total_size, 300)) @@ -70,11 +74,9 @@ def test_carpetplot(tr, sorting): segments = {} start = 0 for group, size in zip(labels, sizes): - segments[group] = indexes[start:start + size] - data[indexes[start:start + size]] = rng.normal( - rng.standard_normal(1) * 100, - rng.normal(20, 5, size=1), - size=(size, 300) + segments[group] = indexes[start : start + size] + data[indexes[start : start + size]] = rng.normal( + rng.standard_normal(1) * 100, rng.normal(20, 5, size=1), size=(size, 300) ) start += size @@ -86,8 +88,10 @@ def test_carpetplot(tr, sorting): os.path.join( save_artifacts, f"carpet_random_{'index' if tr is None else 'seg'}_" - f"{'nosort' if sorting is None else sorting}.svg" - ) if save_artifacts else None + f"{'nosort' if sorting is None else sorting}.svg", + ) + if save_artifacts + else None ), sort_rows=sorting, ) @@ -98,8 +102,8 @@ def test_carpetplot(tr, sorting): segments = {} start = 0 for i, (group, size) in enumerate(zip(labels, sizes)): - segments[group] = indexes[start:start + size] - data[indexes[start:start + size]] = i + segments[group] = indexes[start : start + size] + data[indexes[start : start + size]] = i start += size viz.plot_carpet( @@ -111,51 +115,59 @@ def test_carpetplot(tr, sorting): os.path.join( save_artifacts, f"carpet_const_{'index' if tr is None else 'time'}_" - f"{'nosort' if sorting is None else sorting}.svg" - ) if save_artifacts else None + f"{'nosort' if sorting is None else sorting}.svg", + ) 
+ if save_artifacts + else None ), sort_rows=sorting, ) -@pytest.mark.parametrize("input_files", [ - ("sub-ds205s03_task-functionallocalizer_run-01_bold_volreg.nii.gz", None), - ("sub-01_task-mixedgamblestask_run-02_space-fsLR_den-91k_bold.dtseries.nii", None), - ("sub-ds205s03_task-functionallocalizer_run-01_bold_volreg.nii.gz", - "sub-ds205s03_task-functionallocalizer_run-01_bold_parc.nii.gz"), -]) +@pytest.mark.parametrize( + 'input_files', + [ + ('sub-ds205s03_task-functionallocalizer_run-01_bold_volreg.nii.gz', None), + ('sub-01_task-mixedgamblestask_run-02_space-fsLR_den-91k_bold.dtseries.nii', None), + ( + 'sub-ds205s03_task-functionallocalizer_run-01_bold_volreg.nii.gz', + 'sub-ds205s03_task-functionallocalizer_run-01_bold_parc.nii.gz', + ), + ], +) def test_fmriplot(input_files): """Exercise the fMRIPlot class.""" - save_artifacts = os.getenv("SAVE_CIRCLE_ARTIFACTS", False) + save_artifacts = os.getenv('SAVE_CIRCLE_ARTIFACTS', False) rng = np.random.default_rng(2010) in_file = os.path.join(datadir, input_files[0]) seg_file = os.path.join(datadir, input_files[1]) if input_files[1] is not None else None - dtype = "nifti" if input_files[0].endswith("volreg.nii.gz") else "cifti" - has_seg = "_parc" if seg_file else "" + dtype = 'nifti' if input_files[0].endswith('volreg.nii.gz') else 'cifti' + has_seg = '_parc' if seg_file else '' timeseries, segments = ( - _nifti_timeseries(in_file, seg_file) if dtype == "nifti" else - _cifti_timeseries(in_file) + _nifti_timeseries(in_file, seg_file) if dtype == 'nifti' else _cifti_timeseries(in_file) ) fig = fMRIPlot( timeseries, segments, tr=_get_tr(nb.load(in_file)), - confounds=pd.DataFrame({ - "outliers": rng.normal(0.2, 0.2, timeseries.shape[-1] - 1), - "DVARS": rng.normal(0.2, 0.2, timeseries.shape[-1] - 1), - "FD": rng.normal(0.2, 0.2, timeseries.shape[-1] - 1), - }), - units={"FD": "mm"}, - paired_carpet=dtype == "cifti", + confounds=pd.DataFrame( + { + 'outliers': rng.normal(0.2, 0.2, timeseries.shape[-1] - 1), + 'DVARS': rng.normal(0.2, 0.2, timeseries.shape[-1] - 1), + 'FD': rng.normal(0.2, 0.2, timeseries.shape[-1] - 1), + } + ), + units={'FD': 'mm'}, + paired_carpet=dtype == 'cifti', ).plot() if save_artifacts: fig.savefig( - os.path.join(save_artifacts, f"fmriplot_{dtype}{has_seg}.svg"), - bbox_inches="tight", + os.path.join(save_artifacts, f'fmriplot_{dtype}{has_seg}.svg'), + bbox_inches='tight', ) @@ -164,47 +176,47 @@ def test_plot_melodic_components(tmp_path): import numpy as np # save the artifacts - out_dir = Path(os.getenv("SAVE_CIRCLE_ARTIFACTS", str(tmp_path))) - all_noise = str(out_dir / "melodic_all_noise.svg") - no_noise = str(out_dir / "melodic_no_noise.svg") - no_classified = str(out_dir / "melodic_no_classified.svg") + out_dir = Path(os.getenv('SAVE_CIRCLE_ARTIFACTS', str(tmp_path))) + all_noise = str(out_dir / 'melodic_all_noise.svg') + no_noise = str(out_dir / 'melodic_no_noise.svg') + no_classified = str(out_dir / 'melodic_no_classified.svg') # melodic directory - melodic_dir = tmp_path / "melodic" + melodic_dir = tmp_path / 'melodic' melodic_dir.mkdir(exist_ok=True) # melodic_mix mel_mix = np.random.randint(low=-5, high=5, size=[10, 2]) - mel_mix_file = str(melodic_dir / "melodic_mix") - np.savetxt(mel_mix_file, mel_mix, fmt="%i") + mel_mix_file = str(melodic_dir / 'melodic_mix') + np.savetxt(mel_mix_file, mel_mix, fmt='%i') # melodic_FTmix mel_ftmix = np.random.rand(2, 5) - mel_ftmix_file = str(melodic_dir / "melodic_FTmix") + mel_ftmix_file = str(melodic_dir / 'melodic_FTmix') np.savetxt(mel_ftmix_file, mel_ftmix) 
# melodic_ICstats mel_icstats = np.random.rand(2, 2) - mel_icstats_file = str(melodic_dir / "melodic_ICstats") + mel_icstats_file = str(melodic_dir / 'melodic_ICstats') np.savetxt(mel_icstats_file, mel_icstats) # melodic_IC mel_ic = np.random.rand(2, 2, 2, 2) - mel_ic_file = str(melodic_dir / "melodic_IC.nii.gz") + mel_ic_file = str(melodic_dir / 'melodic_IC.nii.gz') mel_ic_img = nb.Nifti2Image(mel_ic, np.eye(4)) mel_ic_img.to_filename(mel_ic_file) # noise_components noise_comps = np.array([1, 2]) - noise_comps_file = str(tmp_path / "noise_ics.csv") - np.savetxt(noise_comps_file, noise_comps, fmt="%i", delimiter=",") + noise_comps_file = str(tmp_path / 'noise_ics.csv') + np.savetxt(noise_comps_file, noise_comps, fmt='%i', delimiter=',') # create empty components file - nocomps_file = str(tmp_path / "noise_none.csv") - open(nocomps_file, "w").close() + nocomps_file = str(tmp_path / 'noise_none.csv') + open(nocomps_file, 'w').close() # in_file - in_fname = str(tmp_path / "in_file.nii.gz") + in_fname = str(tmp_path / 'in_file.nii.gz') voxel_ts = np.random.rand(2, 2, 2, 10) in_file = nb.Nifti2Image(voxel_ts, np.eye(4)) in_file.to_filename(in_fname) # report_mask - report_fname = str(tmp_path / "report_mask.nii.gz") + report_fname = str(tmp_path / 'report_mask.nii.gz') report_mask = nb.Nifti2Image(np.ones([2, 2, 2]), np.eye(4)) report_mask.to_filename(report_fname) @@ -239,9 +251,9 @@ def test_plot_melodic_components(tmp_path): def test_compcor_variance_plot(tmp_path): """Test plotting CompCor variance""" - out_dir = Path(os.getenv("SAVE_CIRCLE_ARTIFACTS", str(tmp_path))) - out_file = str(out_dir / "variance_plot_short.svg") - metadata_file = os.path.join(datadir, "confounds_metadata_short_test.tsv") + out_dir = Path(os.getenv('SAVE_CIRCLE_ARTIFACTS', str(tmp_path))) + out_file = str(out_dir / 'variance_plot_short.svg') + metadata_file = os.path.join(datadir, 'confounds_metadata_short_test.tsv') viz.plots.compcor_variance_plot([metadata_file], output_file=out_file) @@ -262,6 +274,6 @@ def create_surface_dtseries(): def test_cifti_surfaces_plot(tmp_path, create_surface_dtseries): """Test plotting CIFTI-2 surfaces""" os.chdir(tmp_path) - out_dir = Path(os.getenv("SAVE_CIRCLE_ARTIFACTS", str(tmp_path))) - out_file = str(out_dir / "cifti_surfaces_plot.svg") + out_dir = Path(os.getenv('SAVE_CIRCLE_ARTIFACTS', str(tmp_path))) + out_file = str(out_dir / 'cifti_surfaces_plot.svg') viz.plots.cifti_surfaces_plot(create_surface_dtseries, output_file=out_file) diff --git a/niworkflows/utils/bids.py b/niworkflows/utils/bids.py index fdcfbd586f3..d323464f8ce 100644 --- a/niworkflows/utils/bids.py +++ b/niworkflows/utils/bids.py @@ -21,26 +21,27 @@ # https://www.nipreps.org/community/licensing/ # """Helpers for handling BIDS-like neuroimaging structures.""" -from pathlib import Path + import json import re import warnings +from pathlib import Path + from bids import BIDSLayout from bids.layout import Query from packaging.version import Version - DEFAULT_BIDS_QUERIES = { - "bold": {"datatype": "func", "suffix": "bold", "part": ["mag", None]}, - "dwi": {"suffix": "dwi"}, - "flair": {"datatype": "anat", "suffix": "FLAIR", "part": ["mag", None]}, - "fmap": {"datatype": "fmap"}, - "pet": {"suffix": "pet"}, - "roi": {"datatype": "anat", "suffix": "roi"}, - "sbref": {"datatype": "func", "suffix": "sbref", "part": ["mag", None]}, - "t1w": {"datatype": "anat", "suffix": "T1w", "part": ["mag", None]}, - "t2w": {"datatype": "anat", "suffix": "T2w", "part": ["mag", None]}, - "asl": {"datatype": "perf", "suffix": "asl"}, 
+ 'bold': {'datatype': 'func', 'suffix': 'bold', 'part': ['mag', None]}, + 'dwi': {'suffix': 'dwi'}, + 'flair': {'datatype': 'anat', 'suffix': 'FLAIR', 'part': ['mag', None]}, + 'fmap': {'datatype': 'fmap'}, + 'pet': {'suffix': 'pet'}, + 'roi': {'datatype': 'anat', 'suffix': 'roi'}, + 'sbref': {'datatype': 'func', 'suffix': 'sbref', 'part': ['mag', None]}, + 't1w': {'datatype': 'anat', 'suffix': 'T1w', 'part': ['mag', None]}, + 't2w': {'datatype': 'anat', 'suffix': 'T2w', 'part': ['mag', None]}, + 'asl': {'datatype': 'perf', 'suffix': 'asl'}, } @@ -48,13 +49,13 @@ class BIDSError(ValueError): def __init__(self, message, bids_root): indent = 10 header = '{sep} BIDS root folder: "{bids_root}" {sep}'.format( - bids_root=bids_root, sep="".join(["-"] * indent) + bids_root=bids_root, sep=''.join(['-'] * indent) ) - self.msg = "\n{header}\n{indent}{message}\n{footer}".format( + self.msg = '\n{header}\n{indent}{message}\n{footer}'.format( header=header, - indent="".join([" "] * (indent + 1)), + indent=''.join([' '] * (indent + 1)), message=message, - footer="".join(["-"] * len(header)), + footer=''.join(['-'] * len(header)), ) super().__init__(self.msg) self.bids_root = bids_root @@ -64,9 +65,7 @@ class BIDSWarning(RuntimeWarning): pass -def collect_participants( - bids_dir, participant_label=None, strict=False, bids_validate=True -): +def collect_participants(bids_dir, participant_label=None, strict=False, bids_validate=True): """ List the participants under the BIDS root and checks that participants designated with the participant_label argument exist in that folder. @@ -118,11 +117,11 @@ def collect_participants( # Error: bids_dir does not contain subjects if not all_participants: raise BIDSError( - "Could not find participants. Please make sure the BIDS data " - "structure is present and correct. Datasets can be validated " - "online using the BIDS Validator " - "(https://bids-standard.github.io/bids-validator/).\n" - "If you are using Docker for Mac or Docker for Windows, you " + 'Could not find participants. Please make sure the BIDS data ' + 'structure is present and correct. 
Datasets can be validated ' + 'online using the BIDS Validator ' + '(https://bids-standard.github.io/bids-validator/).\n' + 'If you are using Docker for Mac or Docker for Windows, you ' 'may need to adjust your "File sharing" preferences.', bids_dir, ) @@ -135,16 +134,14 @@ def collect_participants( participant_label = [participant_label] # Drop sub- prefixes - participant_label = [ - sub[4:] if sub.startswith("sub-") else sub for sub in participant_label - ] + participant_label = [sub[4:] if sub.startswith('sub-') else sub for sub in participant_label] # Remove duplicates participant_label = sorted(set(participant_label)) # Remove labels not found found_label = sorted(set(participant_label) & all_participants) if not found_label: raise BIDSError( - "Could not find participants [{}]".format(", ".join(participant_label)), + 'Could not find participants [{}]'.format(', '.join(participant_label)), bids_dir, ) @@ -152,12 +149,12 @@ def collect_participants( notfound_label = sorted(set(participant_label) - all_participants) if notfound_label: exc = BIDSError( - "Some participants were not found: {}".format(", ".join(notfound_label)), + 'Some participants were not found: {}'.format(', '.join(notfound_label)), bids_dir, ) if strict: raise exc - warnings.warn(exc.msg, BIDSWarning) + warnings.warn(exc.msg, BIDSWarning, stacklevel=2) return found_label @@ -252,23 +249,18 @@ def collect_data( del layout_get_kwargs[entity] if task: - queries["bold"]["task"] = task + queries['bold']['task'] = task if echo: - queries["bold"]["echo"] = echo + queries['bold']['echo'] = echo subj_data = { - dtype: sorted(layout.get(**layout_get_kwargs, **query)) - for dtype, query in queries.items() + dtype: sorted(layout.get(**layout_get_kwargs, **query)) for dtype, query in queries.items() } # Special case: multi-echo BOLD, grouping echos - if ( - group_echos - and "bold" in subj_data - and any(["_echo-" in bold for bold in subj_data["bold"]]) - ): - subj_data["bold"] = group_multiecho(subj_data["bold"]) + if group_echos and 'bold' in subj_data and any('_echo-' in bold for bold in subj_data['bold']): + subj_data['bold'] = group_multiecho(subj_data['bold']) return subj_data, layout @@ -300,12 +292,12 @@ def _init_layout(in_file=None, bids_dir=None, validate=True, database_path=None) if bids_dir is None: in_file = Path(in_file) for parent in in_file.parents: - if parent.name.startswith("sub-"): + if parent.name.startswith('sub-'): bids_dir = parent.parent.resolve() break if bids_dir is None: - raise RuntimeError("Could not infer BIDS root") + raise RuntimeError('Could not infer BIDS root') layout = BIDSLayout( str(bids_dir), @@ -394,16 +386,16 @@ def group_multiecho(bold_sess): from itertools import groupby def _grp_echos(x): - if "_echo-" not in x: + if '_echo-' not in x: return x - echo = re.search("_echo-\\d*", x).group(0) - return x.replace(echo, "_echo-?") + echo = re.search('_echo-\\d*', x).group(0) + return x.replace(echo, '_echo-?') ses_uids = [] for _, bold in groupby(bold_sess, key=_grp_echos): bold = list(bold) # If single- or dual-echo, flatten list; keep list otherwise. 
- action = getattr(ses_uids, "append" if len(bold) > 2 else "extend") + action = getattr(ses_uids, 'append' if len(bold) > 2 else 'extend') action(bold) return ses_uids @@ -434,17 +426,17 @@ def relative_to_root(path): """ path = Path(path) - if path.name.startswith("sub-"): + if path.name.startswith('sub-'): parents = [path.name] for p in path.parents: parents.insert(0, p.name) - if p.name.startswith("sub-"): + if p.name.startswith('sub-'): return Path(*parents) return path raise ValueError( - f"Could not determine the BIDS root of <{path}>. " - "Only files under a subject directory are currently supported." + f'Could not determine the BIDS root of <{path}>. ' + 'Only files under a subject directory are currently supported.' ) @@ -485,6 +477,6 @@ def check_pipeline_version(cvers, data_desc): return desc = json.loads(data_desc.read_text()) - dvers = desc.get("PipelineDescription", {}).get("Version", "0+unknown") + dvers = desc.get('PipelineDescription', {}).get('Version', '0+unknown') if Version(cvers).public != Version(dvers).public: - return "Previous output generated by version {} found.".format(dvers) + return f'Previous output generated by version {dvers} found.' diff --git a/niworkflows/utils/connections.py b/niworkflows/utils/connections.py index f579ae83607..027e16249d0 100644 --- a/niworkflows/utils/connections.py +++ b/niworkflows/utils/connections.py @@ -29,8 +29,8 @@ """ __all__ = [ - "listify", - "pop_file", + 'listify', + 'pop_file', ] @@ -70,7 +70,9 @@ def listify(value): """ from pathlib import Path + from nipype.interfaces.base import isdefined + if not isdefined(value) or value is None: return value if isinstance(value, (str, bytes, Path)): diff --git a/niworkflows/utils/debug.py b/niworkflows/utils/debug.py index c207bbb0a8f..f7325e8c82e 100644 --- a/niworkflows/utils/debug.py +++ b/niworkflows/utils/debug.py @@ -57,13 +57,13 @@ def setup_exceptionhook(ipython=False): pdb.post_mortem; if not interactive, then invokes default handler. """ - def _pdb_excepthook(type, value, tb): + def _pdb_excepthook(tp, value, tb): import traceback - traceback.print_exception(type, value, tb) + traceback.print_exception(tp, value, tb) print() if is_interactive(): - import pdb + import pdb # noqa: T100 pdb.post_mortem(tb) @@ -71,7 +71,7 @@ def _pdb_excepthook(type, value, tb): from IPython.core import ultratb sys.excepthook = ultratb.FormattedTB( - mode="Verbose", + mode='Verbose', # color_scheme='Linux', call_pdb=is_interactive(), ) diff --git a/niworkflows/utils/images.py b/niworkflows/utils/images.py index c75a238b1b8..e74398cf0c8 100644 --- a/niworkflows/utils/images.py +++ b/niworkflows/utils/images.py @@ -21,9 +21,11 @@ # https://www.nipreps.org/community/licensing/ # """Utilities to manipulate images.""" + +from gzip import GzipFile + import nibabel as nb import numpy as np -from gzip import GzipFile def rotation2canonical(img): @@ -56,15 +58,15 @@ def unsafe_write_nifti_header_and_data(fname, header, data): If you're not using this for NIfTI files specifically, you're playing with Fortran-ordered fire. """ - with open(fname, "wb") as fobj: + with open(fname, 'wb') as fobj: # Avoid setting fname or mtime, for deterministic outputs - if str(fname).endswith(".gz"): - fobj = GzipFile("", "wb", 9, fobj, 0.0) + if str(fname).endswith('.gz'): + fobj = GzipFile('', 'wb', 9, fobj, 0.0) header.write_to(fobj) # This function serializes one block at a time to reduce memory usage a bit # It assumes Fortran-ordered data. 
nb.volumeutils.array_to_file(data, fobj, offset=header.get_data_offset()) - if str(fname).endswith(".gz"): + if str(fname).endswith('.gz'): fobj.close() @@ -82,11 +84,11 @@ def _copyxform(ref_image, out_image, message=None): if not np.allclose(orig.affine, resampled.affine): from nipype import logging - logging.getLogger("nipype.interface").debug( - "Affines of input and reference images do not match, " - "FMRIPREP will set the reference image headers. " - "Please, check that the x-form matrices of the input dataset" - "are correct and manually verify the alignment of results." + logging.getLogger('nipype.interface').debug( + 'Affines of input and reference images do not match, ' + 'FMRIPREP will set the reference image headers. ' + 'Please, check that the x-form matrices of the input dataset' + 'are correct and manually verify the alignment of results.' ) # Copy xform infos @@ -95,7 +97,7 @@ def _copyxform(ref_image, out_image, message=None): header = resampled.header.copy() header.set_qform(qform, int(qform_code)) header.set_sform(sform, int(sform_code)) - header["descrip"] = "xform matrices modified by %s." % (message or "(unknown)") + header['descrip'] = 'xform matrices modified by %s.' % (message or '(unknown)') newimg = resampled.__class__(resampled.dataobj, orig.affine, header) newimg.to_filename(out_image) @@ -142,30 +144,30 @@ def overwrite_header(img, fname): header = img.header dataobj = img.dataobj - if getattr(img.dataobj, "_mmap", False): - raise ValueError("Image loaded with `mmap=True`. Aborting unsafe operation.") + if getattr(img.dataobj, '_mmap', False): + raise ValueError('Image loaded with `mmap=True`. Aborting unsafe operation.') set_consumables(header, dataobj) ondisk = nb.load(fname, mmap=False) - errmsg = "Cannot overwrite header (reason: {}).".format + errmsg = 'Cannot overwrite header (reason: {}).'.format if not isinstance(ondisk.header, img.header_class): - raise ValueError(errmsg("inconsistent header objects")) + raise ValueError(errmsg('inconsistent header objects')) if ( ondisk.get_data_dtype() != img.get_data_dtype() or img.header.get_data_shape() != ondisk.shape ): - raise ValueError(errmsg("data blocks are not the same size")) + raise ValueError(errmsg('data blocks are not the same size')) - if img.header["vox_offset"] != ondisk.dataobj.offset: - raise ValueError(errmsg("change in offset from start of file")) + if img.header['vox_offset'] != ondisk.dataobj.offset: + raise ValueError(errmsg('change in offset from start of file')) if not np.allclose( - img.header["scl_slope"], ondisk.dataobj.slope, equal_nan=True - ) or not np.allclose(img.header["scl_inter"], ondisk.dataobj.inter, equal_nan=True): - raise ValueError(errmsg("change in scale factors")) + img.header['scl_slope'], ondisk.dataobj.slope, equal_nan=True + ) or not np.allclose(img.header['scl_inter'], ondisk.dataobj.inter, equal_nan=True): + raise ValueError(errmsg('change in scale factors')) data = np.asarray(dataobj.get_unscaled()) img._dataobj = data # Allow old dataobj to be garbage collected @@ -174,7 +176,7 @@ def overwrite_header(img, fname): def update_header_fields(fname, **kwargs): - """ Adjust header fields """ + """Adjust header fields""" # No-op if not kwargs: return @@ -187,16 +189,17 @@ def update_header_fields(fname, **kwargs): def dseg_label(in_seg, label, newpath=None): """Extract a particular label from a discrete segmentation.""" from pathlib import Path + import nibabel as nb import numpy as np from nipype.utils.filemanip import fname_presuffix - newpath = Path(newpath or ".") 
+ newpath = Path(newpath or '.') nii = nb.load(in_seg) data = np.int16(nii.dataobj) == label - out_file = fname_presuffix(in_seg, suffix="_mask", newpath=str(newpath.absolute())) + out_file = fname_presuffix(in_seg, suffix='_mask', newpath=str(newpath.absolute())) new = nii.__class__(data, nii.affine, nii.header) new.set_data_dtype(np.uint8) new.to_filename(out_file) @@ -206,8 +209,9 @@ def dseg_label(in_seg, label, newpath=None): def resample_by_spacing(in_file, zooms, order=3, clip=True, smooth=False): """Regrid the input image to match the new zooms.""" from pathlib import Path - import numpy as np + import nibabel as nb + import numpy as np from scipy.ndimage import map_coordinates if isinstance(in_file, (str, Path)): @@ -239,7 +243,7 @@ def resample_by_spacing(in_file, zooms, order=3, clip=True, smooth=False): np.arange(new_size[0]), np.arange(new_size[1]), np.arange(new_size[2]), - indexing="ij", + indexing='ij', ) ).reshape((3, -1)) @@ -250,6 +254,7 @@ def resample_by_spacing(in_file, zooms, order=3, clip=True, smooth=False): if smooth: from scipy.ndimage import gaussian_filter + if smooth is True: smooth = np.maximum(0, (pre_zooms / zooms - 1) / 2) data = gaussian_filter(in_file.get_fdata(), smooth) @@ -261,7 +266,7 @@ def resample_by_spacing(in_file, zooms, order=3, clip=True, smooth=False): data, ijk[:3, :], order=order, - mode="constant", + mode='constant', cval=0, prefilter=True, ).reshape(new_size) @@ -293,11 +298,12 @@ def resample_by_spacing(in_file, zooms, order=3, clip=True, smooth=False): def demean(in_file, in_mask, only_mask=False, newpath=None): """Demean ``in_file`` within the mask defined by ``in_mask``.""" import os - import numpy as np + import nibabel as nb + import numpy as np from nipype.utils.filemanip import fname_presuffix - out_file = fname_presuffix(in_file, suffix="_demeaned", newpath=os.getcwd()) + out_file = fname_presuffix(in_file, suffix='_demeaned', newpath=os.getcwd()) nii = nb.load(in_file) msk = np.asanyarray(nb.load(in_mask).dataobj) data = nii.get_fdata() @@ -312,13 +318,14 @@ def demean(in_file, in_mask, only_mask=False, newpath=None): def nii_ones_like(in_file, value, dtype, newpath=None): """Create a NIfTI file filled with ``value``, matching properties of ``in_file``.""" import os - import numpy as np + import nibabel as nb + import numpy as np nii = nb.load(in_file) data = np.ones(nii.shape, dtype=float) * value - out_file = os.path.join(newpath or os.getcwd(), "filled.nii.gz") + out_file = os.path.join(newpath or os.getcwd(), 'filled.nii.gz') nii = nb.Nifti1Image(data, nii.affine, nii.header) nii.set_data_dtype(dtype) nii.to_filename(out_file) diff --git a/niworkflows/utils/misc.py b/niworkflows/utils/misc.py index 18204ee116f..e06b956f86e 100644 --- a/niworkflows/utils/misc.py +++ b/niworkflows/utils/misc.py @@ -21,27 +21,28 @@ # https://www.nipreps.org/community/licensing/ # """Miscellaneous utilities.""" + +from __future__ import annotations + import os -from typing import Optional import warnings - __all__ = [ - "get_template_specs", - "fix_multi_T1w_source_name", - "add_suffix", - "read_crashfile", - "splitext", - "_copy_any", - "clean_directory", + 'get_template_specs', + 'fix_multi_T1w_source_name', + 'add_suffix', + 'read_crashfile', + 'splitext', + '_copy_any', + 'clean_directory', ] def get_template_specs( in_template: str, - template_spec: Optional[dict] = None, + template_spec: dict | None = None, default_resolution: int = 1, - fallback: bool = False + fallback: bool = False, ): """ Parse template specifications @@ -82,10 +83,10 
@@ def get_template_specs( # Massage spec (start creating if None) template_spec = template_spec or {} - template_spec["desc"] = template_spec.get("desc", None) - template_spec["atlas"] = template_spec.get("atlas", None) - template_spec["resolution"] = template_spec.pop( - "res", template_spec.get("resolution", default_resolution) + template_spec['desc'] = template_spec.get('desc', None) + template_spec['atlas'] = template_spec.get('atlas', None) + template_spec['resolution'] = template_spec.pop( + 'res', template_spec.get('resolution', default_resolution) ) # Verify resolution is valid @@ -94,7 +95,7 @@ def get_template_specs( if not isinstance(res, list): try: res = [int(res)] - except Exception: + except ValueError: res = None if res is None: res = [] @@ -103,31 +104,30 @@ def get_template_specs( if not (set(res) & set(available_resolutions)): fallback_res = available_resolutions[0] if available_resolutions else None warnings.warn( - f"Template {in_template} does not have resolution: {res}." - f"Falling back to resolution: {fallback_res}." + f'Template {in_template} does not have resolution: {res}.' + f'Falling back to resolution: {fallback_res}.', + stacklevel=1, ) - template_spec["resolution"] = fallback_res + template_spec['resolution'] = fallback_res - common_spec = {"resolution": template_spec["resolution"]} - if "cohort" in template_spec: - common_spec["cohort"] = template_spec["cohort"] + common_spec = {'resolution': template_spec['resolution']} + if 'cohort' in template_spec: + common_spec['cohort'] = template_spec['cohort'] tpl_target_path = tf.get(in_template, **template_spec) if not tpl_target_path: raise RuntimeError( - """\ -Could not find template "{0}" with specs={1}. Please revise your template \ -argument.""".format( - in_template, template_spec - ) + f"""\ +Could not find template "{in_template}" with specs={template_spec}. Please revise your template \ +argument.""" ) if isinstance(tpl_target_path, list): raise RuntimeError( """\ -The available template modifiers ({0}) did not select a unique template \ -(got "{1}"). Please revise your template argument.""".format( - template_spec, ", ".join([str(p) for p in tpl_target_path]) +The available template modifiers ({}) did not select a unique template \ +(got "{}"). 
Please revise your template argument.""".format( + template_spec, ', '.join([str(p) for p in tpl_target_path]) ) ) @@ -150,6 +150,7 @@ def fix_multi_T1w_source_name(in_files): """ import os + from nipype.utils.filemanip import filename_to_list in_file = filename_to_list(in_files)[0] @@ -157,8 +158,8 @@ def fix_multi_T1w_source_name(in_files): in_file = in_file[0] base, in_file = os.path.split(in_file) - subject_label = in_file.split("_", 1)[0].split("-")[1] - return os.path.join(base, "sub-%s_T1w.nii.gz" % subject_label) + subject_label = in_file.split('_', 1)[0].split('-')[1] + return os.path.join(base, f'sub-{subject_label}_T1w.nii.gz') def add_suffix(in_files, suffix): @@ -172,31 +173,32 @@ def add_suffix(in_files, suffix): """ import os.path as op - from nipype.utils.filemanip import fname_presuffix, filename_to_list + + from nipype.utils.filemanip import filename_to_list, fname_presuffix return op.basename(fname_presuffix(filename_to_list(in_files)[0], suffix=suffix)) def read_crashfile(path): - if path.endswith(".pklz"): + if path.endswith('.pklz'): return _read_pkl(path) - elif path.endswith(".txt"): + elif path.endswith('.txt'): return _read_txt(path) - raise RuntimeError("unknown crashfile format") + raise RuntimeError('unknown crashfile format') def _read_pkl(path): from nipype.utils.filemanip import loadcrash crash_data = loadcrash(path) - data = {"file": path, "traceback": "".join(crash_data["traceback"])} - if "node" in crash_data: - data["node"] = crash_data["node"] - if data["node"].base_dir: - data["node_dir"] = data["node"].output_dir() + data = {'file': path, 'traceback': ''.join(crash_data['traceback'])} + if 'node' in crash_data: + data['node'] = crash_data['node'] + if data['node'].base_dir: + data['node_dir'] = data['node'].output_dir() else: - data["node_dir"] = "Node crashed before execution" - data["inputs"] = sorted(data["node"].inputs.trait_get().items()) + data['node_dir'] = 'Node crashed before execution' + data['inputs'] = sorted(data['node'].inputs.trait_get().items()) return data @@ -215,14 +217,14 @@ def _read_txt(path): from pathlib import Path lines = Path(path).read_text().splitlines() - data = {"file": str(path)} + data = {'file': str(path)} traceback_start = 0 - if lines[0].startswith("Node"): - data["node"] = lines[0].split(": ", 1)[1].strip() - data["node_dir"] = lines[1].split(": ", 1)[1].strip() + if lines[0].startswith('Node'): + data['node'] = lines[0].split(': ', 1)[1].strip() + data['node_dir'] = lines[1].split(': ', 1)[1].strip() inputs = [] - cur_key = "" - cur_val = "" + cur_key = '' + cur_val = '' for i, line in enumerate(lines[5:]): if not line.strip(): continue @@ -234,16 +236,16 @@ def _read_txt(path): if cur_val: inputs.append((cur_key, cur_val.strip())) - if line.startswith("Traceback ("): + if line.startswith('Traceback ('): traceback_start = i + 5 break - cur_key, cur_val = tuple(line.split(" = ", 1)) + cur_key, cur_val = tuple(line.split(' = ', 1)) - data["inputs"] = sorted(inputs) + data['inputs'] = sorted(inputs) else: - data["node_dir"] = "Node crashed before execution" - data["traceback"] = "\n".join(lines[traceback_start:]).strip() + data['node_dir'] = 'Node crashed before execution' + data['traceback'] = '\n'.join(lines[traceback_start:]).strip() return data @@ -272,18 +274,19 @@ def splitext(fname): from pathlib import Path basename = str(Path(fname).name) - stem = Path(basename.rstrip(".gz")).stem - return stem, basename[len(stem):] + stem = Path(basename.rstrip('.gz')).stem + return stem, basename[len(stem) :] def 
_copy_any(src, dst): - import os import gzip + import os from shutil import copyfileobj + from nipype.utils.filemanip import copyfile - src_isgz = os.fspath(src).endswith(".gz") - dst_isgz = os.fspath(dst).endswith(".gz") + src_isgz = os.fspath(src).endswith('.gz') + dst_isgz = os.fspath(dst).endswith('.gz') if not src_isgz and not dst_isgz: copyfile(src, dst, copy=True, use_hardlink=True) return False # Make sure we do not reuse the hardlink later @@ -293,11 +296,11 @@ def _copy_any(src, dst): os.unlink(dst) src_open = gzip.open if src_isgz else open - with src_open(src, "rb") as f_in: - with open(dst, "wb") as f_out: + with src_open(src, 'rb') as f_in: + with open(dst, 'wb') as f_out: if dst_isgz: # Remove FNAME header from gzip (nipreps/fmriprep#1480) - gz_out = gzip.GzipFile("", "wb", 9, f_out, 0.0) + gz_out = gzip.GzipFile('', 'wb', 9, f_out, 0.0) copyfileobj(f_in, gz_out) gz_out.close() else: @@ -317,8 +320,8 @@ def clean_directory(path): This function is not guaranteed to work across multiple threads or processes. """ - from pathlib import Path import shutil + from pathlib import Path try: for f in Path(path).iterdir(): @@ -363,16 +366,17 @@ def check_valid_fs_license(): FreeSurfer successfully executed (valid license) """ - from pathlib import Path import subprocess as sp + from pathlib import Path from tempfile import TemporaryDirectory + from .. import data - with TemporaryDirectory() as tmpdir, data.load.as_path("sentinel.nii.gz") as sentinel: + with TemporaryDirectory() as tmpdir, data.load.as_path('sentinel.nii.gz') as sentinel: # quick FreeSurfer command - _cmd = ("mri_convert", str(sentinel), str(Path(tmpdir) / "out.mgz")) + _cmd = ('mri_convert', str(sentinel), str(Path(tmpdir) / 'out.mgz')) proc = sp.run(_cmd, stdout=sp.PIPE, stderr=sp.STDOUT) - return proc.returncode == 0 and "ERROR:" not in proc.stdout.decode() + return proc.returncode == 0 and 'ERROR:' not in proc.stdout.decode() def unlink(pathlike, missing_ok=False): @@ -385,5 +389,5 @@ def unlink(pathlike, missing_ok=False): raise -if __name__ == "__main__": +if __name__ == '__main__': pass diff --git a/niworkflows/utils/spaces.py b/niworkflows/utils/spaces.py index 3cf8ec1dc72..eb82851bd7c 100644 --- a/niworkflows/utils/spaces.py +++ b/niworkflows/utils/spaces.py @@ -21,36 +21,38 @@ # https://www.nipreps.org/community/licensing/ # """Utilities for tracking and filtering spaces.""" + import argparse -import attr from collections import defaultdict from itertools import product + +import attr from templateflow import api as _tfapi NONSTANDARD_REFERENCES = [ - "T1w", - "T2w", - "anat", - "fsnative", - "func", - "run", - "sbref", - "session", - "individual", - "dwi", - "asl", + 'T1w', + 'T2w', + 'anat', + 'fsnative', + 'func', + 'run', + 'sbref', + 'session', + 'individual', + 'dwi', + 'asl', ] """List of supported nonstandard reference spaces.""" -NONSTANDARD_2D_REFERENCES = ["fsnative"] +NONSTANDARD_2D_REFERENCES = ['fsnative'] """List of supported nonstandard 2D reference spaces.""" FSAVERAGE_DENSITY = { - "fsaverage3": "642", - "fsaverage4": "2562", - "fsaverage5": "10k", - "fsaverage6": "41k", - "fsaverage": "164k", + 'fsaverage3': '642', + 'fsaverage4': '2562', + 'fsaverage5': '10k', + 'fsaverage6': '41k', + 'fsaverage': '164k', } """A map of legacy fsaverageX names to surface densities.""" @@ -150,7 +152,7 @@ class Reference: """ _standard_spaces = tuple(_tfapi.templates()) - _spaces_2d = tuple(_tfapi.templates(suffix="sphere")) + _spaces_2d = tuple(_tfapi.templates(suffix='sphere')) space = attr.ib(default=None, 
type=str) """Name designating this space.""" @@ -167,41 +169,40 @@ class Reference: def __attrs_post_init__(self): """Extract cohort out of spec.""" if self.spec is None: - object.__setattr__(self, "spec", {}) + object.__setattr__(self, 'spec', {}) - if self.space.startswith("fsaverage"): + if self.space.startswith('fsaverage'): space = self.space - object.__setattr__(self, "space", "fsaverage") + object.__setattr__(self, 'space', 'fsaverage') - if "den" not in self.spec or space != "fsaverage": + if 'den' not in self.spec or space != 'fsaverage': spec = self.spec.copy() - spec["den"] = FSAVERAGE_DENSITY[space] - object.__setattr__(self, "spec", spec) + spec['den'] = FSAVERAGE_DENSITY[space] + object.__setattr__(self, 'spec', spec) if (self.space in self._spaces_2d) or (self.space in NONSTANDARD_2D_REFERENCES): - object.__setattr__(self, "dim", 2) + object.__setattr__(self, 'dim', 2) if self.space in self._standard_spaces: - object.__setattr__(self, "standard", True) + object.__setattr__(self, 'standard', True) - _cohorts = ["%s" % t for t in _tfapi.TF_LAYOUT.get_cohorts(template=self.space)] - if "cohort" in self.spec: + _cohorts = [f'{t}' for t in _tfapi.TF_LAYOUT.get_cohorts(template=self.space)] + if 'cohort' in self.spec: if not _cohorts: raise ValueError( - 'standard space "%s" does not accept a cohort ' - "specification." % self.space + f'standard space "{self.space}" does not accept a cohort specification.' ) - if str(self.spec["cohort"]) not in _cohorts: + if str(self.spec['cohort']) not in _cohorts: raise ValueError( - 'standard space "%s" does not contain any cohort ' - 'named "%s".' % (self.space, self.spec["cohort"]) + f'standard space "{self.space}" does not contain any cohort ' + f'named "{self.spec["cohort"]}".' ) elif _cohorts: - _cohorts = ", ".join(['"cohort-%s"' % c for c in _cohorts]) + _cohorts = ', '.join([f'"cohort-{c}"' for c in _cohorts]) raise ValueError( - 'standard space "%s" is not fully defined.\n' - "Set a valid cohort selector from: %s." % (self.space, _cohorts) + f'standard space "{self.space}" is not fully defined.\n' + f'Set a valid cohort selector from: {_cohorts}.' 
) @property @@ -218,9 +219,9 @@ def fullname(self): 'MNIPediatricAsym:cohort-1' """ - if "cohort" not in self.spec: + if 'cohort' not in self.spec: return self.space - return "%s:cohort-%s" % (self.space, self.spec["cohort"]) + return f'{self.space}:cohort-{self.spec["cohort"]}' @property def legacyname(self): @@ -247,18 +248,18 @@ def legacyname(self): True """ - if self.space == "fsaverage" and self.spec["den"] in FSAVERAGE_LEGACY: - return FSAVERAGE_LEGACY[self.spec["den"]] + if self.space == 'fsaverage' and self.spec['den'] in FSAVERAGE_LEGACY: + return FSAVERAGE_LEGACY[self.spec['den']] @space.validator def _check_name(self, attribute, value): - if value.startswith("fsaverage"): + if value.startswith('fsaverage'): return valid = list(self._standard_spaces) + NONSTANDARD_REFERENCES if value not in valid: raise ValueError( - 'space identifier "%s" is invalid.\nValid ' - "identifiers are: %s" % (value, ", ".join(valid)) + f'space identifier "{value}" is invalid.\n' + f'Valid identifiers are: {", ".join(valid)}' ) def __str__(self): @@ -271,8 +272,8 @@ def __str__(self): 'MNIPediatricAsym:cohort-2:res-1' """ - return ":".join( - [self.space] + ["-".join((k, str(v))) for k, v in sorted(self.spec.items())] + return ':'.join( + [self.space] + ['-'.join((k, str(v))) for k, v in sorted(self.spec.items())] ) @classmethod @@ -344,10 +345,10 @@ def from_string(cls, value): Reference(space='MNIPediatricAsym', spec={'cohort': '6', 'res': 'iso1.6mm'})] """ - _args = value.split(":") + _args = value.split(':') spec = defaultdict(list, {}) for modifier in _args[1:]: - mitems = modifier.split("-", 1) + mitems = modifier.split('-', 1) spec[mitems[0]].append(len(mitems) == 1 or mitems[1]) allspecs = _expand_entities(spec) @@ -463,7 +464,7 @@ class SpatialReferences: """ - __slots__ = ("_refs", "_cached") + __slots__ = ('_refs', '_cached') standard_spaces = tuple(_tfapi.templates()) """List of supported standard reference spaces.""" @@ -509,7 +510,7 @@ def __init__(self, spaces=None, checkpoint=False): def __iadd__(self, b): """Append a list of transforms to the internal list.""" if not isinstance(b, (list, tuple)): - raise TypeError("Must be a list.") + raise TypeError('Must be a list.') for space in b: self.append(space) @@ -541,8 +542,8 @@ def __str__(self): Spatial References: MNI152NLin2009cAsym, fsaverage:den-10k """ - spaces = ", ".join([str(s) for s in self.references]) or "." - return "Spatial References: %s" % spaces + spaces = ', '.join([str(s) for s in self.references]) or '.' + return f'Spatial References: {spaces}' @property def references(self): @@ -553,7 +554,7 @@ def references(self): def cached(self): """Get cached spaces, raise error if not cached.""" if not self.is_cached(): - raise ValueError("References have not been cached") + raise ValueError('References have not been cached') return self._cached def is_cached(self): @@ -562,7 +563,7 @@ def is_cached(self): def checkpoint(self, force=False): """Cache and freeze current spaces to separate attribute.""" if self.is_cached() and not force: - raise ValueError("References have already been cached") + raise ValueError('References have already been cached') self._cached = self.__class__(self.references) def add(self, value): @@ -576,14 +577,14 @@ def append(self, value): self._refs += [self.check_space(value)] return - raise ValueError('space "%s" already in spaces.' 
% str(value)) + raise ValueError(f'space "{value}" already in spaces.') def insert(self, index, value, error=True): """Concatenate one more space.""" if value not in self: self._refs.insert(index, self.check_space(value)) elif error is True: - raise ValueError('space "%s" already in spaces.' % str(value)) + raise ValueError(f'space "{value}" already in spaces.') def get_spaces(self, standard=True, nonstandard=True, dim=(2, 3)): """ @@ -654,9 +655,7 @@ def get_standard(self, full_spec=False, dim=(2, 3)): return [ s for s in self.references - if s.standard - and s.dim in dim - and (hasspec("res", s.spec) or hasspec("den", s.spec)) + if s.standard and s.dim in dim and (hasspec('res', s.spec) or hasspec('den', s.spec)) ] def get_nonstandard(self, full_spec=False, dim=(2, 3)): @@ -668,7 +667,7 @@ def get_nonstandard(self, full_spec=False, dim=(2, 3)): for s in self.references if not s.standard and s.dim in dim - and (hasspec("res", s.spec) or hasspec("den", s.spec)) + and (hasspec('res', s.spec) or hasspec('den', s.spec)) ] def get_fs_spaces(self): @@ -698,7 +697,7 @@ def get_fs_spaces(self): return [ s.legacyname or s.space for s in self.references - if s.legacyname or s.space == "fsnative" + if s.legacyname or s.space == 'fsnative' ] @@ -712,18 +711,18 @@ def __call__(self, parser, namespace, values, option_string=None): # option was called without any output spaces, so user does not want outputs spaces.checkpoint() for val in values: - val = val.rstrip(":") + val = val.rstrip(':') if ( val not in NONSTANDARD_REFERENCES - and not val.split(":")[0].startswith("fs") - and ":res-" not in val - and ":resolution-" not in val + and not val.split(':')[0].startswith('fs') + and ':res-' not in val + and ':resolution-' not in val ): # by default, explicitly set volumetric resolution to native # relevant discussions: # https://github.com/nipreps/niworkflows/pull/457#discussion_r375510227 # https://github.com/nipreps/niworkflows/pull/494 - val = ":".join((val, "res-native")) + val = ':'.join((val, 'res-native')) for sp in Reference.from_string(val): spaces.add(sp) setattr(namespace, self.dest, spaces) @@ -749,11 +748,11 @@ def format_reference(in_tuple): 'MNIPediatricAsym_cohort-2_res-2' """ - out = in_tuple[0].split(":") - res = in_tuple[1].get("res", None) or in_tuple[1].get("resolution", None) + out = in_tuple[0].split(':') + res = in_tuple[1].get('res', None) or in_tuple[1].get('resolution', None) if res: - out.append("-".join(("res", str(res)))) - return "_".join(out) + out.append('-'.join(('res', str(res)))) + return '_'.join(out) def reference2dict(in_tuple): @@ -772,13 +771,11 @@ def reference2dict(in_tuple): {'space': 'MNIPediatricAsym', 'cohort': '2', 'resolution': '2', 'density': '91k'} """ - tpl_entities = ("space", "cohort") - retval = { - tpl_entities[i]: v.split("-")[i] for i, v in enumerate(in_tuple[0].split(":")) - } + tpl_entities = ('space', 'cohort') + retval = {tpl_entities[i]: v.split('-')[i] for i, v in enumerate(in_tuple[0].split(':'))} retval.update( { - "resolution" if k == "res" else "density" if k == "den" else k: f"{v}" + 'resolution' if k == 'res' else 'density' if k == 'den' else k: f'{v}' for k, v in in_tuple[1].items() } ) @@ -813,4 +810,4 @@ def _expand_entities(entities): """ keys = list(entities.keys()) values = list(product(*[entities[k] for k in keys])) - return [{k: v for k, v in zip(keys, combs)} for combs in values] + return [dict(zip(keys, combs)) for combs in values] diff --git a/niworkflows/utils/testing.py b/niworkflows/utils/testing.py index 
4eb0b9a57d5..cf0a271c800 100644 --- a/niworkflows/utils/testing.py +++ b/niworkflows/utils/testing.py @@ -1,6 +1,7 @@ -from copy import deepcopy import json +from copy import deepcopy from pathlib import Path + import yaml @@ -26,38 +27,37 @@ def generate_bids_skeleton(target_path, bids_config): try: bids_dict = json.loads(bids_config) except json.JSONDecodeError: - bids_dict = yaml.load(bids_config, Loader=yaml.Loader) + bids_dict = yaml.safe_load(bids_config) _bids_dict = deepcopy(bids_dict) root = Path(target_path).absolute() root.mkdir(parents=True) - desc = bids_dict.pop("dataset_description", None) + desc = bids_dict.pop('dataset_description', None) if desc is None: # default description - desc = {"Name": "Default", "BIDSVersion": "1.6.0"} - to_json(root / "dataset_description.json", desc) + desc = {'Name': 'Default', 'BIDSVersion': '1.6.0'} + to_json(root / 'dataset_description.json', desc) cached_subject_data = None for subject, sessions in bids_dict.items(): - bids_subject = subject if subject.startswith("sub-") else f"sub-{subject}" + bids_subject = subject if subject.startswith('sub-') else f'sub-{subject}' subj_path = root / bids_subject subj_path.mkdir(exist_ok=True) - if sessions == "*": # special case to copy previous subject data + if sessions == '*': # special case to copy previous subject data sessions = cached_subject_data.copy() if isinstance(sessions, dict): # single session - sessions.update({"session": None}) + sessions.update({'session': None}) sessions = [sessions] cached_subject_data = deepcopy(sessions) for session in sessions: - - ses_name = session.pop("session", None) + ses_name = session.pop('session', None) if ses_name is not None: - bids_session = ses_name if ses_name.startswith("ses-") else f"ses-{ses_name}" - bids_prefix = f"{bids_subject}_{bids_session}" + bids_session = ses_name if ses_name.startswith('ses-') else f'ses-{ses_name}' + bids_prefix = f'{bids_subject}_{bids_session}' curr_path = subj_path / bids_session curr_path.mkdir(exist_ok=True) else: @@ -73,16 +73,16 @@ def generate_bids_skeleton(target_path, bids_config): files = [files] for bids_file in files: - metadata = bids_file.pop("metadata", None) - extension = bids_file.pop("extension", ".nii.gz") - suffix = bids_file.pop("suffix") + metadata = bids_file.pop('metadata', None) + extension = bids_file.pop('extension', '.nii.gz') + suffix = bids_file.pop('suffix') entities = combine_entities(**bids_file) - data_file = modality_path / f"{bids_prefix}{entities}_{suffix}{extension}" + data_file = modality_path / f'{bids_prefix}{entities}_{suffix}{extension}' data_file.touch() if metadata is not None: out_metadata = data_file.parent / data_file.name.replace( - extension, ".json" + extension, '.json' ) to_json(out_metadata, metadata) @@ -96,4 +96,4 @@ def to_json(filename, data): def combine_entities(**entities): - return f"_{'_'.join([f'{lab}-{val}' for lab, val in entities.items()])}" if entities else "" + return f"_{'_'.join([f'{lab}-{val}' for lab, val in entities.items()])}" if entities else '' diff --git a/niworkflows/utils/tests/test_bids_skeleton.py b/niworkflows/utils/tests/test_bids_skeleton.py index ba2b102b026..40dcb64ecec 100644 --- a/niworkflows/utils/tests/test_bids_skeleton.py +++ b/niworkflows/utils/tests/test_bids_skeleton.py @@ -1,152 +1,149 @@ +import json + import pytest from bids import BIDSLayout -import json - from ..testing import generate_bids_skeleton - bids_dir_sessions = { - "dataset_description": {"Name": "sample", "BIDSVersion": "1.6.0"}, - "01": [ # composed of 
dictionaries, pertaining to sessions + 'dataset_description': {'Name': 'sample', 'BIDSVersion': '1.6.0'}, + '01': [ # composed of dictionaries, pertaining to sessions { - "session": "pre", - "anat": [{"suffix": "T1w", "metadata": {"EchoTime": 1}}], # anatomical files - "func": [ # bold files + 'session': 'pre', + 'anat': [{'suffix': 'T1w', 'metadata': {'EchoTime': 1}}], # anatomical files + 'func': [ # bold files { - "task": "rest", - "echo": 1, - "suffix": "bold", - "metadata": { - "RepetitionTime": 0.8, - "EchoTime": 0.5, - "TotalReadoutTime": 0.5, - "PhaseEncodingDirection": "j", + 'task': 'rest', + 'echo': 1, + 'suffix': 'bold', + 'metadata': { + 'RepetitionTime': 0.8, + 'EchoTime': 0.5, + 'TotalReadoutTime': 0.5, + 'PhaseEncodingDirection': 'j', }, }, { - "task": "rest", - "echo": 2, - "suffix": "bold", - "metadata": { - "RepetitionTime": 0.8, - "EchoTime": 0.7, - "TotalReadoutTime": 0.5, - "PhaseEncodingDirection": "j", + 'task': 'rest', + 'echo': 2, + 'suffix': 'bold', + 'metadata': { + 'RepetitionTime': 0.8, + 'EchoTime': 0.7, + 'TotalReadoutTime': 0.5, + 'PhaseEncodingDirection': 'j', }, }, ], }, { - "session": "post", - "anat": {"suffix": "T2w", "metadata": {"EchoTime": 2}}, - "func": { - "task": "rest", - "acq": "lowres", - "suffix": "bold", - "metadata": {"RepetitionTime": 0.8, "PhaseEncodingDirection": "j-"}, + 'session': 'post', + 'anat': {'suffix': 'T2w', 'metadata': {'EchoTime': 2}}, + 'func': { + 'task': 'rest', + 'acq': 'lowres', + 'suffix': 'bold', + 'metadata': {'RepetitionTime': 0.8, 'PhaseEncodingDirection': 'j-'}, }, }, ], - "02": "*", - "03": "*", + '02': '*', + '03': '*', } bids_dir_session_less = { - "01": [ # composed of dictionaries, pertaining to sessions + '01': [ # composed of dictionaries, pertaining to sessions { - "anat": {"suffix": "T1w", "metadata": {"EchoTime": 1}}, - "func": [ # bold files + 'anat': {'suffix': 'T1w', 'metadata': {'EchoTime': 1}}, + 'func': [ # bold files { - "task": "rest", - "echo": 1, - "suffix": "bold", - "metadata": { - "EchoTime": 0.5, - "TotalReadoutTime": 0.5, - "PhaseEncodingDirection": "j", + 'task': 'rest', + 'echo': 1, + 'suffix': 'bold', + 'metadata': { + 'EchoTime': 0.5, + 'TotalReadoutTime': 0.5, + 'PhaseEncodingDirection': 'j', }, }, { - "task": "rest", - "echo": 2, - "suffix": "bold", - "metadata": { - "EchoTime": 0.7, - "TotalReadoutTime": 0.5, - "PhaseEncodingDirection": "j", + 'task': 'rest', + 'echo': 2, + 'suffix': 'bold', + 'metadata': { + 'EchoTime': 0.7, + 'TotalReadoutTime': 0.5, + 'PhaseEncodingDirection': 'j', }, }, ], } ], - "02": "*", - "03": { - "anat": {"suffix": "T1w", "metadata": {"EchoTime": 1}}, - "func": [ # bold files + '02': '*', + '03': { + 'anat': {'suffix': 'T1w', 'metadata': {'EchoTime': 1}}, + 'func': [ # bold files { - "task": "diff", - "echo": 1, - "suffix": "bold", - "metadata": { - "EchoTime": 0.5, - "TotalReadoutTime": 0.5, - "PhaseEncodingDirection": "j", + 'task': 'diff', + 'echo': 1, + 'suffix': 'bold', + 'metadata': { + 'EchoTime': 0.5, + 'TotalReadoutTime': 0.5, + 'PhaseEncodingDirection': 'j', }, }, { - "task": "diff", - "echo": 2, - "suffix": "bold", - "metadata": { - "EchoTime": 0.7, - "TotalReadoutTime": 0.5, - "PhaseEncodingDirection": "j", + 'task': 'diff', + 'echo': 2, + 'suffix': 'bold', + 'metadata': { + 'EchoTime': 0.7, + 'TotalReadoutTime': 0.5, + 'PhaseEncodingDirection': 'j', }, }, ], }, - "04": "*", + '04': '*', } bids_dir_deriv = { - "dataset_description": { - "Name": "derivs", - "DatasetType": "derivative", - "BIDSVersion": "1.9.0", - "GeneratedBy": [ - {"Name": 
"Niworkflows"} - ] + 'dataset_description': { + 'Name': 'derivs', + 'DatasetType': 'derivative', + 'BIDSVersion': '1.9.0', + 'GeneratedBy': [{'Name': 'Niworkflows'}], }, - "01": { - "anat": [ - {"suffix": "white", "hemi": "L", "extension": ".surf.gii"}, - {"suffix": "white", "hemi": "R", "extension": ".surf.gii"}, - {"suffix": "xfm", "to": "MNI152NLin2009cAsym", "from": "T1w", "extension": ".h5"} + '01': { + 'anat': [ + {'suffix': 'white', 'hemi': 'L', 'extension': '.surf.gii'}, + {'suffix': 'white', 'hemi': 'R', 'extension': '.surf.gii'}, + {'suffix': 'xfm', 'to': 'MNI152NLin2009cAsym', 'from': 'T1w', 'extension': '.h5'}, ] - } + }, } @pytest.mark.parametrize( - "test_id,json_layout,n_files,n_subjects,n_sessions", + ('test_id', 'json_layout', 'n_files', 'n_subjects', 'n_sessions'), [ ('sessions', bids_dir_sessions, 31, 3, 2), ('nosession', bids_dir_session_less, 25, 4, 0), - ('derivatives', bids_dir_deriv, 4, 1, 0) + ('derivatives', bids_dir_deriv, 4, 1, 0), ], ) def test_generate_bids_skeleton(tmp_path, test_id, json_layout, n_files, n_subjects, n_sessions): root = tmp_path / test_id generate_bids_skeleton(root, json_layout) - datadesc = root / "dataset_description.json" + datadesc = root / 'dataset_description.json' assert datadesc.exists() desc = json.loads(datadesc.read_text()) - assert "BIDSVersion" in desc + assert 'BIDSVersion' in desc if test_id == 'derivatives': - assert desc["DatasetType"] == "derivative" + assert desc['DatasetType'] == 'derivative' - assert len([x for x in root.glob("**/*") if x.is_file()]) == n_files + assert len([x for x in root.glob('**/*') if x.is_file()]) == n_files # ensure layout is valid layout = BIDSLayout(root, validate=False) @@ -154,12 +151,12 @@ def test_generate_bids_skeleton(tmp_path, test_id, json_layout, n_files, n_subje assert len(layout.get_sessions()) == n_sessions if test_id != 'derivatives': - anat = layout.get(suffix="T1w", extension=".nii.gz")[0] - bold = layout.get(suffix="bold", extension=".nii.gz")[0] + anat = layout.get(suffix='T1w', extension='.nii.gz')[0] + bold = layout.get(suffix='bold', extension='.nii.gz')[0] assert anat.get_metadata() assert bold.get_metadata() else: - white = layout.get(suffix="white") + white = layout.get(suffix='white') assert len(white) == 2 - xfm = layout.get(suffix="xfm")[0] + xfm = layout.get(suffix='xfm')[0] assert xfm diff --git a/niworkflows/utils/tests/test_images.py b/niworkflows/utils/tests/test_images.py index 10347654b31..dbcfc97a5e0 100644 --- a/niworkflows/utils/tests/test_images.py +++ b/niworkflows/utils/tests/test_images.py @@ -22,13 +22,13 @@ # import nibabel as nb import numpy as np - import pytest + from ..images import ( - update_header_fields, - overwrite_header, dseg_label, + overwrite_header, resample_by_spacing, + update_header_fields, ) @@ -37,20 +37,20 @@ def random_image(): @pytest.mark.parametrize( - "fields", + 'fields', [ {}, - {"intent_code": 0}, - {"intent_code": 0, "sform_code": 4}, - {"sform_code": 3}, + {'intent_code': 0}, + {'intent_code': 0, 'sform_code': 4}, + {'sform_code': 3}, # Changes to these fields have no effect - {"scl_slope": 3.0, "scl_inter": 3.0}, - {"vox_offset": 20.0}, + {'scl_slope': 3.0, 'scl_inter': 3.0}, + {'vox_offset': 20.0}, ], ) -@pytest.mark.parametrize("slope, inter", [(None, None), (1.0, 0.0), (2.0, 2.0)]) +@pytest.mark.parametrize(('slope', 'inter'), [(None, None), (1.0, 0.0), (2.0, 2.0)]) def test_update_header_fields(tmp_path, fields, slope, inter): - fname = str(tmp_path / "test_file.nii") + fname = str(tmp_path / 'test_file.nii') # Generate 
file init_img = random_image() @@ -70,45 +70,45 @@ def test_update_header_fields(tmp_path, fields, slope, inter): assert np.array_equal(pre_data, post_img.get_fdata()) -@pytest.mark.parametrize("fields", [{"datatype": 2}]) -@pytest.mark.parametrize("slope, inter", [(None, None), (2.0, 2.0)]) +@pytest.mark.parametrize('fields', [{'datatype': 2}]) +@pytest.mark.parametrize(('slope', 'inter'), [(None, None), (2.0, 2.0)]) def test_update_header_fields_exceptions(tmp_path, fields, slope, inter): - fname = str(tmp_path / "test_file.nii") + fname = str(tmp_path / 'test_file.nii') # Generate file img = random_image() img.header.set_slope_inter(slope, inter) img.to_filename(fname) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=r'data blocks are not the same size'): update_header_fields(fname, **fields) def test_overwrite_header_reject_mmap(tmp_path): - fname = str(tmp_path / "test_file.nii") + fname = str(tmp_path / 'test_file.nii') random_image().to_filename(fname) img = nb.load(fname, mmap=True) - with pytest.raises(ValueError): + with pytest.raises(ValueError, match=r'mmap.*unsafe'): overwrite_header(img, fname) def test_dseg_label(tmp_path): - fname = str(tmp_path / "test_file.nii.gz") + fname = str(tmp_path / 'test_file.nii.gz') data = np.dstack( ( - np.zeros((20, 20), dtype="int16"), - np.ones((20, 20), dtype="int16"), - np.ones((20, 20), dtype="int16") * 2, - np.ones((20, 20), dtype="int16") * 3, + np.zeros((20, 20), dtype='int16'), + np.ones((20, 20), dtype='int16'), + np.ones((20, 20), dtype='int16') * 2, + np.ones((20, 20), dtype='int16') * 3, ) ) nb.Nifti1Image(data, np.eye(4), None).to_filename(fname) new_im = nb.load(dseg_label(fname, label=2, newpath=tmp_path)) - assert np.all((data == 2).astype("int16") == np.int16(new_im.dataobj)) + assert np.all((data == 2).astype('int16') == np.int16(new_im.dataobj)) def test_resample_by_spacing(): diff --git a/niworkflows/utils/tests/test_misc.py b/niworkflows/utils/tests/test_misc.py index ffe039ba14e..cc4a39a37d8 100644 --- a/niworkflows/utils/tests/test_misc.py +++ b/niworkflows/utils/tests/test_misc.py @@ -21,17 +21,20 @@ # https://www.nipreps.org/community/licensing/ # """Test misc module.""" + import os import shutil from unittest import mock import pytest -from ..misc import pass_dummy_scans, check_valid_fs_license + from niworkflows.testing import has_freesurfer +from ..misc import check_valid_fs_license, pass_dummy_scans + @pytest.mark.parametrize( - "algo_dummy_scans,dummy_scans,expected_out", [(2, 1, 1), (2, None, 2), (2, 0, 0)] + ('algo_dummy_scans', 'dummy_scans', 'expected_out'), [(2, 1, 1), (2, None, 2), (2, 0, 0)] ) def test_pass_dummy_scans(algo_dummy_scans, dummy_scans, expected_out): """Check dummy scans passing.""" @@ -41,32 +44,32 @@ def test_pass_dummy_scans(algo_dummy_scans, dummy_scans, expected_out): @pytest.mark.parametrize( - "stdout,rc,valid", + ('stdout', 'rc', 'valid'), [ - (b"Successful command", 0, True), - (b"", 0, True), - (b"ERROR: FreeSurfer license file /made/up/license.txt not found", 1, False), - (b"Failed output", 1, False), - (b"ERROR: Systems running GNU glibc version greater than 2.15", 0, False), + (b'Successful command', 0, True), + (b'', 0, True), + (b'ERROR: FreeSurfer license file /made/up/license.txt not found', 1, False), + (b'Failed output', 1, False), + (b'ERROR: Systems running GNU glibc version greater than 2.15', 0, False), ], ) def test_fs_license_check(stdout, rc, valid): - with mock.patch("subprocess.run") as mocked_run: + with mock.patch('subprocess.run') as 
mocked_run: mocked_run.return_value.stdout = stdout mocked_run.return_value.returncode = rc assert check_valid_fs_license() is valid -@pytest.mark.skipif(not has_freesurfer, reason="Needs FreeSurfer") -@pytest.mark.skipif(not os.getenv("FS_LICENSE"), reason="No FS license found") +@pytest.mark.skipif(not has_freesurfer, reason='Needs FreeSurfer') +@pytest.mark.skipif(not os.getenv('FS_LICENSE'), reason='No FS license found') def test_fs_license_check2(monkeypatch): """Execute the canary itself.""" assert check_valid_fs_license() is True -@pytest.mark.skipif(shutil.which('mri_convert') is None, reason="FreeSurfer not installed") +@pytest.mark.skipif(shutil.which('mri_convert') is None, reason='FreeSurfer not installed') def test_fs_license_check3(monkeypatch): with monkeypatch.context() as m: - m.delenv("FS_LICENSE", raising=False) - m.delenv("FREESURFER_HOME", raising=False) + m.delenv('FS_LICENSE', raising=False) + m.delenv('FREESURFER_HOME', raising=False) assert check_valid_fs_license() is False diff --git a/niworkflows/utils/tests/test_spaces.py b/niworkflows/utils/tests/test_spaces.py index 2c72378f987..bf5cfca5b35 100644 --- a/niworkflows/utils/tests/test_spaces.py +++ b/niworkflows/utils/tests/test_spaces.py @@ -21,8 +21,10 @@ # https://www.nipreps.org/community/licensing/ # """Test spaces.""" + import pytest -from ..spaces import Reference, SpatialReferences, OutputReferencesAction + +from ..spaces import OutputReferencesAction, Reference, SpatialReferences @pytest.fixture @@ -32,72 +34,72 @@ def parser(): pars = argparse.ArgumentParser() pars.add_argument( - "--spaces", - nargs="*", + '--spaces', + nargs='*', default=SpatialReferences(), action=OutputReferencesAction, - help="user defined spaces", + help='user defined spaces', ) return pars @pytest.mark.parametrize( - "spaces, expected", + ('spaces', 'expected'), [ - (("MNI152NLin6Asym",), ("MNI152NLin6Asym:res-native",)), + (('MNI152NLin6Asym',), ('MNI152NLin6Asym:res-native',)), ( - ("fsaverage:den-10k", "MNI152NLin6Asym"), - ("fsaverage:den-10k", "MNI152NLin6Asym:res-native"), + ('fsaverage:den-10k', 'MNI152NLin6Asym'), + ('fsaverage:den-10k', 'MNI152NLin6Asym:res-native'), ), ( - ("fsaverage:den-10k:den-30k", "MNI152NLin6Asym:res-1:res-2"), + ('fsaverage:den-10k:den-30k', 'MNI152NLin6Asym:res-1:res-2'), ( - "fsaverage:den-10k", - "fsaverage:den-30k", - "MNI152NLin6Asym:res-1", - "MNI152NLin6Asym:res-2", + 'fsaverage:den-10k', + 'fsaverage:den-30k', + 'MNI152NLin6Asym:res-1', + 'MNI152NLin6Asym:res-2', ), ), ( - ("fsaverage:den-10k:den-30k", "MNI152NLin6Asym:res-1:res-2", "fsaverage5"), + ('fsaverage:den-10k:den-30k', 'MNI152NLin6Asym:res-1:res-2', 'fsaverage5'), ( - "fsaverage:den-10k", - "fsaverage:den-30k", - "MNI152NLin6Asym:res-1", - "MNI152NLin6Asym:res-2", + 'fsaverage:den-10k', + 'fsaverage:den-30k', + 'MNI152NLin6Asym:res-1', + 'MNI152NLin6Asym:res-2', ), ), ( ( - "fsaverage:den-10k:den-30k", - "MNI152NLin6Asym:res-1:res-2", - "fsaverage:den-10k:den-30k", - "MNI152NLin6Asym:res-1:res-2", + 'fsaverage:den-10k:den-30k', + 'MNI152NLin6Asym:res-1:res-2', + 'fsaverage:den-10k:den-30k', + 'MNI152NLin6Asym:res-1:res-2', ), ( - "fsaverage:den-10k", - "fsaverage:den-30k", - "MNI152NLin6Asym:res-1", - "MNI152NLin6Asym:res-2", + 'fsaverage:den-10k', + 'fsaverage:den-30k', + 'MNI152NLin6Asym:res-1', + 'MNI152NLin6Asym:res-2', ), ), - (("MNI152NLin6Asym", "func"), ("MNI152NLin6Asym:res-native", "func")), + (('MNI152NLin6Asym', 'func'), ('MNI152NLin6Asym:res-native', 'func')), ], ) def test_space_action(parser, spaces, expected): 
"""Test action.""" - pargs = parser.parse_known_args(args=("--spaces",) + spaces)[0] + pargs = parser.parse_known_args(args=('--spaces',) + spaces)[0] parsed_spaces = pargs.spaces assert isinstance(parsed_spaces, SpatialReferences) assert all( isinstance(sp, Reference) for sp in parsed_spaces.references - ), "Every element must be a `Reference`" + ), 'Every element must be a `Reference`' assert len(parsed_spaces.references) == len(expected) for ref, expected_ref in zip(parsed_spaces.references, expected): assert str(ref) == expected_ref -@pytest.mark.parametrize("flag,expected", [(("--spaces",), True), (None, False)]) +@pytest.mark.parametrize(('flag', 'expected'), [(('--spaces',), True), (None, False)]) def test_space_action_edgecases(parser, flag, expected): pargs = parser.parse_known_args(flag)[0] spaces = pargs.spaces diff --git a/niworkflows/utils/tests/test_utils.py b/niworkflows/utils/tests/test_utils.py index 6c0914212a7..952c55533fa 100644 --- a/niworkflows/utils/tests/test_utils.py +++ b/niworkflows/utils/tests/test_utils.py @@ -21,37 +21,39 @@ # https://www.nipreps.org/community/licensing/ # """Test utils""" + import os from pathlib import Path from subprocess import check_call + from niworkflows.utils.misc import _copy_any, clean_directory def test_copy_gzip(tmpdir): - filepath = tmpdir / "name1.txt" - filepath2 = tmpdir / "name2.txt" + filepath = tmpdir / 'name1.txt' + filepath2 = tmpdir / 'name2.txt' assert not filepath2.exists() - open(str(filepath), "w").close() - check_call(["gzip", "-N", str(filepath)]) + open(str(filepath), 'w').close() + check_call(['gzip', '-N', str(filepath)]) # noqa: S607 XXX replace with gzip module assert not filepath.exists() - gzpath1 = "%s/%s" % (tmpdir, "name1.txt.gz") - gzpath2 = "%s/%s" % (tmpdir, "name2.txt.gz") + gzpath1 = str(tmpdir / 'name1.txt.gz') + gzpath2 = str(tmpdir / 'name2.txt.gz') _copy_any(gzpath1, gzpath2) assert Path(gzpath2).exists() - check_call(["gunzip", "-N", "-f", gzpath2]) + check_call(['gunzip', '-N', '-f', gzpath2]) # noqa: S607 XXX replace with gzip module assert not filepath.exists() assert filepath2.exists() def test_clean_protected(tmp_path): - base = tmp_path / "cleanme" + base = tmp_path / 'cleanme' base.mkdir() empty_size = _size(str(base)) _gen_skeleton(base) # initial skeleton - readonly = base / "readfile" - readonly.write_text("delete me") + readonly = base / 'readfile' + readonly.write_text('delete me') readonly.chmod(0o444) assert empty_size < _size(str(base)) @@ -60,17 +62,17 @@ def test_clean_protected(tmp_path): def test_clean_symlink(tmp_path): - base = tmp_path / "cleanme" + base = tmp_path / 'cleanme' base.mkdir() empty_size = _size(str(base)) _gen_skeleton(base) # initial skeleton - keep = tmp_path / "keepme" + keep = tmp_path / 'keepme' keep.mkdir() - keepf = keep / "keepfile" - keepf.write_text("keep me") + keepf = keep / 'keepfile' + keepf.write_text('keep me') keep_size = _size(str(keep)) - slink = base / "slink" + slink = base / 'slink' slink.symlink_to(keep) assert empty_size < _size(str(base)) @@ -82,11 +84,11 @@ def test_clean_symlink(tmp_path): def _gen_skeleton(root): dirs, files = [], [] - files.append(root / "file1") - files.append(root / ".file2") - dirs.append(root / "subdir1") - files.append(dirs[0] / "file3") - files.append(dirs[0] / ".file4") + files.append(root / 'file1') + files.append(root / '.file2') + dirs.append(root / 'subdir1') + files.append(dirs[0] / 'file3') + files.append(dirs[0] / '.file4') for d in dirs: d.mkdir() for f in files: diff --git 
a/niworkflows/utils/timeseries.py b/niworkflows/utils/timeseries.py index 4b7199d674e..d66b11cd726 100644 --- a/niworkflows/utils/timeseries.py +++ b/niworkflows/utils/timeseries.py @@ -21,47 +21,43 @@ # https://www.nipreps.org/community/licensing/ # """Extracting signals from NIfTI and CIFTI2 files.""" -import numpy as np + import nibabel as nb +import numpy as np def _cifti_timeseries(dataset): """Extract timeseries from CIFTI2 dataset.""" dataset = nb.load(dataset) if isinstance(dataset, str) else dataset - if dataset.nifti_header.get_intent()[0] != "ConnDenseSeries": - raise ValueError("Not a dense timeseries") + if dataset.nifti_header.get_intent()[0] != 'ConnDenseSeries': + raise ValueError('Not a dense timeseries') matrix = dataset.header.matrix labels = { - "CIFTI_STRUCTURE_CORTEX_LEFT": "CtxL", - "CIFTI_STRUCTURE_CORTEX_RIGHT": "CtxR", - "CIFTI_STRUCTURE_CEREBELLUM_LEFT": "CbL", - "CIFTI_STRUCTURE_CEREBELLUM_RIGHT": "CbR", + 'CIFTI_STRUCTURE_CORTEX_LEFT': 'CtxL', + 'CIFTI_STRUCTURE_CORTEX_RIGHT': 'CtxR', + 'CIFTI_STRUCTURE_CEREBELLUM_LEFT': 'CbL', + 'CIFTI_STRUCTURE_CEREBELLUM_RIGHT': 'CbR', } - seg = {label: [] for label in list(labels.values()) + ["Other"]} + seg = {label: [] for label in list(labels.values()) + ['Other']} for bm in matrix.get_index_map(1).brain_models: - label = ( - "Other" if bm.brain_structure not in labels else - labels[bm.brain_structure] - ) - seg[label] += list(range( - bm.index_offset, bm.index_offset + bm.index_count - )) + label = 'Other' if bm.brain_structure not in labels else labels[bm.brain_structure] + seg[label] += list(range(bm.index_offset, bm.index_offset + bm.index_count)) - return dataset.get_fdata(dtype="float32").T, seg + return dataset.get_fdata(dtype='float32').T, seg def _nifti_timeseries( dataset, segmentation=None, - labels=("Ctx GM", "dGM", "WM+CSF", "Cb", "Crown"), + labels=('Ctx GM', 'dGM', 'WM+CSF', 'Cb', 'Crown'), remap_rois=False, lut=None, ): """Extract timeseries from NIfTI1/2 datasets.""" dataset = nb.load(dataset) if isinstance(dataset, str) else dataset - data = dataset.get_fdata(dtype="float32").reshape((-1, dataset.shape[-1])) + data = dataset.get_fdata(dtype='float32').reshape((-1, dataset.shape[-1])) if segmentation is None: return data, None @@ -75,11 +71,11 @@ def _nifti_timeseries( # Map segmentation if remap_rois or lut is not None: if lut is None: - lut = np.zeros((256,), dtype="uint8") + lut = np.zeros((256,), dtype='uint8') lut[100:201] = 1 # Ctx GM - lut[30:99] = 2 # dGM - lut[1:11] = 3 # WM+CSF - lut[255] = 4 # Cerebellum + lut[30:99] = 2 # dGM + lut[1:11] = 3 # WM+CSF + lut[255] = 4 # Cerebellum # Apply lookup table segmentation = lut[segmentation] diff --git a/niworkflows/viz/__init__.py b/niworkflows/viz/__init__.py index d2f5ad358aa..0a86420550c 100644 --- a/niworkflows/viz/__init__.py +++ b/niworkflows/viz/__init__.py @@ -9,6 +9,6 @@ 'Niworkflows will be deprecating visualizations in favor of a standalone library "nireports".' 
) -warnings.warn(msg, PendingDeprecationWarning) +warnings.warn(msg, PendingDeprecationWarning, stacklevel=2) -__all__ = ["plot_carpet", "SVGNS"] +__all__ = ['plot_carpet', 'SVGNS'] diff --git a/niworkflows/viz/notebook.py b/niworkflows/viz/notebook.py index dc6aed68b12..da31720cfe2 100644 --- a/niworkflows/viz/notebook.py +++ b/niworkflows/viz/notebook.py @@ -21,10 +21,13 @@ # https://www.nipreps.org/community/licensing/ # """Visualization component for Jupyter Notebooks.""" + from pathlib import Path -import numpy as np + import nibabel as nb -from .utils import compose_view, plot_registration, cuts_from_bbox +import numpy as np + +from .utils import compose_view, cuts_from_bbox, plot_registration def display( @@ -32,11 +35,12 @@ def display( moving_image, contour=None, cuts=None, - fixed_label="F", - moving_label="M", + fixed_label='F', + moving_label='M', ): """Plot the flickering panels to show a registration process.""" - from IPython.display import SVG, display as _disp + from IPython.display import SVG + from IPython.display import display as _disp if isinstance(fixed_image, (str, Path)): fixed_image = nb.load(str(fixed_image)) @@ -51,9 +55,9 @@ def display( cuts = cuts_from_bbox(contour, cuts=n_cuts) else: hdr = fixed_image.header.copy() - hdr.set_data_dtype("uint8") + hdr.set_data_dtype('uint8') mask_nii = nb.Nifti1Image( - np.ones(fixed_image.shape, dtype="uint8"), fixed_image.affine, hdr + np.ones(fixed_image.shape, dtype='uint8'), fixed_image.affine, hdr ) cuts = cuts_from_bbox(mask_nii, cuts=n_cuts) @@ -63,7 +67,7 @@ def display( compose_view( plot_registration( fixed_image, - "fixed-image", + 'fixed-image', estimate_brightness=True, cuts=cuts, label=fixed_label, @@ -72,7 +76,7 @@ def display( ), plot_registration( moving_image, - "moving-image", + 'moving-image', estimate_brightness=True, cuts=cuts, label=moving_label, diff --git a/niworkflows/viz/plots.py b/niworkflows/viz/plots.py index a591ce6704b..3ae21769cf8 100644 --- a/niworkflows/viz/plots.py +++ b/niworkflows/viz/plots.py @@ -22,16 +22,15 @@ # """Plotting tools shared across MRIQC and fMRIPrep.""" -import numpy as np +import matplotlib.cm as cm +import matplotlib.pyplot as plt import nibabel as nb +import numpy as np import pandas as pd - -import matplotlib.pyplot as plt from matplotlib import colormaps from matplotlib import gridspec as mgs -import matplotlib.cm as cm -from matplotlib.colors import Normalize from matplotlib.colorbar import ColorbarBase +from matplotlib.colors import Normalize DINA4_LANDSCAPE = (11.69, 8.27) @@ -40,14 +39,14 @@ class fMRIPlot: """Generates the fMRI Summary Plot.""" __slots__ = ( - "timeseries", - "segments", - "tr", - "confounds", - "spikes", - "nskip", - "sort_carpet", - "paired_carpet", + 'timeseries', + 'segments', + 'tr', + 'confounds', + 'spikes', + 'nskip', + 'sort_carpet', + 'paired_carpet', ) def __init__( @@ -78,14 +77,14 @@ def __init__( vlines = {} self.confounds = {} if confounds is None and conf_file: - confounds = pd.read_csv(conf_file, sep=r"[\t\s]+", usecols=usecols, index_col=False) + confounds = pd.read_csv(conf_file, sep=r'[\t\s]+', usecols=usecols, index_col=False) if confounds is not None: for name in confounds.columns: self.confounds[name] = { - "values": confounds[[name]].values.squeeze().tolist(), - "units": units.get(name), - "cutoff": vlines.get(name), + 'values': confounds[[name]].values.squeeze().tolist(), + 'units': units.get(name), + 'cutoff': vlines.get(name), } self.spikes = [] @@ -97,8 +96,8 @@ def plot(self, figure=None): """Main plotter""" import 
seaborn as sns - sns.set_style("whitegrid") - sns.set_context("paper", font_scale=0.8) + sns.set_style('whitegrid') + sns.set_context('paper', font_scale=0.8) if figure is None: figure = plt.gcf() @@ -120,10 +119,10 @@ def plot(self, figure=None): if self.confounds: from seaborn import color_palette - palette = color_palette("husl", nconfounds) + palette = color_palette('husl', nconfounds) for i, (name, kwargs) in enumerate(self.confounds.items()): - tseries = kwargs.pop("values") + tseries = kwargs.pop('values') confoundplot(tseries, grid[grid_id], tr=self.tr, color=palette[i], name=name, **kwargs) grid_id += 1 @@ -134,7 +133,7 @@ def plot(self, figure=None): tr=self.tr, sort_rows=self.sort_carpet, drop_trs=self.nskip, - cmap="paired" if self.paired_carpet else None, + cmap='paired' if self.paired_carpet else None, ) return figure @@ -149,7 +148,7 @@ def plot_carpet( title=None, output_file=None, size=(900, 1200), - sort_rows="ward", + sort_rows='ward', drop_trs=0, legend=True, ): @@ -193,22 +192,21 @@ def plot_carpet( """ if segments is None: - segments = { - "whole brain (voxels)": list(range(data.shape[0])) - } + segments = {'whole brain (voxels)': list(range(data.shape[0]))} if len(segments) == 1: legend = False if cmap is None: - colors = colormaps["tab10"].colors - elif cmap == "paired": - colors = list(colormaps["Paired"].colors) + colors = colormaps['tab10'].colors + elif cmap == 'paired': + colors = list(colormaps['Paired'].colors) colors[0], colors[1] = colors[1], colors[0] colors[2], colors[7] = colors[7], colors[2] if detrend: from nilearn.signal import clean + data = clean(data.T, t_r=tr, filter=False).T # We want all subplots to have the same dynamic range @@ -223,7 +221,7 @@ def plot_carpet( # Cluster segments (if argument enabled) if sort_rows: - from scipy.cluster.hierarchy import linkage, dendrogram + from scipy.cluster.hierarchy import dendrogram, linkage from sklearn.cluster import ward_tree for seg_label, seg_idx in segments.items(): @@ -231,9 +229,9 @@ def plot_carpet( if len(seg_idx) < 2: continue roi_data = data[seg_idx] - if isinstance(sort_rows, str) and sort_rows.lower() == "linkage": + if isinstance(sort_rows, str) and sort_rows.lower() == 'linkage': linkage_matrix = linkage( - roi_data, method="average", metric="euclidean", optimal_ordering=True + roi_data, method='average', metric='euclidean', optimal_ordering=True ) else: children, _, n_leaves, _, distances = ward_tree(roi_data, return_distance=True) @@ -241,7 +239,7 @@ def plot_carpet( dn = dendrogram(linkage_matrix, no_plot=True) # Override the ordering of the indices in this segment - segments[seg_label] = np.array(seg_idx)[np.array(dn["leaves"])] + segments[seg_label] = np.array(seg_idx)[np.array(dn['leaves'])] # If subplot is not defined if subplot is None: @@ -260,33 +258,33 @@ def plot_carpet( 1, subplot_spec=subplot, hspace=0.05, - height_ratios=[len(v) for v in segments.values()] + height_ratios=[len(v) for v in segments.values()], ) - for i, (label, indices) in enumerate(segments.items()): + for i, indices in enumerate(segments.values()): # Carpet plot ax = plt.subplot(gs[i]) ax.imshow( data[indices, :], - interpolation="nearest", - aspect="auto", - cmap="gray", + interpolation='nearest', + aspect='auto', + cmap='gray', vmin=vminmax[0], vmax=vminmax[1], ) # Toggle the spine objects - ax.spines["top"].set_color("none") - ax.spines["top"].set_visible(False) - ax.spines["right"].set_color("none") - ax.spines["right"].set_visible(False) + ax.spines['top'].set_color('none') + 
ax.spines['top'].set_visible(False) + ax.spines['right'].set_color('none') + ax.spines['right'].set_visible(False) # Make colored left axis - ax.spines["left"].set_linewidth(3) - ax.spines["left"].set_color(colors[i]) - ax.spines["left"].set_capstyle("butt") - ax.spines["left"].set_position(("outward", 2)) + ax.spines['left'].set_linewidth(3) + ax.spines['left'].set_color(colors[i]) + ax.spines['left'].set_capstyle('butt') + ax.spines['left'].set_position(('outward', 2)) # Make all subplots have same xticks xticks = np.linspace(0, data.shape[-1], endpoint=True, num=7) @@ -295,31 +293,31 @@ def plot_carpet( ax.grid(False) if i == (len(segments) - 1): - xlabel = "time-points (index)" - xticklabels = (xticks * n_trs / data.shape[-1]).astype("uint32") + drop_trs + xlabel = 'time-points (index)' + xticklabels = (xticks * n_trs / data.shape[-1]).astype('uint32') + drop_trs if tr is not None: - xlabel = "time (mm:ss)" + xlabel = 'time (mm:ss)' xticklabels = [ - f"{int(t // 60):02d}:{(t % 60).round(0).astype(int):02d}" + f'{int(t // 60):02d}:{(t % 60).round(0).astype(int):02d}' for t in (tr * xticklabels) ] ax.set_xlabel(xlabel) ax.set_xticklabels(xticklabels) - ax.spines["bottom"].set_position(("outward", 5)) - ax.spines["bottom"].set_color("k") - ax.spines["bottom"].set_linewidth(.8) + ax.spines['bottom'].set_position(('outward', 5)) + ax.spines['bottom'].set_color('k') + ax.spines['bottom'].set_linewidth(0.8) else: ax.set_xticklabels([]) ax.set_xticks([]) - ax.spines["bottom"].set_color("none") - ax.spines["bottom"].set_visible(False) + ax.spines['bottom'].set_color('none') + ax.spines['bottom'].set_visible(False) if title and i == 0: ax.set_title(title) if len(segments) == 1: - ax.set_ylabel(label) + ax.set_ylabel(next(iter(segments))) if legend: from matplotlib.patches import Patch @@ -327,7 +325,7 @@ def plot_carpet( axlegend = inset_axes( ax, - width="100%", + width='100%', height=0.01, loc='lower center', borderpad=-4.1, @@ -336,27 +334,24 @@ def plot_carpet( axlegend.set_xticks([]) axlegend.set_yticks([]) axlegend.patch.set_alpha(0.0) - for loc in ("top", "bottom", "left", "right"): - axlegend.spines[loc].set_color("none") + for loc in ('top', 'bottom', 'left', 'right'): + axlegend.spines[loc].set_color('none') axlegend.spines[loc].set_visible(False) axlegend.legend( - handles=[ - Patch(color=colors[i], label=l) - for i, l in enumerate(segments.keys()) - ], - loc="upper center", + handles=[Patch(color=colors[i], label=label) for i, label in enumerate(segments)], + loc='upper center', bbox_to_anchor=(0.5, 0), shadow=False, fancybox=False, ncol=min(len(segments.keys()), 5), frameon=False, - prop={'size': 8} + prop={'size': 8}, ) if output_file is not None: figure = plt.gcf() - figure.savefig(output_file, bbox_inches="tight") + figure.savefig(output_file, bbox_inches='tight') plt.close(figure) figure = None return output_file @@ -370,9 +365,9 @@ def spikesplot( tr=None, zscored=True, spike_thresh=6.0, - title="Spike plot", + title='Spike plot', ax=None, - cmap="viridis", + cmap='viridis', hide_x=True, nskip=0, ): @@ -409,9 +404,9 @@ def spikesplot( ax.plot(ts_z[sl, :], color=colors[sl], lw=0.5) else: markerline, stemlines, baseline = ax.stem(ts_z[sl, :]) - plt.setp(markerline, "markerfacecolor", colors[sl]) - plt.setp(baseline, "color", colors[sl], "linewidth", 1) - plt.setp(stemlines, "color", colors[sl], "linewidth", 1) + plt.setp(markerline, 'markerfacecolor', colors[sl]) + plt.setp(baseline, 'color', colors[sl], 'linewidth', 1) + plt.setp(stemlines, 'color', colors[sl], 'linewidth', 1) # 
Handle X, Y axes ax.grid(False) @@ -424,15 +419,15 @@ def spikesplot( if not hide_x: if tr is None: - ax.set_xlabel("time (frame #)") + ax.set_xlabel('time (frame #)') else: - ax.set_xlabel("time (s)") - ax.set_xticklabels(["%.02f" % t for t in (tr * np.array(xticks)).tolist()]) + ax.set_xlabel('time (s)') + ax.set_xticklabels([f'{t:.2f}' for t in (tr * np.array(xticks)).tolist()]) # Handle Y axis - ylabel = "slice-wise noise average on background" + ylabel = 'slice-wise noise average on background' if zscored: - ylabel += " (z-scored)" + ylabel += ' (z-scored)' zs_max = np.abs(ts_z).max() ax.set_ylim( ( @@ -448,13 +443,13 @@ def spikesplot( # yticks.insert(0, ts_z.min()) # yticks += [ts_z.max()] for val in ytick_vals: - ax.plot((0, ntsteps - 1), (-val, -val), "k:", alpha=0.2) - ax.plot((0, ntsteps - 1), (val, val), "k:", alpha=0.2) + ax.plot((0, ntsteps - 1), (-val, -val), 'k:', alpha=0.2) + ax.plot((0, ntsteps - 1), (val, val), 'k:', alpha=0.2) # Plot spike threshold if zs_max < spike_thresh: - ax.plot((0, ntsteps - 1), (-spike_thresh, -spike_thresh), "k:") - ax.plot((0, ntsteps - 1), (spike_thresh, spike_thresh), "k:") + ax.plot((0, ntsteps - 1), (-spike_thresh, -spike_thresh), 'k:') + ax.plot((0, ntsteps - 1), (spike_thresh, spike_thresh), 'k:') else: yticks = [ ts_z[:, nskip:].min(), @@ -468,20 +463,20 @@ def spikesplot( ax.annotate( ylabel, xy=(0.0, 0.7), - xycoords="axes fraction", + xycoords='axes fraction', xytext=(0, 0), - textcoords="offset points", - va="center", - ha="left", - color="gray", + textcoords='offset points', + va='center', + ha='left', + color='gray', size=4, bbox={ - "boxstyle": "round", - "fc": "w", - "ec": "none", - "color": "none", - "lw": 0, - "alpha": 0.8, + 'boxstyle': 'round', + 'fc': 'w', + 'ec': 'none', + 'color': 'none', + 'lw': 0, + 'alpha': 0.8, }, ) ax.set_yticks([]) @@ -494,21 +489,21 @@ def spikesplot( # ax.plot((0, ntsteps - 1), (yticks[0], yticks[0]), 'k:') # ax.plot((0, ntsteps - 1), (yticks[-1], yticks[-1]), 'k:') - for side in ["top", "right"]: - ax.spines[side].set_color("none") + for side in ['top', 'right']: + ax.spines[side].set_color('none') ax.spines[side].set_visible(False) if not hide_x: - ax.spines["bottom"].set_position(("outward", 10)) - ax.xaxis.set_ticks_position("bottom") + ax.spines['bottom'].set_position(('outward', 10)) + ax.xaxis.set_ticks_position('bottom') else: - ax.spines["bottom"].set_color("none") - ax.spines["bottom"].set_visible(False) + ax.spines['bottom'].set_color('none') + ax.spines['bottom'].set_visible(False) # ax.spines["left"].set_position(('outward', 30)) # ax.yaxis.set_ticks_position('left') - ax.spines["left"].set_visible(False) - ax.spines["left"].set_color(None) + ax.spines['left'].set_visible(False) + ax.spines['left'].set_color(None) # labels = [label for label in ax.yaxis.get_ticklabels()] # labels[0].set_weight('bold') @@ -518,7 +513,7 @@ def spikesplot( return ax -def spikesplot_cb(position, cmap="viridis", fig=None): +def spikesplot_cb(position, cmap='viridis', fig=None): # Add colorbar if fig is None: fig = plt.gcf() @@ -527,12 +522,12 @@ def spikesplot_cb(position, cmap="viridis", fig=None): cb = ColorbarBase( cax, cmap=colormaps[cmap], - spacing="proportional", - orientation="horizontal", + spacing='proportional', + orientation='horizontal', drawedges=False, ) cb.set_ticks([0, 0.5, 1.0]) - cb.set_ticklabels(["Inferior", "(axial slice)", "Superior"]) + cb.set_ticklabels(['Inferior', '(axial slice)', 'Superior']) cb.outline.set_linewidth(0) cb.ax.xaxis.set_tick_params(width=0) return cax @@ -546,7 
+541,7 @@ def confoundplot( units=None, tr=None, hide_x=True, - color="b", + color='b', nskip=0, cutoff=None, ylims=None, @@ -574,52 +569,52 @@ def confoundplot( if not hide_x: if notr: - ax_ts.set_xlabel("time (frame #)") + ax_ts.set_xlabel('time (frame #)') else: - ax_ts.set_xlabel("time (s)") + ax_ts.set_xlabel('time (s)') labels = tr * np.array(xticks) - ax_ts.set_xticklabels(["%.02f" % t for t in labels.tolist()]) + ax_ts.set_xticklabels([f'{t:.2f}' for t in labels.tolist()]) else: ax_ts.set_xticklabels([]) if name is not None: if units is not None: - name += " [%s]" % units + name += f' [{units}]' ax_ts.annotate( name, xy=(0.0, 0.7), xytext=(0, 0), - xycoords="axes fraction", - textcoords="offset points", - va="center", - ha="left", + xycoords='axes fraction', + textcoords='offset points', + va='center', + ha='left', color=color, size=8, bbox={ - "boxstyle": "round", - "fc": "w", - "ec": "none", - "color": "none", - "lw": 0, - "alpha": 0.8, + 'boxstyle': 'round', + 'fc': 'w', + 'ec': 'none', + 'color': 'none', + 'lw': 0, + 'alpha': 0.8, }, ) - for side in ["top", "right"]: - ax_ts.spines[side].set_color("none") + for side in ['top', 'right']: + ax_ts.spines[side].set_color('none') ax_ts.spines[side].set_visible(False) if not hide_x: - ax_ts.spines["bottom"].set_position(("outward", 20)) - ax_ts.xaxis.set_ticks_position("bottom") + ax_ts.spines['bottom'].set_position(('outward', 20)) + ax_ts.xaxis.set_ticks_position('bottom') else: - ax_ts.spines["bottom"].set_color("none") - ax_ts.spines["bottom"].set_visible(False) + ax_ts.spines['bottom'].set_color('none') + ax_ts.spines['bottom'].set_visible(False) # ax_ts.spines["left"].set_position(('outward', 30)) - ax_ts.spines["left"].set_color("none") - ax_ts.spines["left"].set_visible(False) + ax_ts.spines['left'].set_color('none') + ax_ts.spines['left'].set_visible(False) # ax_ts.yaxis.set_ticks_position('left') ax_ts.set_yticks([]) @@ -653,39 +648,39 @@ def confoundplot( p95 = 0 stats_label = ( - r"max: {max:.3f}{units} $\bullet$ mean: {mean:.3f}{units} " - r"$\bullet$ $\sigma$: {sigma:.3f}" - ).format(max=maxv, mean=mean, units=units or "", sigma=stdv) + r'max: {max:.3f}{units} $\bullet$ mean: {mean:.3f}{units} ' + r'$\bullet$ $\sigma$: {sigma:.3f}' + ).format(max=maxv, mean=mean, units=units or '', sigma=stdv) ax_ts.annotate( stats_label, xy=(0.98, 0.7), - xycoords="axes fraction", + xycoords='axes fraction', xytext=(0, 0), - textcoords="offset points", - va="center", - ha="right", + textcoords='offset points', + va='center', + ha='right', color=color, size=4, bbox={ - "boxstyle": "round", - "fc": "w", - "ec": "none", - "color": "none", - "lw": 0, - "alpha": 0.8, + 'boxstyle': 'round', + 'fc': 'w', + 'ec': 'none', + 'color': 'none', + 'lw': 0, + 'alpha': 0.8, }, ) # Annotate percentile 95 - ax_ts.plot((0, ntsteps - 1), [p95] * 2, linewidth=0.1, color="lightgray") + ax_ts.plot((0, ntsteps - 1), [p95] * 2, linewidth=0.1, color='lightgray') ax_ts.annotate( - "%.2f" % p95, + f'{p95:.2f}', xy=(0, p95), xytext=(-1, 0), - textcoords="offset points", - va="center", - ha="right", - color="lightgray", + textcoords='offset points', + va='center', + ha='right', + color='lightgray', size=3, ) @@ -693,16 +688,16 @@ def confoundplot( cutoff = [] for thr in cutoff: - ax_ts.plot((0, ntsteps - 1), [thr] * 2, linewidth=0.2, color="dimgray") + ax_ts.plot((0, ntsteps - 1), [thr] * 2, linewidth=0.2, color='dimgray') ax_ts.annotate( - "%.2f" % thr, + f'{thr:.2f}', xy=(0, thr), xytext=(-1, 0), - textcoords="offset points", - va="center", - ha="right", - 
color="dimgray", + textcoords='offset points', + va='center', + ha='right', + color='dimgray', size=3, ) @@ -712,7 +707,7 @@ def confoundplot( if gs_dist is not None: ax_dist = plt.subplot(gs_dist) sns.displot(tseries, vertical=True, ax=ax_dist) - ax_dist.set_xlabel("Timesteps") + ax_dist.set_xlabel('Timesteps') ax_dist.set_ylim(ax_ts.get_ylim()) ax_dist.set_yticklabels([]) @@ -759,28 +754,28 @@ def compcor_variance_plot( metadata = {} if metadata_sources is None: if len(metadata_files) == 1: - metadata_sources = ["CompCor"] + metadata_sources = ['CompCor'] else: - metadata_sources = ["Decomposition {:d}".format(i) for i in range(len(metadata_files))] + metadata_sources = [f'Decomposition {i:d}' for i in range(len(metadata_files))] for file, source in zip(metadata_files, metadata_sources): - metadata[source] = pd.read_csv(str(file), sep=r"\s+") - metadata[source]["source"] = source + metadata[source] = pd.read_csv(str(file), sep=r'\s+') + metadata[source]['source'] = source metadata = pd.concat(list(metadata.values())) bbox_txt = { - "boxstyle": "round", - "fc": "white", - "ec": "none", - "color": "none", - "linewidth": 0, - "alpha": 0.8, + 'boxstyle': 'round', + 'fc': 'white', + 'ec': 'none', + 'color': 'none', + 'linewidth': 0, + 'alpha': 0.8, } decompositions = [] - data_sources = list(metadata.groupby(["source", "mask"]).groups.keys()) + data_sources = list(metadata.groupby(['source', 'mask']).groups.keys()) for source, mask in data_sources: if not np.isnan( - metadata.loc[(metadata["source"] == source) & (metadata["mask"] == mask)][ - "singular_value" + metadata.loc[(metadata['source'] == source) & (metadata['mask'] == mask)][ + 'singular_value' ].values[0] ): decompositions.append((source, mask)) @@ -793,62 +788,62 @@ def compcor_variance_plot( ax = [plt.axes()] for m, (source, mask) in enumerate(decompositions): - components = metadata[(metadata["mask"] == mask) & (metadata["source"] == source)] + components = metadata[(metadata['mask'] == mask) & (metadata['source'] == source)] if len([m for s, m in decompositions if s == source]) > 1: - title_mask = " ({} mask)".format(mask) + title_mask = f' ({mask} mask)' else: - title_mask = "" - fig_title = "{}{}".format(source, title_mask) + title_mask = '' + fig_title = f'{source}{title_mask}' ax[m].plot( np.arange(components.shape[0] + 1), - [0] + list(100 * components["cumulative_variance_explained"]), - color="purple", + [0] + list(100 * components['cumulative_variance_explained']), + color='purple', linewidth=2.5, ) ax[m].grid(False) - ax[m].set_xlabel("number of components in model") - ax[m].set_ylabel("cumulative variance explained (%)") + ax[m].set_xlabel('number of components in model') + ax[m].set_ylabel('cumulative variance explained (%)') ax[m].set_title(fig_title) varexp = {} for i, thr in enumerate(varexp_thresh): varexp[thr] = ( - np.atleast_1d(np.searchsorted(components["cumulative_variance_explained"], thr)) + np.atleast_1d(np.searchsorted(components['cumulative_variance_explained'], thr)) + 1 ) - ax[m].axhline(y=100 * thr, color="lightgrey", linewidth=0.25) - ax[m].axvline(x=varexp[thr], color="C{}".format(i), linewidth=2, linestyle=":") + ax[m].axhline(y=100 * thr, color='lightgrey', linewidth=0.25) + ax[m].axvline(x=varexp[thr], color=f'C{i}', linewidth=2, linestyle=':') ax[m].text( 0, 100 * thr, - "{:.0f}".format(100 * thr), - fontsize="x-small", + f'{100 * thr:.0f}', + fontsize='x-small', bbox=bbox_txt, ) ax[m].text( varexp[thr][0], 25, - "{} components explain\n{:.0f}% of variance".format(varexp[thr][0], 100 * thr), + 
f'{varexp[thr][0]} components explain\n{100 * thr:.0f}% of variance', rotation=90, - horizontalalignment="center", - fontsize="xx-small", + horizontalalignment='center', + fontsize='xx-small', bbox=bbox_txt, ) ax[m].set_yticks([]) ax[m].set_yticklabels([]) for label in ax[m].xaxis.get_majorticklabels(): - label.set_fontsize("x-small") - label.set_rotation("vertical") - for side in ["top", "right", "left"]: - ax[m].spines[side].set_color("none") + label.set_fontsize('x-small') + label.set_rotation('vertical') + for side in ['top', 'right', 'left']: + ax[m].spines[side].set_color('none') ax[m].spines[side].set_visible(False) if output_file is not None: figure = plt.gcf() - figure.savefig(output_file, bbox_inches="tight") + figure.savefig(output_file, bbox_inches='tight') plt.close(figure) figure = None return output_file @@ -861,7 +856,7 @@ def confounds_correlation_plot( figure=None, max_dim=20, output_file=None, - reference="global_signal", + reference='global_signal', ignore_initial_volumes=0, ): """ @@ -917,9 +912,9 @@ def confounds_correlation_plot( corr = confounds_data.corr() gscorr = corr.copy() - gscorr["index"] = gscorr.index + gscorr['index'] = gscorr.index gscorr[reference] = np.abs(gscorr[reference]) - gs_descending = gscorr.sort_values(by=reference, ascending=False)["index"] + gs_descending = gscorr.sort_values(by=reference, ascending=False)['index'] n_vars = corr.shape[0] max_dim = min(n_vars, max_dim) @@ -940,40 +935,40 @@ def confounds_correlation_plot( mask = np.zeros_like(corr, dtype=bool) mask[np.triu_indices_from(mask)] = True - sns.heatmap(corr, linewidths=0.5, cmap="coolwarm", center=0, square=True, ax=ax0) - ax0.tick_params(axis="both", which="both", width=0) + sns.heatmap(corr, linewidths=0.5, cmap='coolwarm', center=0, square=True, ax=ax0) + ax0.tick_params(axis='both', which='both', width=0) for label in ax0.xaxis.get_majorticklabels(): - label.set_fontsize("small") + label.set_fontsize('small') for label in ax0.yaxis.get_majorticklabels(): - label.set_fontsize("small") + label.set_fontsize('small') sns.barplot( data=gscorr, - x="index", + x='index', y=reference, ax=ax1, order=gs_descending, - palette="Reds_d", + palette='Reds_d', saturation=0.5, ) - ax1.set_xlabel("Confound time series") - ax1.set_ylabel("Magnitude of correlation with {}".format(reference)) - ax1.tick_params(axis="x", which="both", width=0) - ax1.tick_params(axis="y", which="both", width=5, length=5) + ax1.set_xlabel('Confound time series') + ax1.set_ylabel(f'Magnitude of correlation with {reference}') + ax1.tick_params(axis='x', which='both', width=0) + ax1.tick_params(axis='y', which='both', width=5, length=5) for label in ax1.xaxis.get_majorticklabels(): - label.set_fontsize("small") - label.set_rotation("vertical") + label.set_fontsize('small') + label.set_rotation('vertical') for label in ax1.yaxis.get_majorticklabels(): - label.set_fontsize("small") - for side in ["top", "right", "left"]: - ax1.spines[side].set_color("none") + label.set_fontsize('small') + for side in ['top', 'right', 'left']: + ax1.spines[side].set_color('none') ax1.spines[side].set_visible(False) if output_file is not None: figure = plt.gcf() - figure.savefig(output_file, bbox_inches="tight") + figure.savefig(output_file, bbox_inches='tight') plt.close(figure) figure = None return output_file @@ -982,8 +977,8 @@ def confounds_correlation_plot( def cifti_surfaces_plot( in_cifti, - density="32k", - surface_type="inflated", + density='32k', + surface_type='inflated', clip_range=(0, None), output_file=None, **kwargs, @@ 
-1021,34 +1016,34 @@ def cifti_surfaces_plot( def get_surface_meshes(density, surface_type): import templateflow.api as tf - lh, rh = tf.get("fsLR", density=density, suffix=surface_type, extension=[".surf.gii"]) + lh, rh = tf.get('fsLR', density=density, suffix=surface_type, extension=['.surf.gii']) return str(lh), str(rh) - if density != "32k": - raise NotImplementedError("Only 32k density is currently supported.") + if density != '32k': + raise NotImplementedError('Only 32k density is currently supported.') img = nb.cifti2.load(in_cifti) - if img.nifti_header.get_intent()[0] != "ConnDenseSeries": - raise TypeError(f"{in_cifti} is not a dense timeseries CIFTI file") + if img.nifti_header.get_intent()[0] != 'ConnDenseSeries': + raise TypeError(f'{in_cifti} is not a dense timeseries CIFTI file') geo = img.header.get_index_map(1) left_cortex, right_cortex = None, None for bm in geo.brain_models: - if bm.brain_structure == "CIFTI_STRUCTURE_CORTEX_LEFT": + if bm.brain_structure == 'CIFTI_STRUCTURE_CORTEX_LEFT': left_cortex = bm - elif bm.brain_structure == "CIFTI_STRUCTURE_CORTEX_RIGHT": + elif bm.brain_structure == 'CIFTI_STRUCTURE_CORTEX_RIGHT': right_cortex = bm if left_cortex is None or right_cortex is None: - raise RuntimeError("CIFTI is missing cortex information") + raise RuntimeError('CIFTI is missing cortex information') # calculate an average of the BOLD data, excluding the first 5 volumes # as potential nonsteady states data = img.dataobj[5:20].mean(axis=0) counts = (left_cortex.index_count, right_cortex.index_count) - if density == "32k" and counts != (29696, 29716): - raise ValueError("Cortex data is not in fsLR space") + if density == '32k' and counts != (29696, 29716): + raise ValueError('Cortex data is not in fsLR space') # medial wall needs to be added back in lh_data = np.full(left_cortex.surface_number_of_vertices, np.nan) @@ -1078,8 +1073,8 @@ def get_surface_meshes(density, surface_type): rh_bg[:2] = [3, -2] lh_mesh, rh_mesh = get_surface_meshes(density, surface_type) - lh_kwargs = dict(surf_mesh=lh_mesh, surf_map=lh_data, bg_map=lh_bg) - rh_kwargs = dict(surf_mesh=rh_mesh, surf_map=rh_data, bg_map=rh_bg) + lh_kwargs = {'surf_mesh': lh_mesh, 'surf_map': lh_data, 'bg_map': lh_bg} + rh_kwargs = {'surf_mesh': rh_mesh, 'surf_map': rh_data, 'bg_map': rh_bg} # Build the figure figure = plt.figure(figsize=plt.figaspect(0.25), constrained_layout=True) @@ -1097,7 +1092,7 @@ def get_surface_meshes(density, surface_type): vmax=mx, axes=ax, **hemi_kwargs, - **kwargs + **kwargs, ) # plot_surf sets this to 8, which seems a little far out, but 6 starts clipping ax.dist = 7 @@ -1105,7 +1100,7 @@ def get_surface_meshes(density, surface_type): figure.colorbar(cbar_map, shrink=0.2, ax=figure.axes, location='bottom') if output_file is not None: - figure.savefig(output_file, bbox_inches="tight", dpi=400) + figure.savefig(output_file, bbox_inches='tight', dpi=400) plt.close(figure) return output_file @@ -1116,7 +1111,7 @@ def _concat_brain_struct_data(structs, data): concat_data = np.array([], dtype=data.dtype) for struct in structs: struct_upper_bound = struct.index_offset + struct.index_count - struct_data = data[struct.index_offset:struct_upper_bound] + struct_data = data[struct.index_offset : struct_upper_bound] concat_data = np.concatenate((concat_data, struct_data)) return concat_data diff --git a/niworkflows/viz/utils.py b/niworkflows/viz/utils.py index cc932b9e2d0..a82ce564925 100644 --- a/niworkflows/viz/utils.py +++ b/niworkflows/viz/utils.py @@ -21,51 +21,49 @@ # 
https://www.nipreps.org/community/licensing/ # """Helper tools for visualization purposes.""" + +import base64 +import re +import subprocess +from io import StringIO from pathlib import Path from shutil import which from tempfile import TemporaryDirectory -import subprocess -import base64 -import re from uuid import uuid4 -from io import StringIO -import numpy as np import nibabel as nb - +import numpy as np from nipype.utils import filemanip -from .. import NIWORKFLOWS_LOG -from ..utils.images import rotation2canonical, rotate_affine +from .. import NIWORKFLOWS_LOG +from ..utils.images import rotate_affine, rotation2canonical -SVGNS = "http://www.w3.org/2000/svg" +SVGNS = 'http://www.w3.org/2000/svg' def robust_set_limits(data, plot_params, percentiles=(15, 99.8)): """Set (vmax, vmin) based on percentiles of the data.""" - plot_params["vmin"] = plot_params.get("vmin", np.percentile(data, percentiles[0])) - plot_params["vmax"] = plot_params.get("vmax", np.percentile(data, percentiles[1])) + plot_params['vmin'] = plot_params.get('vmin', np.percentile(data, percentiles[0])) + plot_params['vmax'] = plot_params.get('vmax', np.percentile(data, percentiles[1])) return plot_params -def svg_compress(image, compress="auto"): +def svg_compress(image, compress='auto'): """Generate a blob SVG from a matplotlib figure, may perform compression.""" # Check availability of svgo and cwebp - has_compress = all((which("svgo"), which("cwebp"))) + has_compress = all((which('svgo'), which('cwebp'))) if compress is True and not has_compress: - raise RuntimeError( - "Compression is required, but svgo or cwebp are not installed" - ) + raise RuntimeError('Compression is required, but svgo or cwebp are not installed') else: - compress = (compress is True or compress == "auto") and has_compress + compress = (compress is True or compress == 'auto') and has_compress # Compress the SVG file using SVGO if compress: - cmd = "svgo -i - -o - -q -p 3 --pretty" + cmd = 'svgo -i - -o - -q -p 3 --pretty' try: - pout = subprocess.run( + pout = subprocess.run( # noqa: S602 cmd, - input=image.encode("utf-8"), + input=image.encode('utf-8'), stdout=subprocess.PIPE, shell=True, check=True, @@ -77,28 +75,28 @@ def svg_compress(image, compress="auto"): if compress is True and e.errno == ENOENT: raise e else: - image = pout.decode("utf-8") + image = pout.decode('utf-8') # Convert all of the rasters inside the SVG file with 80% compressed WEBP if compress: new_lines = [] with StringIO(image) as fp: for line in fp: - if "image/png" in line: + if 'image/png' in line: tmp_lines = [line] - while "/>" not in line: + while '/>' not in line: line = fp.readline() tmp_lines.append(line) - content = "".join(tmp_lines).replace("\n", "").replace(", ", ",") + content = ''.join(tmp_lines).replace('\n', '').replace(', ', ',') - left = content.split("base64,")[0] + "base64," - left = left.replace("image/png", "image/webp") - right = content.split("base64,")[1] + left = content.split('base64,')[0] + 'base64,' + left = left.replace('image/png', 'image/webp') + right = content.split('base64,')[1] png_b64 = right.split('"')[0] right = '"' + '"'.join(right.split('"')[1:]) - cmd = "cwebp -quiet -noalpha -q 80 -o - -- -" - pout = subprocess.run( + cmd = 'cwebp -quiet -noalpha -q 80 -o - -- -' + pout = subprocess.run( # noqa: S602 cmd, input=base64.b64decode(png_b64), shell=True, @@ -106,7 +104,7 @@ def svg_compress(image, compress="auto"): check=True, close_fds=True, ).stdout - webpimg = base64.b64encode(pout).decode("utf-8") + webpimg = 
base64.b64encode(pout).decode('utf-8') new_lines.append(left + webpimg + right) else: new_lines.append(line) @@ -116,12 +114,12 @@ def svg_compress(image, compress="auto"): svg_start = 0 for i, line in enumerate(lines): - if " 0: # missing may be negative from seaborn import color_palette - colors = colors + color_palette("husl", missing) + colors = colors + color_palette('husl', missing) colors = [[c] if not isinstance(c, list) else c for c in colors] @@ -299,12 +296,12 @@ def _plot_anat_with_contours(image, segs=None, compress="auto", **plot_params): display = plot_anat(image, **plot_params) # remove plot_anat -specific parameters - plot_params.pop("display_mode") - plot_params.pop("cut_coords") + plot_params.pop('display_mode') + plot_params.pop('cut_coords') - plot_params["linewidths"] = 0.5 + plot_params['linewidths'] = 0.5 for i in reversed(range(nsegs)): - plot_params["colors"] = colors[i] + plot_params['colors'] = colors[i] display.add_contours(segs[i], levels=levels[i], **plot_params) svg = extract_svg(display, compress=compress) @@ -316,21 +313,21 @@ def plot_registration( anat_nii, div_id, plot_params=None, - order=("z", "x", "y"), + order=('z', 'x', 'y'), cuts=None, estimate_brightness=False, label=None, contour=None, - compress="auto", + compress='auto', dismiss_affine=False, ): """ Plots the foreground and background views Default order is: axial, coronal, sagittal """ - from svgutils.transform import fromstring - from nilearn.plotting import plot_anat from nilearn import image as nlimage + from nilearn.plotting import plot_anat + from svgutils.transform import fromstring plot_params = {} if plot_params is None else plot_params @@ -369,42 +366,42 @@ def plot_registration( # Plot each cut axis for i, mode in enumerate(list(order)): - plot_params["display_mode"] = mode - plot_params["cut_coords"] = cuts[mode] + plot_params['display_mode'] = mode + plot_params['cut_coords'] = cuts[mode] if i == 0: - plot_params["title"] = label + plot_params['title'] = label else: - plot_params["title"] = None + plot_params['title'] = None # Generate nilearn figure display = plot_anat(anat_nii, **plot_params) if ribbon: - kwargs = {"levels": [0.5], "linewidths": 0.5} - display.add_contours(white, colors="b", **kwargs) - display.add_contours(pial, colors="r", **kwargs) + kwargs = {'levels': [0.5], 'linewidths': 0.5} + display.add_contours(white, colors='b', **kwargs) + display.add_contours(pial, colors='r', **kwargs) elif contour is not None: - display.add_contours(contour, colors="r", levels=[0.5], linewidths=0.5) + display.add_contours(contour, colors='r', levels=[0.5], linewidths=0.5) svg = extract_svg(display, compress=compress) display.close() # Find and replace the figure_1 id. 
- svg = svg.replace("figure_1", "%s-%s-%s" % (div_id, mode, uuid4()), 1) + svg = svg.replace('figure_1', f'{div_id}-{mode}-{uuid4()}', 1) out_files.append(fromstring(svg)) return out_files -def compose_view(bg_svgs, fg_svgs, ref=0, out_file="report.svg"): +def compose_view(bg_svgs, fg_svgs, ref=0, out_file='report.svg'): """Compose the input svgs into one standalone svg with CSS flickering animation.""" out_file = Path(out_file).absolute() - out_file.write_text("\n".join(_compose_view(bg_svgs, fg_svgs, ref=ref))) + out_file.write_text('\n'.join(_compose_view(bg_svgs, fg_svgs, ref=ref))) return str(out_file) def _compose_view(bg_svgs, fg_svgs, ref=0): from svgutils.compose import Unit - from svgutils.transform import SVGFigure, GroupElement + from svgutils.transform import GroupElement, SVGFigure if fg_svgs is None: fg_svgs = [] @@ -416,7 +413,7 @@ def _compose_view(bg_svgs, fg_svgs, ref=0): # Query the size of each sizes = [] for f in svgs: - viewbox = [float(v) for v in f.root.get("viewBox").split(" ")] + viewbox = [float(v) for v in f.root.get('viewBox').split(' ')] width = int(viewbox[2]) height = int(viewbox[3]) sizes.append((width, height)) @@ -431,7 +428,7 @@ def _compose_view(bg_svgs, fg_svgs, ref=0): # Compose the views panel: total size is the width of # any element (used the first here) and the sum of heights - fig = SVGFigure(Unit(f"{width}px"), Unit(f"{heights[:nsvgs].sum()}px")) + fig = SVGFigure(Unit(f'{width}px'), Unit(f'{heights[:nsvgs].sum()}px')) yoffset = 0 for i, r in enumerate(roots): @@ -444,24 +441,24 @@ def _compose_view(bg_svgs, fg_svgs, ref=0): # Group background and foreground panels in two groups if fg_svgs: newroots = [ - GroupElement(roots[:nsvgs], {"class": "background-svg"}), - GroupElement(roots[nsvgs:], {"class": "foreground-svg"}), + GroupElement(roots[:nsvgs], {'class': 'background-svg'}), + GroupElement(roots[nsvgs:], {'class': 'foreground-svg'}), ] else: newroots = roots fig.append(newroots) - fig.root.attrib.pop("width", None) - fig.root.attrib.pop("height", None) - fig.root.set("preserveAspectRatio", "xMidYMid meet") + fig.root.attrib.pop('width', None) + fig.root.attrib.pop('height', None) + fig.root.set('preserveAspectRatio', 'xMidYMid meet') with TemporaryDirectory() as tmpdirname: - out_file = Path(tmpdirname) / "tmp.svg" + out_file = Path(tmpdirname) / 'tmp.svg' fig.save(str(out_file)) # Post processing svg = out_file.read_text().splitlines() # Remove -@keyframes flickerAnimation%s { 0%% {opacity: 1;} 100%% { opacity: 0; }} -.foreground-svg { animation: 1s ease-in-out 0s alternate none infinite paused flickerAnimation%s;} -.foreground-svg:hover { animation-play-state: running;} -""" - % tuple([uuid4()] * 2), +@keyframes {0} {{ 0% {{opacity: 1;}} 100% {{ opacity: 0; }}}} +.foreground-svg {{ animation: 1s ease-in-out 0s alternate none infinite paused {0};}} +.foreground-svg:hover {{ animation-play-state: running;}} +""".format(f'flickerAnimation{uuid4()}'), ) return svg @@ -508,8 +504,8 @@ def plot_melodic_components( melodic_dir, in_file, tr=None, - out_file="melodic_reportlet.svg", - compress="auto", + out_file='melodic_reportlet.svg', + compress='auto', report_mask=None, noise_components_file=None, ): @@ -545,33 +541,32 @@ def plot_melodic_components( is printed at the top. 
""" - from nilearn.image import index_img, iter_img + import os + import nibabel as nb import numpy as np import pylab as plt import seaborn as sns from matplotlib.gridspec import GridSpec - import os + from nilearn.image import index_img, iter_img - sns.set_style("white") + sns.set_style('white') current_palette = sns.color_palette() in_nii = nb.load(in_file) if not tr: tr = in_nii.header.get_zooms()[3] units = in_nii.header.get_xyzt_units() if units: - if units[-1] == "msec": + if units[-1] == 'msec': tr = tr / 1000.0 - elif units[-1] == "usec": + elif units[-1] == 'usec': tr = tr / 1000000.0 - elif units[-1] != "sec": + elif units[-1] != 'sec': NIWORKFLOWS_LOG.warning( - "Unknown repetition time units specified - assuming seconds" + 'Unknown repetition time units specified - assuming seconds' ) else: - NIWORKFLOWS_LOG.warning( - "Repetition time units not specified - assuming seconds" - ) + NIWORKFLOWS_LOG.warning('Repetition time units not specified - assuming seconds') try: from nilearn.maskers import NiftiMasker @@ -580,27 +575,24 @@ def plot_melodic_components( from nilearn.plotting import cm if not report_mask: - nifti_masker = NiftiMasker(mask_strategy="epi") + nifti_masker = NiftiMasker(mask_strategy='epi') nifti_masker.fit(index_img(in_nii, range(2))) mask_img = nifti_masker.mask_img_ else: mask_img = nb.load(report_mask) - mask_sl = [ - transform_to_2d(mask_img.get_fdata(), j) - for j in range(3) - ] + mask_sl = [transform_to_2d(mask_img.get_fdata(), j) for j in range(3)] - timeseries = np.loadtxt(os.path.join(melodic_dir, "melodic_mix")) - power = np.loadtxt(os.path.join(melodic_dir, "melodic_FTmix")) - stats = np.loadtxt(os.path.join(melodic_dir, "melodic_ICstats")) + timeseries = np.loadtxt(os.path.join(melodic_dir, 'melodic_mix')) + power = np.loadtxt(os.path.join(melodic_dir, 'melodic_FTmix')) + stats = np.loadtxt(os.path.join(melodic_dir, 'melodic_ICstats')) n_components = stats.shape[0] Fs = 1.0 / tr Ny = Fs / 2 f = Ny * (np.array(list(range(1, power.shape[0] + 1)))) / (power.shape[0]) # Set default colors - color_title = "k" + color_title = 'k' color_time = current_palette[0] color_power = current_palette[1] classified_colors = None @@ -609,12 +601,10 @@ def plot_melodic_components( # Only if the components file has been provided, a warning banner will # be issued if all or none of the components were classified as noise if noise_components_file: - noise_components = np.loadtxt( - noise_components_file, dtype=int, delimiter=",", ndmin=1 - ) + noise_components = np.loadtxt(noise_components_file, dtype=int, delimiter=',', ndmin=1) # Activate warning row if pertinent warning_row = int(noise_components.size in (0, n_components)) - classified_colors = {True: "r", False: "g"} + classified_colors = {True: 'r', False: 'g'} n_rows = int((n_components + (n_components % 2)) / 2) fig = plt.figure(figsize=(6.5 * 1.5, (n_rows + warning_row) * 0.85)) @@ -627,29 +617,28 @@ def plot_melodic_components( if warning_row: ax = fig.add_subplot(gs[0, :]) - ncomps = "NONE of the" + ncomps = 'NONE of the' if noise_components.size == n_components: - ncomps = "ALL" + ncomps = 'ALL' ax.annotate( - "WARNING: {} components were classified as noise".format(ncomps), + f'WARNING: {ncomps} components were classified as noise', xy=(0.0, 0.5), - xycoords="axes fraction", + xycoords='axes fraction', xytext=(0.01, 0.5), - textcoords="axes fraction", + textcoords='axes fraction', size=12, - color="#ea8800", - bbox=dict(boxstyle="round", fc="#f7dcb7", ec="#FC990E"), + color='#ea8800', + bbox={'boxstyle': 
'round', 'fc': '#f7dcb7', 'ec': '#FC990E'}, ) ax.axes.get_xaxis().set_visible(False) ax.axes.get_yaxis().set_visible(False) - titlefmt = "C{id:d}{noise}: Tot. var. expl. {var:.2g}%".format - ICs = nb.load(os.path.join(melodic_dir, "melodic_IC.nii.gz")) + titlefmt = 'C{id:d}{noise}: Tot. var. expl. {var:.2g}%'.format + ICs = nb.load(os.path.join(melodic_dir, 'melodic_IC.nii.gz')) # Ensure 4D if ICs.ndim == 3: ICs = ICs.slicer[..., None] for i, img in enumerate(iter_img(ICs)): - col = i % 2 row = i // 2 l_row = row * 2 + warning_row @@ -662,23 +651,21 @@ def plot_melodic_components( data = img.get_fdata() for j in range(3): - ax1 = fig.add_subplot(gs[l_row:l_row + 2, j + col * 5]) + ax1 = fig.add_subplot(gs[l_row : l_row + 2, j + col * 5]) sl = transform_to_2d(data, j) m = np.abs(sl).max() - ax1.imshow( - sl, vmin=-m, vmax=+m, cmap=cm.cold_white_hot, interpolation="nearest" - ) - ax1.contour(mask_sl[j], levels=[0.5], colors="k", linewidths=0.5) - plt.axis("off") - ax1.autoscale_view("tight") + ax1.imshow(sl, vmin=-m, vmax=+m, cmap=cm.cold_white_hot, interpolation='nearest') + ax1.contour(mask_sl[j], levels=[0.5], colors='k', linewidths=0.5) + plt.axis('off') + ax1.autoscale_view('tight') if j == 0: ax1.set_title( - titlefmt(id=i + 1, noise=" [noise]" * is_noise, var=stats[i, 1]), + titlefmt(id=i + 1, noise=' [noise]' * is_noise, var=stats[i, 1]), x=0, y=1.18, fontsize=7, - horizontalalignment="left", - verticalalignment="top", + horizontalalignment='left', + verticalalignment='top', color=color_title, ) @@ -693,8 +680,8 @@ def plot_melodic_components( ) ax2.set_xlim([0, len(timeseries[:, i]) * tr]) ax2.axes.get_yaxis().set_visible(False) - ax2.autoscale_view("tight") - ax2.tick_params(axis="both", which="major", pad=0) + ax2.autoscale_view('tight') + ax2.tick_params(axis='both', which='major', pad=0) sns.despine(left=True, bottom=True) for label in ax2.xaxis.get_majorticklabels(): label.set_fontsize(6) @@ -708,8 +695,8 @@ def plot_melodic_components( ) ax3.set_xlim([f[0], f.max()]) ax3.axes.get_yaxis().set_visible(False) - ax3.autoscale_view("tight") - ax3.tick_params(axis="both", which="major", pad=0) + ax3.autoscale_view('tight') + ax3.tick_params(axis='both', which='major', pad=0) for label in ax3.xaxis.get_majorticklabels(): label.set_fontsize(6) label.set_color(color_power) @@ -719,9 +706,9 @@ def plot_melodic_components( fig.savefig( out_file, dpi=300, - format="svg", + format='svg', transparent=True, - bbox_inches="tight", + bbox_inches='tight', pad_inches=0.01, ) fig.clf() diff --git a/niworkflows/workflows/epi/refmap.py b/niworkflows/workflows/epi/refmap.py index 752a828f365..1ee3b2297e0 100644 --- a/niworkflows/workflows/epi/refmap.py +++ b/niworkflows/workflows/epi/refmap.py @@ -22,19 +22,18 @@ # """Workflow for the generation of EPI (echo-planar imaging) references.""" -from nipype.pipeline import engine as pe from nipype.interfaces import utility as niu +from nipype.pipeline import engine as pe from ...engine.workflows import LiterateWorkflow as Workflow - DEFAULT_MEMORY_MIN_GB = 0.01 def init_epi_reference_wf( omp_nthreads, auto_bold_nss=False, - name="epi_reference_wf", + name='epi_reference_wf', ): """ Build a workflow that generates a reference map from a set of EPI images. 
@@ -116,42 +115,38 @@ def init_epi_reference_wf( """ from nipype.interfaces.ants import N4BiasFieldCorrection - from ...utils.connections import listify from ...interfaces.bold import NonsteadyStatesDetector from ...interfaces.freesurfer import StructuralReference from ...interfaces.header import ValidateImage from ...interfaces.images import RobustAverage from ...interfaces.nibabel import IntensityClip + from ...utils.connections import listify wf = Workflow(name=name) - inputnode = pe.Node( - niu.IdentityInterface(fields=["in_files", "t_masks"]), name="inputnode" - ) + inputnode = pe.Node(niu.IdentityInterface(fields=['in_files', 't_masks']), name='inputnode') outputnode = pe.Node( niu.IdentityInterface( fields=[ - "epi_ref_file", - "xfm_files", - "per_run_ref_files", - "drift_factors", - "n_dummy", - "validation_report", + 'epi_ref_file', + 'xfm_files', + 'per_run_ref_files', + 'drift_factors', + 'n_dummy', + 'validation_report', ] ), - name="outputnode", + name='outputnode', ) - validate_nii = pe.MapNode( - ValidateImage(), name="validate_nii", iterfield=["in_file"] - ) + validate_nii = pe.MapNode(ValidateImage(), name='validate_nii', iterfield=['in_file']) per_run_avgs = pe.MapNode( - RobustAverage(), name="per_run_avgs", mem_gb=1, iterfield=["in_file", "t_mask"] + RobustAverage(), name='per_run_avgs', mem_gb=1, iterfield=['in_file', 't_mask'] ) - clip_avgs = pe.MapNode(IntensityClip(), name="clip_avgs", iterfield=["in_file"]) + clip_avgs = pe.MapNode(IntensityClip(), name='clip_avgs', iterfield=['in_file']) # de-gradient the fields ("bias/illumination artifact") n4_avgs = pe.MapNode( @@ -163,13 +158,13 @@ def init_epi_reference_wf( shrink_factor=4, ), n_procs=omp_nthreads, - name="n4_avgs", - iterfield=["input_image"], + name='n4_avgs', + iterfield=['input_image'], ) clip_bg_noise = pe.MapNode( IntensityClip(p_min=2.0, p_max=100.0), - name="clip_bg_noise", - iterfield=["in_file"], + name='clip_bg_noise', + iterfield=['in_file'], ) epi_merge = pe.Node( @@ -182,48 +177,48 @@ def init_epi_reference_wf( no_iteration=True, transform_outputs=True, ), - name="epi_merge", + name='epi_merge', ) - post_merge = pe.Node(niu.Function(function=_post_merge), name="post_merge") + post_merge = pe.Node(niu.Function(function=_post_merge), name='post_merge') def _set_threads(in_list, maximum): return min(len(in_list), maximum) # fmt:off wf.connect([ - (inputnode, validate_nii, [(("in_files", listify), "in_file")]), - (validate_nii, per_run_avgs, [("out_file", "in_file")]), - (per_run_avgs, clip_avgs, [("out_file", "in_file")]), - (clip_avgs, n4_avgs, [("out_file", "input_image")]), - (n4_avgs, clip_bg_noise, [("output_image", "in_file")]), + (inputnode, validate_nii, [(('in_files', listify), 'in_file')]), + (validate_nii, per_run_avgs, [('out_file', 'in_file')]), + (per_run_avgs, clip_avgs, [('out_file', 'in_file')]), + (clip_avgs, n4_avgs, [('out_file', 'input_image')]), + (n4_avgs, clip_bg_noise, [('output_image', 'in_file')]), (clip_bg_noise, epi_merge, [ - ("out_file", "in_files"), - (("out_file", _set_threads, omp_nthreads), "num_threads"), + ('out_file', 'in_files'), + (('out_file', _set_threads, omp_nthreads), 'num_threads'), ]), - (epi_merge, post_merge, [("out_file", "in_file"), - ("transform_outputs", "in_xfms")]), - (post_merge, outputnode, [("out", "epi_ref_file")]), - (epi_merge, outputnode, [("transform_outputs", "xfm_files")]), - (per_run_avgs, outputnode, [("out_drift", "drift_factors")]), - (n4_avgs, outputnode, [("output_image", "per_run_ref_files")]), - (validate_nii, outputnode, 
[("out_report", "validation_report")]), + (epi_merge, post_merge, [('out_file', 'in_file'), + ('transform_outputs', 'in_xfms')]), + (post_merge, outputnode, [('out', 'epi_ref_file')]), + (epi_merge, outputnode, [('transform_outputs', 'xfm_files')]), + (per_run_avgs, outputnode, [('out_drift', 'drift_factors')]), + (n4_avgs, outputnode, [('output_image', 'per_run_ref_files')]), + (validate_nii, outputnode, [('out_report', 'validation_report')]), ]) # fmt:on if auto_bold_nss: select_volumes = pe.MapNode( - NonsteadyStatesDetector(), name="select_volumes", iterfield=["in_file"] + NonsteadyStatesDetector(), name='select_volumes', iterfield=['in_file'] ) # fmt:off wf.connect([ - (validate_nii, select_volumes, [("out_file", "in_file")]), - (select_volumes, per_run_avgs, [("t_mask", "t_mask")]), - (select_volumes, outputnode, [("n_dummy", "n_dummy")]) + (validate_nii, select_volumes, [('out_file', 'in_file')]), + (select_volumes, per_run_avgs, [('t_mask', 't_mask')]), + (select_volumes, outputnode, [('n_dummy', 'n_dummy')]) ]) # fmt:on else: - wf.connect(inputnode, "t_masks", per_run_avgs, "t_mask") + wf.connect(inputnode, 't_masks', per_run_avgs, 't_mask') return wf @@ -245,17 +240,19 @@ def _post_merge(in_file, in_xfms): from niworkflows.utils.connections import listify in_xfms = listify(in_xfms) - if len(in_xfms) == 1 and in_file.endswith((".nii", ".nii.gz")): + if len(in_xfms) == 1 and in_file.endswith(('.nii', '.nii.gz')): return in_file if len(in_xfms) == 1: - raise RuntimeError("Output format and number of transforms do not match") + raise RuntimeError('Output format and number of transforms do not match') from pathlib import Path + import nibabel as nb + from niworkflows.interfaces.nibabel import _advanced_clip - out_file = Path() / Path(in_file).name.replace(".mgz", ".nii.gz") + out_file = Path() / Path(in_file).name.replace('.mgz', '.nii.gz') img = nb.load(in_file) nb.Nifti1Image(img.dataobj, img.affine, None).to_filename(out_file) return _advanced_clip(out_file, p_min=0.0, p_max=100.0) diff --git a/niworkflows/workflows/epi/tests/test_refmap.py b/niworkflows/workflows/epi/tests/test_refmap.py index 66661487223..11898f7c7d6 100644 --- a/niworkflows/workflows/epi/tests/test_refmap.py +++ b/niworkflows/workflows/epi/tests/test_refmap.py @@ -21,13 +21,15 @@ # https://www.nipreps.org/community/licensing/ # """Check the refmap module.""" + import os import unittest -from ..refmap import init_epi_reference_wf + from ....testing import has_afni +from ..refmap import init_epi_reference_wf -@unittest.skipUnless(has_afni, "Needs AFNI") +@unittest.skipUnless(has_afni, 'Needs AFNI') def test_reference(tmpdir, ds000030_dir, workdir, outdir): """Exercise the EPI reference workflow.""" tmpdir.chdir() @@ -37,7 +39,7 @@ def test_reference(tmpdir, ds000030_dir, workdir, outdir): wf.base_dir = str(workdir) wf.inputs.inputnode.in_files = [ - str(f) for f in (ds000030_dir / "sub-10228" / "func").glob("*_bold.nii.gz") + str(f) for f in (ds000030_dir / 'sub-10228' / 'func').glob('*_bold.nii.gz') ] # if outdir: diff --git a/pyproject.toml b/pyproject.toml index 5624790eb3a..c825d35625c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -109,11 +109,57 @@ raw-options = {version_scheme = "release-branch-semver" } [tool.hatch.build.hooks.vcs] version-file = "niworkflows/_version.py" +# Disable black [tool.black] +exclude = ".*" + +[tool.ruff] line-length = 99 -target-version = ['py37'] -skip-string-normalization = true -extend-exclude = '_version.py' + +[tool.ruff.lint] +extend-select = [ + "F", + "E", + "W", + 
"I", + "UP", + "YTT", + "S", + "BLE", + "B", + "A", + # "CPY", + "C4", + "DTZ", + "T10", + # "EM", + "EXE", + "FA", + "ISC", + "ICN", + "PT", + "Q", +] +ignore = [ + "S311", # We are not using random for cryptographic purposes + "ISC001", + "S603", +] + +[tool.ruff.lint.flake8-quotes] +inline-quotes = "single" + +[tool.ruff.lint.extend-per-file-ignores] +"*/__init__.py" = ["F401"] +"*/test_*.py" = ["S101"] +"docs/conf.py" = ["A001"] +"niworkflows/engine/plugin.py" = [ + "BLE001", # except Exception is intentional + "S101", # Assertions are intentional +] + +[tool.ruff.format] +quote-style = "single" [tool.pytest.ini_options] minversion = "6" @@ -155,3 +201,7 @@ source = [ "niworkflows", "**/site-packages/niworkflows" ] + +[tool.codespell] +skip = "*/data/*,*/docs/_build/*,./examples/viz-report.*" +ignore-words-list = "objekt,nd" diff --git a/tools/update_changes.sh b/tools/update_changes.sh index a0dcd070a5a..5fc86f5bad4 100644 --- a/tools/update_changes.sh +++ b/tools/update_changes.sh @@ -52,4 +52,3 @@ fi # Replace old CHANGES.rst with new file mv newchanges CHANGES.rst -