diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 94e1639..177fd90 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,19 +1,21 @@
 exclude: ".*/data/.*"
 repos:
-- repo: https://github.com/pre-commit/pre-commit-hooks
+  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
-      - id: trailing-whitespace
-      - id: end-of-file-fixer
-      - id: check-yaml
-      - id: check-added-large-files
-- repo: https://github.com/psf/black
-  rev: 23.1.0
+      - id: trailing-whitespace
+        exclude: '.*\.svg'
+      - id: end-of-file-fixer
+        exclude: '.*\.svg'
+      - id: check-yaml
+      - id: check-json
+      - id: check-toml
+      - id: check-added-large-files
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.4.3
    hooks:
-      - id: black
-        files: ^src/fmripost_aroma/
-- repo: https://github.com/pycqa/isort
-  rev: 5.12.0
-  hooks:
-      - id: isort
-        files: ^src/fmripost_aroma/
+      - id: ruff
+        args: [ --fix ]
+      - id: ruff-format
+      - id: ruff
+        args: [ --select, ISC001, --fix ]
diff --git a/docs/conf.py b/docs/conf.py
index 65f2a1d..d5dff87 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -21,47 +21,47 @@
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.append(os.path.abspath("sphinxext"))
-sys.path.insert(0, os.path.abspath("../wrapper"))
+sys.path.append(os.path.abspath('sphinxext'))
+sys.path.insert(0, os.path.abspath('../wrapper'))

 from github_link import make_linkcode_resolve  # noqa: E402

 # -- General configuration ------------------------------------------------

 # If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = "1.5.3"
+needs_sphinx = '1.5.3'

 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named "sphinx.ext.*") or your custom
 # ones.
 extensions = [
-    "sphinx.ext.autodoc",
-    "sphinx.ext.doctest",
-    "sphinx.ext.intersphinx",
-    "sphinx.ext.coverage",
-    "sphinx.ext.mathjax",
-    "sphinx.ext.linkcode",
-    "sphinx.ext.napoleon",
-    "sphinxarg.ext",  # argparse extension
-    "nipype.sphinxext.plot_workflow",
+    'sphinx.ext.autodoc',
+    'sphinx.ext.doctest',
+    'sphinx.ext.intersphinx',
+    'sphinx.ext.coverage',
+    'sphinx.ext.mathjax',
+    'sphinx.ext.linkcode',
+    'sphinx.ext.napoleon',
+    'sphinxarg.ext',  # argparse extension
+    'nipype.sphinxext.plot_workflow',
 ]

 # Mock modules in autodoc:
 autodoc_mock_imports = [
-    "numpy",
-    "nitime",
-    "matplotlib",
+    'numpy',
+    'nitime',
+    'matplotlib',
 ]

-if pver.parse(sphinxversion) >= pver.parse("1.7.0"):
+if pver.parse(sphinxversion) >= pver.parse('1.7.0'):
     autodoc_mock_imports += [
-        "pandas",
-        "nilearn",
-        "seaborn",
+        'pandas',
+        'nilearn',
+        'seaborn',
     ]

 # Add any paths that contain templates here, relative to this directory.
-templates_path = ["_templates"]
+templates_path = ['_templates']

 # Accept custom section names to be parsed for numpy-style docstrings
 # of parameters.
@@ -69,41 +69,41 @@
 # https://github.com/sphinx-contrib/napoleon/pull/10 is merged.
 napoleon_use_param = False
 napoleon_custom_sections = [
-    ("Inputs", "Parameters"),
-    ("Outputs", "Parameters"),
+    ('Inputs', 'Parameters'),
+    ('Outputs', 'Parameters'),
 ]

 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 # source_suffix = [".rst", ".md"]
-source_suffix = ".rst"
+source_suffix = '.rst'

 # The encoding of source files.
# source_encoding = "utf-8-sig" # The master toctree document. -master_doc = "index" +master_doc = 'index' # General information about the project. -project = "fMRIPost-AROMA" -author = "The fMRIPost-AROMA developers" -copyright = f"2016-, {author}" +project = 'fMRIPost-AROMA' +author = 'The fMRIPost-AROMA developers' +copyright = f'2016-, {author}' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = "version" +version = 'version' # The full version, including alpha/beta/rc tags. -release = "version" +release = 'version' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = "en" +language = 'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: @@ -114,7 +114,7 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The reST default role (used for this markup: `text`) to use for all # documents. @@ -132,7 +132,7 @@ # show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" +pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] @@ -148,7 +148,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = "sphinx_rtd_theme" +html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -177,7 +177,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] +html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied @@ -242,7 +242,7 @@ # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. -htmlhelp_basename = "fmripost_aroma_doc" +htmlhelp_basename = 'fmripost_aroma_doc' # -- Options for LaTeX output --------------------------------------------- @@ -261,7 +261,7 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, "fmripost_aroma.tex", "fMRIPost-AROMA Documentation", author, "manual"), + (master_doc, 'fmripost_aroma.tex', 'fMRIPost-AROMA Documentation', author, 'manual'), ] # The name of an image file (relative to this directory) to place at the top of @@ -289,7 +289,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
-man_pages = [(master_doc, "fmripost-aroma", "fMRIPost-AROMA Documentation", [author], 1)]
+man_pages = [(master_doc, 'fmripost-aroma', 'fMRIPost-AROMA Documentation', [author], 1)]

 # If true, show URL addresses after external links.
 # man_show_urls = False
@@ -303,12 +303,12 @@
 texinfo_documents = [
     (
         master_doc,
-        "fMRIPost-AROMA",
-        "fMRIPost-AROMA Documentation",
+        'fMRIPost-AROMA',
+        'fMRIPost-AROMA Documentation',
         author,
-        "fMRIPost-AROMA",
-        "One line description of project.",
-        "Miscellaneous",
+        'fMRIPost-AROMA',
+        'One line description of project.',
+        'Miscellaneous',
     ),
 ]

@@ -326,31 +326,31 @@

 # The following is used by sphinx.ext.linkcode to provide links to github
 linkcode_resolve = make_linkcode_resolve(
-    "fmripost_aroma",
-    "https://github.com/nipreps/fMRIPost-AROMA/blob/{revision}/{package}/{path}#L{lineno}",
+    'fmripost_aroma',
+    'https://github.com/nipreps/fMRIPost-AROMA/blob/{revision}/{package}/{path}#L{lineno}',
 )

 # Example configuration for intersphinx: refer to the Python standard library.
 intersphinx_mapping = {
-    "python": ("https://docs.python.org/3/", None),
-    "numpy": ("https://numpy.org/doc/stable/", None),
-    "scipy": ("https://docs.scipy.org/doc/scipy/", None),
-    "matplotlib": ("https://matplotlib.org/stable/", None),
-    "bids": ("https://bids-standard.github.io/pybids/", None),
-    "nibabel": ("https://nipy.org/nibabel/", None),
-    "nipype": ("https://nipype.readthedocs.io/en/latest/", None),
-    "niworkflows": ("https://www.nipreps.org/niworkflows/", None),
-    "fmriprep": ("https://fmriprep.org/en/stable/", None),
-    "sdcflows": ("https://www.nipreps.org/sdcflows/", None),
-    "smriprep": ("https://www.nipreps.org/smriprep/", None),
-    "templateflow": ("https://www.templateflow.org/python-client", None),
-    "tedana": ("https://tedana.readthedocs.io/en/latest/", None),
+    'python': ('https://docs.python.org/3/', None),
+    'numpy': ('https://numpy.org/doc/stable/', None),
+    'scipy': ('https://docs.scipy.org/doc/scipy/', None),
+    'matplotlib': ('https://matplotlib.org/stable/', None),
+    'bids': ('https://bids-standard.github.io/pybids/', None),
+    'nibabel': ('https://nipy.org/nibabel/', None),
+    'nipype': ('https://nipype.readthedocs.io/en/latest/', None),
+    'niworkflows': ('https://www.nipreps.org/niworkflows/', None),
+    'fmriprep': ('https://fmriprep.org/en/stable/', None),
+    'sdcflows': ('https://www.nipreps.org/sdcflows/', None),
+    'smriprep': ('https://www.nipreps.org/smriprep/', None),
+    'templateflow': ('https://www.templateflow.org/python-client', None),
+    'tedana': ('https://tedana.readthedocs.io/en/latest/', None),
 }

-suppress_warnings = ["image.nonlocal_uri"]
+suppress_warnings = ['image.nonlocal_uri']


 def setup(app):
-    app.add_css_file("theme_overrides.css")
+    app.add_css_file('theme_overrides.css')
     # We need this for the boilerplate script
-    app.add_js_file("https://cdn.rawgit.com/chrisfilo/zenodo.js/v0.1/zenodo.js")
+    app.add_js_file('https://cdn.rawgit.com/chrisfilo/zenodo.js/v0.1/zenodo.js')
diff --git a/docs/sphinxext/github_link.py b/docs/sphinxext/github_link.py
index 5dc699a..fabba0a 100644
--- a/docs/sphinxext/github_link.py
+++ b/docs/sphinxext/github_link.py
@@ -10,16 +10,16 @@
 from functools import partial
 from operator import attrgetter

-REVISION_CMD = "git rev-parse --short HEAD"
+REVISION_CMD = 'git rev-parse --short HEAD'


 def _get_git_revision():
     try:
         revision = subprocess.check_output(REVISION_CMD.split()).strip()
     except (subprocess.CalledProcessError, OSError):
-        print("Failed to execute git to get revision")
+        print('Failed to execute git to get revision')
         return None
-    return revision.decode("utf-8")
+    return revision.decode('utf-8')


 def _linkcode_resolve(domain, info, package, url_fmt, revision):
@@ -39,14 +39,14 @@ def _linkcode_resolve(domain, info, package, url_fmt, revision):
     if revision is None:
         return
-    if domain not in ("py", "pyx"):
+    if domain not in ('py', 'pyx'):
         return
-    if not info.get("module") or not info.get("fullname"):
+    if not info.get('module') or not info.get('fullname'):
         return

-    class_name = info["fullname"].split(".")[0]
-    module = __import__(info["module"], fromlist=[class_name])
-    obj = attrgetter(info["fullname"])(module)
+    class_name = info['fullname'].split('.')[0]
+    module = __import__(info['module'], fromlist=[class_name])
+    obj = attrgetter(info['fullname'])(module)

     # Unwrap the object to get the correct source
     # file in case that is wrapped by a decorator
@@ -68,7 +68,7 @@ def _linkcode_resolve(domain, info, package, url_fmt, revision):
     try:
         lineno = inspect.getsourcelines(obj)[1]
     except Exception:
-        lineno = ""
+        lineno = ''
     return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno)
diff --git a/pyproject.toml b/pyproject.toml
index 0065a39..8fb6ef0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -115,12 +115,51 @@ exclude_lines = [
     "if TYPE_CHECKING:",
 ]

+# Disable black
 [tool.black]
-line-length = 99
-target-version = ['py39']
-
-[tool.isort]
-profile = 'black'
+exclude = ".*"

 [tool.ruff]
 line-length = 99
+
+[tool.ruff.lint]
+extend-select = [
+    "F",
+    "E",
+    "W",
+    "I",
+    "UP",
+    "YTT",
+    "S",
+    "BLE",
+    "B",
+    "A",
+    # "CPY",
+    "C4",
+    "DTZ",
+    "T10",
+    # "EM",
+    "EXE",
+    "FA",
+    "ISC",
+    "ICN",
+    "PT",
+    "Q",
+]
+ignore = [
+    "S311",  # We are not using random for cryptographic purposes
+    "ISC001",
+    "S603",
+]
+
+[tool.ruff.lint.flake8-quotes]
+inline-quotes = "single"
+
+[tool.ruff.lint.extend-per-file-ignores]
+"*/test_*.py" = ["S101"]
+"fmriprep/utils/debug.py" = ["A002", "T100"]
+"docs/conf.py" = ["A001"]
+"docs/sphinxext/github_link.py" = ["BLE001"]
+
+[tool.ruff.format]
+quote-style = "single"
diff --git a/scripts/fetch_templates.py b/scripts/fetch_templates.py
index 11b1ac0..04881d6 100755
--- a/scripts/fetch_templates.py
+++ b/scripts/fetch_templates.py
@@ -23,13 +23,13 @@ def fetch_MNI2009():
     tpl-MNI152NLin2009cAsym/tpl-MNI152NLin2009cAsym_res-02_desc-fMRIPrep_boldref.nii.gz
     tpl-MNI152NLin2009cAsym/tpl-MNI152NLin2009cAsym_res-01_label-brain_probseg.nii.gz
     """
-    template = "MNI152NLin2009cAsym"
+    template = 'MNI152NLin2009cAsym'

-    tf.get(template, resolution=(1, 2), desc=None, suffix="T1w")
-    tf.get(template, resolution=(1, 2), desc="brain", suffix="mask")
-    tf.get(template, resolution=1, atlas=None, desc="carpet", suffix="dseg")
-    tf.get(template, resolution=2, desc="fMRIPrep", suffix="boldref")
-    tf.get(template, resolution=1, label="brain", suffix="probseg")
+    tf.get(template, resolution=(1, 2), desc=None, suffix='T1w')
+    tf.get(template, resolution=(1, 2), desc='brain', suffix='mask')
+    tf.get(template, resolution=1, atlas=None, desc='carpet', suffix='dseg')
+    tf.get(template, resolution=2, desc='fMRIPrep', suffix='boldref')
+    tf.get(template, resolution=1, label='brain', suffix='probseg')


 def fetch_MNI6():
@@ -42,12 +42,12 @@ def fetch_MNI6():
     tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-02_desc-brain_mask.nii.gz
     tpl-MNI152NLin6Asym/tpl-MNI152NLin6Asym_res-02_atlas-HCP_dseg.nii.gz
     """
-    template = "MNI152NLin6Asym"
+    template = 'MNI152NLin6Asym'

-    tf.get(template, resolution=(1, 2), desc=None, suffix="T1w")
-    tf.get(template, resolution=(1, 2), desc="brain", suffix="mask")
desc="brain", suffix="mask") + tf.get(template, resolution=(1, 2), desc=None, suffix='T1w') + tf.get(template, resolution=(1, 2), desc='brain', suffix='mask') # CIFTI - tf.get(template, resolution=2, atlas="HCP", suffix="dseg") + tf.get(template, resolution=2, atlas='HCP', suffix='dseg') def fetch_OASIS(): @@ -61,14 +61,14 @@ def fetch_OASIS(): tpl-OASIS30ANTs/tpl-OASIS30ANTs_res-01_desc-brain_mask.nii.gz tpl-OASIS30ANTs/tpl-OASIS30ANTs_res-01_desc-BrainCerebellumExtraction_mask.nii.gz """ - template = "OASIS30ANTs" + template = 'OASIS30ANTs' - tf.get(template, resolution=1, desc=None, label=None, suffix="T1w") - tf.get(template, resolution=1, label="WM", suffix="probseg") - tf.get(template, resolution=1, label="BS", suffix="probseg") - tf.get(template, resolution=1, label="brain", suffix="probseg") - tf.get(template, resolution=1, label="brain", suffix="mask") - tf.get(template, resolution=1, desc="BrainCerebellumExtraction", suffix="mask") + tf.get(template, resolution=1, desc=None, label=None, suffix='T1w') + tf.get(template, resolution=1, label='WM', suffix='probseg') + tf.get(template, resolution=1, label='BS', suffix='probseg') + tf.get(template, resolution=1, label='brain', suffix='probseg') + tf.get(template, resolution=1, label='brain', suffix='mask') + tf.get(template, resolution=1, desc='BrainCerebellumExtraction', suffix='mask') def fetch_fsaverage(): @@ -84,11 +84,11 @@ def fetch_fsaverage(): tpl-fsaverage/tpl-fsaverage_hemi-L_den-164k_sulc.shape.gii tpl-fsaverage/tpl-sfaverage_hemi-R_den-164k_sulc.shape.gii """ - template = "fsaverage" + template = 'fsaverage' - tf.get(template, density="164k", desc="std", suffix="sphere") - tf.get(template, density="164k", suffix="midthickness") - tf.get(template, density="164k", suffix="sulc") + tf.get(template, density='164k', desc='std', suffix='sphere') + tf.get(template, density='164k', suffix='midthickness') + tf.get(template, density='164k', suffix='sulc') def fetch_fsLR(): @@ -104,7 +104,7 @@ def fetch_fsLR(): tpl-fsLR/tpl-fsLR_space-fsaverage_hemi-L_den-32k_sphere.surf.gii tpl-fsLR/tpl-fsLR_space-fsaverage_hemi-R_den-32k_sphere.surf.gii """ - tf.get("fsLR", density="32k") + tf.get('fsLR', density='32k') def fetch_all(): @@ -115,21 +115,21 @@ def fetch_all(): # fetch_fsLR() -if __name__ == "__main__": +if __name__ == '__main__': parser = argparse.ArgumentParser( - description="Helper script for pre-caching required templates to run fMRIPost-AROMA", + description='Helper script for pre-caching required templates to run fMRIPost-AROMA', ) parser.add_argument( - "--tf-dir", + '--tf-dir', type=os.path.abspath, - help="Directory to save templates in. If not provided, templates will be saved to" - " `${HOME}/.cache/templateflow`.", + help='Directory to save templates in. 
+        ' `${HOME}/.cache/templateflow`.',
     )
     opts = parser.parse_args()

     # set envvar (if necessary) prior to templateflow import
     if opts.tf_dir is not None:
-        os.environ["TEMPLATEFLOW_HOME"] = opts.tf_dir
+        os.environ['TEMPLATEFLOW_HOME'] = opts.tf_dir

     import templateflow.api as tf
diff --git a/src/fmripost_aroma/__init__.py b/src/fmripost_aroma/__init__.py
index d34bde0..2afc7fe 100644
--- a/src/fmripost_aroma/__init__.py
+++ b/src/fmripost_aroma/__init__.py
@@ -25,4 +25,4 @@
 try:
     from ._version import __version__
 except ImportError:
-    __version__ = "0+unknown"
+    __version__ = '0+unknown'
diff --git a/src/fmripost_aroma/__main__.py b/src/fmripost_aroma/__main__.py
index fa425e0..d44794e 100644
--- a/src/fmripost_aroma/__main__.py
+++ b/src/fmripost_aroma/__main__.py
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: Apache-2.0
 import sys

-if __name__ == "__main__":
+if __name__ == '__main__':
     from .cli import fmripost_aroma

     sys.exit(fmripost_aroma())
diff --git a/src/fmripost_aroma/cli/__init__.py b/src/fmripost_aroma/cli/__init__.py
index 66bc48e..e24f838 100644
--- a/src/fmripost_aroma/cli/__init__.py
+++ b/src/fmripost_aroma/cli/__init__.py
@@ -6,8 +6,8 @@
 from .. import __version__

-@click.group(context_settings={"help_option_names": ["-h", "--help"]}, invoke_without_command=True)
-@click.version_option(version=__version__, prog_name="fmripost-aroma")
+@click.group(context_settings={'help_option_names': ['-h', '--help']}, invoke_without_command=True)
+@click.version_option(version=__version__, prog_name='fmripost-aroma')
 @click.pass_context
 def fmripost_aroma(ctx: click.Context):
-    click.echo("Hello world!")
+    click.echo('Hello world!')
diff --git a/src/fmripost_aroma/cli/parser.py b/src/fmripost_aroma/cli/parser.py
index 28cade6..3b32ee9 100644
--- a/src/fmripost_aroma/cli/parser.py
+++ b/src/fmripost_aroma/cli/parser.py
@@ -47,14 +47,14 @@ def __call__(self, parser, namespace, values, option_string=None):
         d = {}
         for spec in values:
             try:
-                name, loc = spec.split("=")
+                name, loc = spec.split('=')
                 loc = Path(loc)
             except ValueError:
                 loc = Path(spec)
                 name = loc.name
             if name in d:
-                raise ValueError(f"Received duplicate derivative name: {name}")
+                raise ValueError(f'Received duplicate derivative name: {name}')

             d[name] = loc
         setattr(namespace, self.dest, d)
@@ -62,14 +62,14 @@ def __call__(self, parser, namespace, values, option_string=None):

 def _path_exists(path, parser):
     """Ensure a given path exists."""
     if path is None or not Path(path).exists():
-        raise parser.error(f"Path does not exist: <{path}>.")
+        raise parser.error(f'Path does not exist: <{path}>.')
     return Path(path).absolute()


 def _is_file(path, parser):
     """Ensure a given path exists and it is a file."""
     path = _path_exists(path, parser)
     if not path.is_file():
-        raise parser.error(f"Path should point to a file (or symlink of file): <{path}>.")
+        raise parser.error(f'Path should point to a file (or symlink of file): <{path}>.')
     return path


 def _min_one(value, parser):
@@ -80,19 +80,19 @@
     return value


 def _to_gb(value):
-    scale = {"G": 1, "T": 10**3, "M": 1e-3, "K": 1e-6, "B": 1e-9}
-    digits = "".join([c for c in value if c.isdigit()])
-    units = value[len(digits) :] or "M"
+    scale = {'G': 1, 'T': 10**3, 'M': 1e-3, 'K': 1e-6, 'B': 1e-9}
+    digits = ''.join([c for c in value if c.isdigit()])
+    units = value[len(digits) :] or 'M'
     return int(digits) * scale[units[0]]


 def _drop_sub(value):
-    return value[4:] if value.startswith("sub-") else value
+    return value[4:] if value.startswith('sub-') else value
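
Review note on _to_gb above: it normalizes --mem/--mem-mb strings to gigabytes, with 'G' as the base unit and a unit-less value read as megabytes. A minimal sketch of that behavior (the function body mirrors the helper in the hunk; the assertions are illustrative only, not part of the patch):

    def to_gb(value: str) -> float:
        scale = {'G': 1, 'T': 10**3, 'M': 1e-3, 'K': 1e-6, 'B': 1e-9}
        digits = ''.join([c for c in value if c.isdigit()])
        units = value[len(digits):] or 'M'  # bare numbers are read as megabytes
        return int(digits) * scale[units[0]]

    assert to_gb('8G') == 8       # gigabytes pass through unscaled
    assert to_gb('8000M') == 8.0  # 8000 MB -> 8 GB
    assert to_gb('8000') == 8.0   # missing unit defaults to 'M'
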


 def _filter_pybids_none_any(dct):
     import bids

     return {
-        k: bids.layout.Query.NONE if v is None else (bids.layout.Query.ANY if v == "*" else v)
+        k: bids.layout.Query.NONE if v is None else (bids.layout.Query.ANY if v == '*' else v)
         for k, v in dct.items()
     }

@@ -103,18 +103,18 @@ def _bids_filter(value, parser):
     if Path(value).exists():
         try:
             return loads(Path(value).read_text(), object_hook=_filter_pybids_none_any)
-        except JSONDecodeError:
-            raise parser.error(f"JSON syntax error in: <{value}>.")
+        except JSONDecodeError as e:
+            raise parser.error(f'JSON syntax error in: <{value}>.') from e
     else:
-        raise parser.error(f"Path does not exist: <{value}>.")
+        raise parser.error(f'Path does not exist: <{value}>.')

-    verstr = f"fMRIPost-AROMA v{config.environment.version}"
+    verstr = f'fMRIPost-AROMA v{config.environment.version}'
     currentv = Version(config.environment.version)
     is_release = not any((currentv.is_devrelease, currentv.is_prerelease, currentv.is_postrelease))

     parser = ArgumentParser(
         description=(
-            f"fMRIPost-AROMA: fMRI POSTprocessing AROMA workflow v{config.environment.version}"
+            f'fMRIPost-AROMA: fMRI POSTprocessing AROMA workflow v{config.environment.version}'
         ),
         formatter_class=ArgumentDefaultsHelpFormatter,
         **kwargs,
     )
@@ -128,60 +128,60 @@ def _bids_filter(value, parser):
     # required, positional arguments
     # IMPORTANT: they must go directly with the parser object
     parser.add_argument(
-        "bids_dir",
-        action="store",
+        'bids_dir',
+        action='store',
         type=PathExists,
         help=(
-            "The root folder of a BIDS-valid raw dataset "
-            "(sub-XXXXX folders should be found at the top level in this folder)."
+            'The root folder of a BIDS-valid raw dataset '
+            '(sub-XXXXX folders should be found at the top level in this folder).'
         ),
     )
     parser.add_argument(
-        "output_dir",
-        action="store",
+        'output_dir',
+        action='store',
         type=Path,
-        help="The output path for the outcomes of preprocessing and visual reports",
+        help='The output path for the outcomes of preprocessing and visual reports',
     )
     parser.add_argument(
-        "analysis_level",
-        choices=["participant"],
+        'analysis_level',
+        choices=['participant'],
         help=(
             "Processing stage to be run, only 'participant' in the case of "
-            "fMRIPost-AROMA (see BIDS-Apps specification)."
+            'fMRIPost-AROMA (see BIDS-Apps specification).'
         ),
     )

-    g_bids = parser.add_argument_group("Options for filtering BIDS queries")
+    g_bids = parser.add_argument_group('Options for filtering BIDS queries')
     g_bids.add_argument(
-        "--skip_bids_validation",
-        "--skip-bids-validation",
-        action="store_true",
+        '--skip_bids_validation',
+        '--skip-bids-validation',
+        action='store_true',
         default=False,
-        help="Assume the input dataset is BIDS compliant and skip the validation",
+        help='Assume the input dataset is BIDS compliant and skip the validation',
     )
     g_bids.add_argument(
-        "--participant-label",
-        "--participant_label",
-        action="store",
-        nargs="+",
+        '--participant-label',
+        '--participant_label',
+        action='store',
+        nargs='+',
         type=_drop_sub,
         help=(
-            "A space delimited list of participant identifiers or a single "
-            "identifier (the sub- prefix can be removed)"
+            'A space delimited list of participant identifiers or a single '
+            'identifier (the sub- prefix can be removed)'
         ),
     )
     g_bids.add_argument(
-        "-t",
-        "--task-id",
-        action="store",
-        help="Select a specific task to be processed",
+        '-t',
+        '--task-id',
+        action='store',
+        help='Select a specific task to be processed',
     )
     g_bids.add_argument(
-        "--bids-filter-file",
-        dest="bids_filters",
-        action="store",
+        '--bids-filter-file',
+        dest='bids_filters',
+        action='store',
         type=BIDSFilter,
-        metavar="FILE",
+        metavar='FILE',
         help=(
             "A JSON file describing custom BIDS input filters using PyBIDS. "
             "For further details, please check out "
         ),
     )
@@ -191,111 +191,111 @@ def _bids_filter(value, parser):
     g_bids.add_argument(
-        "-d",
-        "--derivatives",
+        '-d',
+        '--derivatives',
         action=ToDict,
-        metavar="PACKAGE=PATH",
+        metavar='PACKAGE=PATH',
         type=str,
-        nargs="+",
+        nargs='+',
         help=(
-            "Search PATH(s) for pre-computed derivatives. "
-            "These may be provided as named folders "
-            "(e.g., `--derivatives smriprep=/path/to/smriprep`)."
+            'Search PATH(s) for pre-computed derivatives. '
+            'These may be provided as named folders '
+            '(e.g., `--derivatives smriprep=/path/to/smriprep`).'
         ),
     )
     g_bids.add_argument(
-        "--bids-database-dir",
-        metavar="PATH",
+        '--bids-database-dir',
+        metavar='PATH',
         type=Path,
         help=(
-            "Path to a PyBIDS database folder, for faster indexing "
-            "(especially useful for large datasets). "
-            "Will be created if not present."
+            'Path to a PyBIDS database folder, for faster indexing '
+            '(especially useful for large datasets). '
+            'Will be created if not present.'
         ),
     )

-    g_perfm = parser.add_argument_group("Options to handle performance")
+    g_perfm = parser.add_argument_group('Options to handle performance')
     g_perfm.add_argument(
-        "--nprocs",
-        "--nthreads",
-        "--n_cpus",
-        "--n-cpus",
-        dest="nprocs",
-        action="store",
+        '--nprocs',
+        '--nthreads',
+        '--n_cpus',
+        '--n-cpus',
+        dest='nprocs',
+        action='store',
         type=PositiveInt,
-        help="Maximum number of threads across all processes",
+        help='Maximum number of threads across all processes',
     )
     g_perfm.add_argument(
-        "--omp-nthreads",
-        action="store",
+        '--omp-nthreads',
+        action='store',
         type=PositiveInt,
-        help="Maximum number of threads per-process",
+        help='Maximum number of threads per-process',
     )
     g_perfm.add_argument(
-        "--mem",
-        "--mem_mb",
-        "--mem-mb",
-        dest="memory_gb",
-        action="store",
+        '--mem',
+        '--mem_mb',
+        '--mem-mb',
+        dest='memory_gb',
+        action='store',
         type=_to_gb,
-        metavar="MEMORY_MB",
-        help="Upper bound memory limit for fMRIPost-AROMA processes",
+        metavar='MEMORY_MB',
+        help='Upper bound memory limit for fMRIPost-AROMA processes',
     )
     g_perfm.add_argument(
-        "--low-mem",
-        action="store_true",
-        help="Attempt to reduce memory usage (will increase disk usage in working directory)",
+        '--low-mem',
+        action='store_true',
+        help='Attempt to reduce memory usage (will increase disk usage in working directory)',
     )
     g_perfm.add_argument(
-        "--use-plugin",
-        "--nipype-plugin-file",
-        action="store",
-        metavar="FILE",
+        '--use-plugin',
+        '--nipype-plugin-file',
+        action='store',
+        metavar='FILE',
         type=IsFile,
-        help="Nipype plugin configuration file",
+        help='Nipype plugin configuration file',
     )
     g_perfm.add_argument(
-        "--sloppy",
-        action="store_true",
+        '--sloppy',
+        action='store_true',
         default=False,
-        help="Use low-quality tools for speed - TESTING ONLY",
+        help='Use low-quality tools for speed - TESTING ONLY',
     )

-    g_subset = parser.add_argument_group("Options for performing only a subset of the workflow")
+    g_subset = parser.add_argument_group('Options for performing only a subset of the workflow')
     g_subset.add_argument(
-        "--boilerplate-only",
-        "--boilerplate_only",
-        action="store_true",
+        '--boilerplate-only',
+        '--boilerplate_only',
+        action='store_true',
         default=False,
-        help="Generate boilerplate only",
+        help='Generate boilerplate only',
    )
    g_subset.add_argument(
-        "--reports-only",
-        action="store_true",
+        '--reports-only',
+        action='store_true',
        default=False,
        help=(
            "Only generate reports, don't run workflows. "
-            "This will only rerun report aggregation, not reportlet generation for specific "
-            "nodes."
+            'This will only rerun report aggregation, not reportlet generation for specific '
+            'nodes.'
        ),
    )

-    g_conf = parser.add_argument_group("Workflow configuration")
+    g_conf = parser.add_argument_group('Workflow configuration')
     g_conf.add_argument(
-        "--ignore",
+        '--ignore',
         required=False,
-        action="store",
-        nargs="+",
+        action='store',
+        nargs='+',
         default=[],
-        choices=["fieldmaps", "slicetiming", "sbref", "t2w", "flair"],
+        choices=['fieldmaps', 'slicetiming', 'sbref', 't2w', 'flair'],
         help=(
-            "Ignore selected aspects of the input dataset to disable corresponding "
-            "parts of the workflow (a space delimited list)"
+            'Ignore selected aspects of the input dataset to disable corresponding '
+            'parts of the workflow (a space delimited list)'
         ),
     )
     g_conf.add_argument(
-        "--output-spaces",
-        nargs="*",
+        '--output-spaces',
+        nargs='*',
         action=OutputReferencesAction,
         help="""\
 Standard and non-standard spaces to resample denoised functional images to. \
@@ -306,158 +306,157 @@ def _bids_filter(value, parser):
 Non-standard spaces imply specific orientations and sampling grids. \
 For further details, please check out \
 https://fmriprep.readthedocs.io/en/%s/spaces.html"""
-        % (currentv.base_version if is_release else "latest"),
+        % (currentv.base_version if is_release else 'latest'),
     )
     g_conf.add_argument(
-        "--dummy-scans",
+        '--dummy-scans',
         required=False,
-        action="store",
+        action='store',
         default=None,
         type=int,
-        help="Number of nonsteady-state volumes. Overrides automatic detection.",
+        help='Number of nonsteady-state volumes. Overrides automatic detection.',
     )
     g_conf.add_argument(
-        "--random-seed",
-        dest="_random_seed",
-        action="store",
+        '--random-seed',
+        dest='_random_seed',
+        action='store',
         type=int,
         default=None,
-        help="Initialize the random seed for the workflow",
+        help='Initialize the random seed for the workflow',
     )

-    g_outputs = parser.add_argument_group("Options for modulating outputs")
+    g_outputs = parser.add_argument_group('Options for modulating outputs')
     g_outputs.add_argument(
-        "--md-only-boilerplate",
-        action="store_true",
+        '--md-only-boilerplate',
+        action='store_true',
         default=False,
-        help="Skip generation of HTML and LaTeX formatted citation with pandoc",
+        help='Skip generation of HTML and LaTeX formatted citation with pandoc',
     )

-    g_aroma = parser.add_argument_group("Options for running ICA_AROMA")
+    g_aroma = parser.add_argument_group('Options for running ICA_AROMA')
     g_aroma.add_argument(
-        "--melodic-dimensionality",
-        dest="melodic_dim",
-        action="store",
+        '--melodic-dimensionality',
+        dest='melodic_dim',
+        action='store',
         default=0,
         type=int,
         help=(
-            "Exact or maximum number of MELODIC components to estimate "
-            "(positive = exact, negative = maximum)"
+            'Exact or maximum number of MELODIC components to estimate '
+            '(positive = exact, negative = maximum)'
         ),
     )
     g_aroma.add_argument(
-        "--error-on-warnings",
-        dest="error_on_aroma_warnings",
-        action="store_true",
+        '--error-on-warnings',
+        dest='error_on_aroma_warnings',
+        action='store_true',
         default=False,
         help=(
-            "Raise an error if ICA_AROMA does not produce sensible output "
-            "(e.g., if all the components are classified as signal or noise)"
+            'Raise an error if ICA_AROMA does not produce sensible output '
+            '(e.g., if all the components are classified as signal or noise)'
         ),
     )

-    g_carbon = parser.add_argument_group("Options for carbon usage tracking")
+    g_carbon = parser.add_argument_group('Options for carbon usage tracking')
     g_carbon.add_argument(
-        "--track-carbon",
-        action="store_true",
-        help="Tracks power draws using CodeCarbon package",
+        '--track-carbon',
+        action='store_true',
+        help='Tracks power draws using CodeCarbon package',
     )
     g_carbon.add_argument(
-        "--country-code",
-        action="store",
-        default="CAN",
+        '--country-code',
+        action='store',
+        default='CAN',
         type=str,
-        help="Country ISO code used by carbon trackers",
+        help='Country ISO code used by carbon trackers',
     )

-    g_other = parser.add_argument_group("Other options")
-    g_other.add_argument("--version", action="version", version=verstr)
+    g_other = parser.add_argument_group('Other options')
+    g_other.add_argument('--version', action='version', version=verstr)
     g_other.add_argument(
-        "-v",
-        "--verbose",
-        dest="verbose_count",
-        action="count",
+        '-v',
+        '--verbose',
+        dest='verbose_count',
+        action='count',
         default=0,
-        help="Increases log verbosity for each occurrence, debug level is -vvv",
+        help='Increases log verbosity for each occurrence, debug level is -vvv',
     )
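
Review note: with action='count', each repeated -v increments verbose_count, and parse_args() in a later hunk maps it onto a logging level via 25 - 5 * verbose_count. Given the IMPORTANT (25) and VERBOSE (15) levels registered in config.py, a quick sketch of that mapping (illustrative only, not part of the patch):

    import logging

    def log_level(verbose_count: int) -> int:
        # no flag -> 25 (IMPORTANT), -v -> 20 (INFO), -vv -> 15 (VERBOSE), -vvv -> 10 (DEBUG)
        return int(max(25 - 5 * verbose_count, logging.DEBUG))

    assert [log_level(n) for n in range(4)] == [25, 20, 15, 10]
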
     g_other.add_argument(
-        "-w",
-        "--work-dir",
-        action="store",
+        '-w',
+        '--work-dir',
+        action='store',
         type=Path,
-        default=Path("work").absolute(),
-        help="Path where intermediate results should be stored",
+        default=Path('work').absolute(),
+        help='Path where intermediate results should be stored',
     )
     g_other.add_argument(
-        "--clean-workdir",
-        action="store_true",
+        '--clean-workdir',
+        action='store_true',
         default=False,
-        help="Clears working directory of contents. Use of this flag is not "
-        "recommended when running concurrent processes of fMRIPost-AROMA.",
+        help='Clears working directory of contents. Use of this flag is not '
+        'recommended when running concurrent processes of fMRIPost-AROMA.',
     )
     g_other.add_argument(
-        "--resource-monitor",
-        action="store_true",
+        '--resource-monitor',
+        action='store_true',
         default=False,
         help="Enable Nipype's resource monitoring to keep track of memory and CPU usage",
     )
     g_other.add_argument(
-        "--config-file",
-        action="store",
-        metavar="FILE",
-        help="Use pre-generated configuration file. Values in file will be overridden "
-        "by command-line arguments.",
+        '--config-file',
+        action='store',
+        metavar='FILE',
+        help='Use pre-generated configuration file. Values in file will be overridden '
+        'by command-line arguments.',
     )
     g_other.add_argument(
-        "--write-graph",
-        action="store_true",
+        '--write-graph',
+        action='store_true',
         default=False,
-        help="Write workflow graph.",
+        help='Write workflow graph.',
     )
     g_other.add_argument(
-        "--stop-on-first-crash",
-        action="store_true",
+        '--stop-on-first-crash',
+        action='store_true',
         default=False,
-        help="Force stopping on first crash, even if a work directory was specified.",
+        help='Force stopping on first crash, even if a work directory was specified.',
     )
     g_other.add_argument(
-        "--notrack",
-        action="store_true",
+        '--notrack',
+        action='store_true',
         default=False,
-        help="Opt-out of sending tracking information of this run to "
-        "the FMRIPREP developers. This information helps to "
-        "improve FMRIPREP and provides an indicator of real "
-        "world usage crucial for obtaining funding.",
+        help='Opt-out of sending tracking information of this run to '
+        'the FMRIPREP developers. This information helps to '
+        'improve FMRIPREP and provides an indicator of real '
+        'world usage crucial for obtaining funding.',
     )
     g_other.add_argument(
-        "--debug",
-        action="store",
-        nargs="+",
-        choices=config.DEBUG_MODES + ("all",),
+        '--debug',
+        action='store',
+        nargs='+',
+        choices=config.DEBUG_MODES + ('all',),
         help="Debug mode(s) to enable. 'all' is alias for all available modes.",
     )

     latest = check_latest()
     if latest is not None and currentv < latest:
         print(
-            """\
-You are using fMRIPost-AROMA-%s, and a newer version of fMRIPost-AROMA is available: %s.
+            f"""\
+You are using fMRIPost-AROMA-{currentv},
+and a newer version of fMRIPost-AROMA is available: {latest}.
 Please check out our documentation about how and when to upgrade:
-https://fmriprep.readthedocs.io/en/latest/faq.html#upgrading"""
-            % (currentv, latest),
+https://fmriprep.readthedocs.io/en/latest/faq.html#upgrading""",
             file=sys.stderr,
         )

     _blist = is_flagged()
     if _blist[0]:
-        _reason = _blist[1] or "unknown"
+        _reason = _blist[1] or 'unknown'
         print(
-            """\
-WARNING: Version %s of fMRIPost-AROMA (current) has been FLAGGED
-(reason: %s).
+            f"""\
+WARNING: Version {config.environment.version} of fMRIPost-AROMA (current) has been FLAGGED
+(reason: {_reason}).
 That means some severe flaw was found in it and we strongly
-discourage its usage."""
-            % (config.environment.version, _reason),
+discourage its usage.""",
             file=sys.stderr,
         )

@@ -473,23 +472,23 @@ def parse_args(args=None, namespace=None):
     opts = parser.parse_args(args, namespace)

     if opts.config_file:
-        skip = {} if opts.reports_only else {"execution": ("run_uuid",)}
+        skip = {} if opts.reports_only else {'execution': ('run_uuid',)}
         config.load(opts.config_file, skip=skip, init=False)
-        config.loggers.cli.info(f"Loaded previous configuration file {opts.config_file}")
+        config.loggers.cli.info(f'Loaded previous configuration file {opts.config_file}')

     config.execution.log_level = int(max(25 - 5 * opts.verbose_count, logging.DEBUG))
-    config.from_dict(vars(opts), init=["nipype"])
+    config.from_dict(vars(opts), init=['nipype'])

     if not config.execution.notrack:
         import pkgutil

-        if pkgutil.find_loader("sentry_sdk") is None:
+        if pkgutil.find_loader('sentry_sdk') is None:
             config.execution.notrack = True
-            config.loggers.cli.warning("Telemetry disabled because sentry_sdk is not installed.")
+            config.loggers.cli.warning('Telemetry disabled because sentry_sdk is not installed.')
         else:
             config.loggers.cli.info(
-                "Telemetry system to collect crashes and errors is enabled "
-                "- thanks for your feedback!. Use option ``--notrack`` to opt out."
+                'Telemetry system to collect crashes and errors is enabled '
+                '- thanks for your feedback!. Use option ``--notrack`` to opt out.'
             )

     # Retrieve logging level
@@ -500,13 +499,13 @@ def parse_args(args=None, namespace=None):
         import yaml

         with open(opts.use_plugin) as f:
-            plugin_settings = yaml.load(f, Loader=yaml.FullLoader)
-        _plugin = plugin_settings.get("plugin")
+            plugin_settings = yaml.safe_load(f)
+        _plugin = plugin_settings.get('plugin')
         if _plugin:
             config.nipype.plugin = _plugin
-            config.nipype.plugin_args = plugin_settings.get("plugin_args", {})
+            config.nipype.plugin_args = plugin_settings.get('plugin_args', {})
             config.nipype.nprocs = opts.nprocs or config.nipype.plugin_args.get(
-                "n_procs", config.nipype.nprocs
+                'n_procs', config.nipype.nprocs
             )
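
Review note on the plugin-file hunk above: yaml.load(f, Loader=yaml.FullLoader) becomes yaml.safe_load(f), which builds only plain Python values (dicts, lists, strings, numbers) and refuses arbitrary object construction; the bandit-derived S rules this PR enables in Ruff flag the loader-based form. A self-contained usage sketch for a Nipype plugin settings document (the settings content is hypothetical):

    import yaml

    settings = yaml.safe_load(
        'plugin: MultiProc\n'
        'plugin_args:\n'
        '  n_procs: 8\n'
    )  # safe_load never instantiates arbitrary Python objects

    assert settings['plugin'] == 'MultiProc'
    assert settings['plugin_args']['n_procs'] == 8
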
" - "Please modify the output path." + 'The selected working directory is a subdirectory of the input BIDS folder. ' + 'Please modify the output path.' ) # Validate inputs @@ -556,13 +555,13 @@ def parse_args(args=None, namespace=None): from fmripost_aroma.utils.bids import validate_input_dir build_log.info( - "Making sure the input data is BIDS compliant " - "(warnings can be ignored in most cases)." + 'Making sure the input data is BIDS compliant ' + '(warnings can be ignored in most cases).' ) validate_input_dir(config.environment.exec_env, opts.bids_dir, opts.participant_label) # Setup directories - config.execution.log_dir = config.execution.fmriprep_dir / "logs" + config.execution.log_dir = config.execution.fmriprep_dir / 'logs' # Check and create output and working directories config.execution.log_dir.mkdir(exist_ok=True, parents=True) work_dir.mkdir(exist_ok=True, parents=True) diff --git a/src/fmripost_aroma/cli/run.py b/src/fmripost_aroma/cli/run.py index ebeaeb9..0dac556 100644 --- a/src/fmripost_aroma/cli/run.py +++ b/src/fmripost_aroma/cli/run.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # @@ -48,20 +47,20 @@ def main(): from codecarbon import OfflineEmissionsTracker country_iso_code = config.execution.country_code - config.loggers.workflow.log(25, "CodeCarbon tracker started ...") - config.loggers.workflow.log(25, f"Using country_iso_code: {country_iso_code}") - config.loggers.workflow.log(25, f"Saving logs at: {config.execution.log_dir}") + config.loggers.workflow.log(25, 'CodeCarbon tracker started ...') + config.loggers.workflow.log(25, f'Using country_iso_code: {country_iso_code}') + config.loggers.workflow.log(25, f'Saving logs at: {config.execution.log_dir}') tracker = OfflineEmissionsTracker( output_dir=config.execution.log_dir, country_iso_code=country_iso_code ) tracker.start() - if "pdb" in config.execution.debug: + if 'pdb' in config.execution.debug: from fmripost_aroma.utils.debug import setup_exceptionhook setup_exceptionhook() - config.nipype.plugin = "Linear" + config.nipype.plugin = 'Linear' sentry_sdk = None if not config.execution.notrack and not config.execution.debug: @@ -78,14 +77,14 @@ def main(): # CRITICAL Save the config to a file. This is necessary because the execution graph # is built as a separate process to keep the memory footprint low. The most # straightforward way to communicate with the child process is via the filesystem. - config_file = config.execution.work_dir / config.execution.run_uuid / "config.toml" + config_file = config.execution.work_dir / config.execution.run_uuid / 'config.toml' config_file.parent.mkdir(exist_ok=True, parents=True) config.to_filename(config_file) # CRITICAL Call build_workflow(config_file, retval) in a subprocess. # Because Python on Linux does not ever free virtual memory (VM), running the # workflow construction jailed within a process preempts excessive VM buildup. 
- if "pdb" not in config.execution.debug: + if 'pdb' not in config.execution.debug: with Manager() as mgr: retval = mgr.dict() p = Process(target=build_workflow, args=(str(config_file), retval)) @@ -94,14 +93,14 @@ def main(): retval = dict(retval.items()) # Convert to base dictionary if p.exitcode: - retval["return_code"] = p.exitcode + retval['return_code'] = p.exitcode else: retval = build_workflow(str(config_file), {}) global EXITCODE - EXITCODE = retval.get("return_code", 0) - fmripost_aroma_wf = retval.get("workflow", None) + EXITCODE = retval.get('return_code', 0) + fmripost_aroma_wf = retval.get('workflow', None) # CRITICAL Load the config from the file. This is necessary because the ``build_workflow`` # function executed constrained in a process may change the config (and thus the global @@ -112,7 +111,7 @@ def main(): sys.exit(int(EXITCODE > 0)) if fmripost_aroma_wf and config.execution.write_graph: - fmripost_aroma_wf.write_graph(graph2use="colored", format="svg", simple_form=True) + fmripost_aroma_wf.write_graph(graph2use='colored', format='svg', simple_form=True) EXITCODE = EXITCODE or (fmripost_aroma_wf is None) * EX_SOFTWARE if EXITCODE != 0: @@ -135,18 +134,16 @@ def main(): # Sentry tracking if sentry_sdk is not None: with sentry_sdk.configure_scope() as scope: - scope.set_tag("run_uuid", config.execution.run_uuid) - scope.set_tag("npart", len(config.execution.participant_label)) - sentry_sdk.add_breadcrumb(message="fMRIPost-AROMA started", level="info") - sentry_sdk.capture_message("fMRIPost-AROMA started", level="info") + scope.set_tag('run_uuid', config.execution.run_uuid) + scope.set_tag('npart', len(config.execution.participant_label)) + sentry_sdk.add_breadcrumb(message='fMRIPost-AROMA started', level='info') + sentry_sdk.capture_message('fMRIPost-AROMA started', level='info') config.loggers.workflow.log( 15, - "\n".join( - ["fMRIPost-AROMA config:"] + ["\t\t%s" % s for s in config.dumps().splitlines()] - ), + '\n'.join(['fMRIPost-AROMA config:'] + [f'\t\t{s}' for s in config.dumps().splitlines()]), ) - config.loggers.workflow.log(25, "fMRIPost-AROMA started!") + config.loggers.workflow.log(25, 'fMRIPost-AROMA started!') errno = 1 # Default is error exit unless otherwise set try: fmripost_aroma_wf.run(**config.nipype.get_plugin()) @@ -156,54 +153,54 @@ def main(): crashfolders = [ config.execution.fmripost_aroma_dir - / f"sub-{s}" - / "log" + / f'sub-{s}' + / 'log' / config.execution.run_uuid for s in config.execution.participant_label ] for crashfolder in crashfolders: - for crashfile in crashfolder.glob("crash*.*"): + for crashfile in crashfolder.glob('crash*.*'): process_crashfile(crashfile) - if sentry_sdk is not None and "Workflow did not execute cleanly" not in str(e): + if sentry_sdk is not None and 'Workflow did not execute cleanly' not in str(e): sentry_sdk.capture_exception(e) - config.loggers.workflow.critical("fMRIPost-AROMA failed: %s", e) + config.loggers.workflow.critical('fMRIPost-AROMA failed: %s', e) raise else: - config.loggers.workflow.log(25, "fMRIPost-AROMA finished successfully!") + config.loggers.workflow.log(25, 'fMRIPost-AROMA finished successfully!') if sentry_sdk is not None: - success_message = "fMRIPost-AROMA finished without errors" - sentry_sdk.add_breadcrumb(message=success_message, level="info") - sentry_sdk.capture_message(success_message, level="info") + success_message = 'fMRIPost-AROMA finished without errors' + sentry_sdk.add_breadcrumb(message=success_message, level='info') + sentry_sdk.capture_message(success_message, 

         # Bother users with the boilerplate only iff the workflow went okay.
-        boiler_file = config.execution.fmripost_aroma_dir / "logs" / "CITATION.md"
+        boiler_file = config.execution.fmripost_aroma_dir / 'logs' / 'CITATION.md'
         if boiler_file.exists():
             if config.environment.exec_env in (
-                "singularity",
-                "docker",
-                "fmripost_aroma-docker",
+                'singularity',
+                'docker',
+                'fmripost_aroma-docker',
             ):
-                boiler_file = Path("") / boiler_file.relative_to(
+                boiler_file = Path('') / boiler_file.relative_to(
                     config.execution.output_dir
                 )
             config.loggers.workflow.log(
                 25,
-                "Works derived from this fMRIPost-AROMA execution should include the "
-                f"boilerplate text found in {boiler_file}.",
+                'Works derived from this fMRIPost-AROMA execution should include the '
+                f'boilerplate text found in {boiler_file}.',
             )

         if config.workflow.run_reconall:
             from niworkflows.utils.misc import _copy_any
             from templateflow import api

-            dseg_tsv = str(api.get("fsaverage", suffix="dseg", extension=[".tsv"]))
-            _copy_any(dseg_tsv, str(config.execution.fmripost_aroma_dir / "desc-aseg_dseg.tsv"))
+            dseg_tsv = str(api.get('fsaverage', suffix='dseg', extension=['.tsv']))
+            _copy_any(dseg_tsv, str(config.execution.fmripost_aroma_dir / 'desc-aseg_dseg.tsv'))
             _copy_any(
-                dseg_tsv, str(config.execution.fmripost_aroma_dir / "desc-aparcaseg_dseg.tsv")
+                dseg_tsv, str(config.execution.fmripost_aroma_dir / 'desc-aparcaseg_dseg.tsv')
             )
         errno = 0
     finally:
@@ -212,9 +209,9 @@ def main():
         # Code Carbon
         if config.execution.track_carbon:
             emissions: float = tracker.stop()
-            config.loggers.workflow.log(25, "CodeCarbon tracker has stopped.")
-            config.loggers.workflow.log(25, f"Saving logs at: {config.execution.log_dir}")
-            config.loggers.workflow.log(25, f"Carbon emissions: {emissions} kg")
+            config.loggers.workflow.log(25, 'CodeCarbon tracker has stopped.')
+            config.loggers.workflow.log(25, f'Saving logs at: {config.execution.log_dir}')
+            config.loggers.workflow.log(25, f'Carbon emissions: {emissions} kg')

         from fmripost_aroma.reports.core import generate_reports

@@ -223,8 +220,8 @@ def main():
             config.execution.participant_label,
             config.execution.fmripost_aroma_dir,
             config.execution.run_uuid,
-            config=pkgrf("fmripost_aroma", "data/reports-spec.yml"),
-            packagename="fmripost_aroma",
+            config=pkgrf('fmripost_aroma', 'data/reports-spec.yml'),
+            packagename='fmripost_aroma',
         )
         write_derivative_description(
             config.execution.bids_dir, config.execution.fmripost_aroma_dir
         )
@@ -233,8 +230,8 @@ def main():
         if sentry_sdk is not None and failed_reports:
             sentry_sdk.capture_message(
-                "Report generation failed for %d subjects" % failed_reports,
-                level="error",
+                'Report generation failed for %d subjects' % failed_reports,
+                level='error',
             )
         sys.exit(int((errno + failed_reports) > 0))

@@ -250,30 +247,30 @@ def migas_exit() -> None:
     from fmripost_aroma.utils.telemetry import send_breadcrumb

     global EXITCODE
-    migas_kwargs = {"status": "C"}
+    migas_kwargs = {'status': 'C'}
     # `sys` will not have these attributes unless an error has been handled
-    if hasattr(sys, "last_type"):
+    if hasattr(sys, 'last_type'):
         migas_kwargs = {
-            "status": "F",
-            "status_desc": "Finished with error(s)",
-            "error_type": sys.last_type,
-            "error_desc": sys.last_value,
+            'status': 'F',
+            'status_desc': 'Finished with error(s)',
+            'error_type': sys.last_type,
+            'error_desc': sys.last_value,
         }
     elif EXITCODE != 0:
         migas_kwargs.update(
             {
-                "status": "F",
-                "status_desc": f"Completed with exitcode {EXITCODE}",
+                'status': 'F',
+                'status_desc': f'Completed with exitcode {EXITCODE}',
             }
         )
     else:
-        migas_kwargs["status_desc"] = "Success"
+        migas_kwargs['status_desc'] = 'Success'

     send_breadcrumb(**migas_kwargs)


-if __name__ == "__main__":
+if __name__ == '__main__':
     raise RuntimeError(
-        "fmripost_aroma/cli/run.py should not be run directly;\n"
-        "Please `pip install` fmripost_aroma and use the `fmripost_aroma` command"
+        'fmripost_aroma/cli/run.py should not be run directly;\n'
+        'Please `pip install` fmripost_aroma and use the `fmripost_aroma` command'
     )
diff --git a/src/fmripost_aroma/cli/version.py b/src/fmripost_aroma/cli/version.py
index 93c5b04..5b15cf1 100644
--- a/src/fmripost_aroma/cli/version.py
+++ b/src/fmripost_aroma/cli/version.py
@@ -22,7 +22,8 @@
 #
 """Version CLI helpers."""

-from datetime import datetime
+from contextlib import suppress
+from datetime import datetime, timezone
 from pathlib import Path

 import requests
@@ -30,7 +31,7 @@
 from fmripost_aroma import __version__

 RELEASE_EXPIRY_DAYS = 14
-DATE_FMT = "%Y%m%d"
+DATE_FMT = '%Y%m%d'


 def check_latest():
@@ -40,7 +41,8 @@ def check_latest():
     latest = None
     date = None
     outdated = None
-    cachefile = Path.home() / ".cache" / "fmripost_aroma" / "latest"
+    now = datetime.now(tz=timezone.utc)
+    cachefile = Path.home() / '.cache' / 'fmripost_aroma' / 'latest'
     try:
         cachefile.parent.mkdir(parents=True, exist_ok=True)
     except OSError:
@@ -48,27 +50,26 @@ def check_latest():

     if cachefile and cachefile.exists():
         try:
-            latest, date = cachefile.read_text().split("|")
-        except Exception:
+            latest, date = cachefile.read_text().split('|')
+        except Exception:  # noqa: S110, BLE001
             pass
         else:
             try:
                 latest = Version(latest)
-                date = datetime.strptime(date, DATE_FMT)
+                date = datetime.strptime(date, DATE_FMT).astimezone(timezone.utc)
             except (InvalidVersion, ValueError):
                 latest = None
             else:
-                if abs((datetime.now() - date).days) > RELEASE_EXPIRY_DAYS:
+                if abs((now - date).days) > RELEASE_EXPIRY_DAYS:
                     outdated = True

     if latest is None or outdated is True:
-        try:
-            response = requests.get(url="https://pypi.org/pypi/fmripost_aroma/json", timeout=1.0)
-        except Exception:
-            response = None
+        response = None
+        with suppress(Exception):
+            response = requests.get(url='https://pypi.org/pypi/fmripost_aroma/json', timeout=1.0)

         if response and response.status_code == 200:
-            versions = [Version(rel) for rel in response.json()["releases"].keys()]
+            versions = [Version(rel) for rel in response.json()['releases'].keys()]
             versions = [rel for rel in versions if not rel.is_prerelease]
             if versions:
                 latest = sorted(versions)[-1]
@@ -76,10 +77,8 @@ def check_latest():
             latest = None

     if cachefile is not None and latest is not None:
-        try:
-            cachefile.write_text("|".join(("%s" % latest, datetime.now().strftime(DATE_FMT))))
-        except Exception:
-            pass
+        with suppress(OSError):
+            cachefile.write_text(f'{latest}|{now.strftime(DATE_FMT)}')

     return latest


@@ -87,18 +86,17 @@
 def is_flagged():
     """Check whether current version is flagged."""
     # https://raw.githubusercontent.com/nipreps/fmripost_aroma/main/.versions.json
-    flagged = tuple()
-    try:
+    flagged = ()
+    response = None
+    with suppress(Exception):
         response = requests.get(
             url="""\
 https://raw.githubusercontent.com/nipreps/fmripost_aroma/main/.versions.json""",
             timeout=1.0,
         )
-    except Exception:
-        response = None

     if response and response.status_code == 200:
-        flagged = response.json().get("flagged", {}) or {}
+        flagged = response.json().get('flagged', {}) or {}

         if __version__ in flagged:
             return True, flagged[__version__]
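
Review note on the version.py hunks above: check_latest() now computes a single timezone-aware timestamp (datetime.now(tz=timezone.utc)), which the DTZ rules enabled by this PR require, and swaps try/except/pass for contextlib.suppress. A compact sketch of the cache-freshness logic under those conventions, assuming the 'version|YYYYMMDD' cache format shown in the hunk:

    from contextlib import suppress
    from datetime import datetime, timezone
    from pathlib import Path

    DATE_FMT = '%Y%m%d'
    RELEASE_EXPIRY_DAYS = 14

    def cache_is_fresh(cachefile: Path) -> bool:
        now = datetime.now(tz=timezone.utc)
        stamp = None
        with suppress(OSError, ValueError):  # unreadable or corrupt cache: not fresh
            _, datestr = cachefile.read_text().split('|')
            stamp = datetime.strptime(datestr, DATE_FMT).astimezone(timezone.utc)
        return stamp is not None and (now - stamp).days <= RELEASE_EXPIRY_DAYS
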
diff --git a/src/fmripost_aroma/cli/workflow.py b/src/fmripost_aroma/cli/workflow.py
index 2082232..c1c7bf7 100644
--- a/src/fmripost_aroma/cli/workflow.py
+++ b/src/fmripost_aroma/cli/workflow.py
@@ -52,30 +52,30 @@ def build_workflow(config_file, retval):
     fmripost_aroma_dir = config.execution.fmripost_aroma_dir
     version = config.environment.version

-    retval["return_code"] = 1
-    retval["workflow"] = None
+    retval['return_code'] = 1
+    retval['workflow'] = None

-    banner = [f"Running fMRIPost-AROMA version {version}"]
-    notice_path = Path(pkgrf("fmripost_aroma", "data/NOTICE"))
+    banner = [f'Running fMRIPost-AROMA version {version}']
+    notice_path = Path(pkgrf('fmripost_aroma', 'data/NOTICE'))
     if notice_path.exists():
-        banner[0] += "\n"
+        banner[0] += '\n'
         banner += [f"License NOTICE {'#' * 50}"]
-        banner += [f"fMRIPost-AROMA {version}"]
+        banner += [f'fMRIPost-AROMA {version}']
         banner += notice_path.read_text().splitlines(keepends=False)[1:]
-        banner += ["#" * len(banner[1])]
+        banner += ['#' * len(banner[1])]

     build_log.log(25, f"\n{' ' * 9}".join(banner))

     # warn if older results exist: check for dataset_description.json in output folder
     msg = check_pipeline_version(
-        "fMRIPost-AROMA",
+        'fMRIPost-AROMA',
         version,
-        fmripost_aroma_dir / "dataset_description.json",
+        fmripost_aroma_dir / 'dataset_description.json',
     )
     if msg is not None:
         build_log.warning(msg)

     # Please note this is the input folder's dataset_description.json
-    dset_desc_path = config.execution.bids_dir / "dataset_description.json"
+    dset_desc_path = config.execution.bids_dir / 'dataset_description.json'
     if dset_desc_path.exists():
         from hashlib import sha256

@@ -90,34 +90,34 @@ def build_workflow(config_file, retval):

     # Called with reports only
     if config.execution.reports_only:
-        build_log.log(25, "Running --reports-only on participants %s", ", ".join(subject_list))
-        retval["return_code"] = generate_reports(
+        build_log.log(25, 'Running --reports-only on participants %s', ', '.join(subject_list))
+        retval['return_code'] = generate_reports(
             config.execution.participant_label,
             config.execution.fmripost_aroma_dir,
             config.execution.run_uuid,
-            config=pkgrf("fmripost_aroma", "data/reports-spec.yml"),
-            packagename="fmripost_aroma",
+            config=pkgrf('fmripost_aroma', 'data/reports-spec.yml'),
+            packagename='fmripost_aroma',
         )
         return retval

     # Build main workflow
     init_msg = [
         "Building fMRIPost-AROMA's workflow:",
-        f"BIDS dataset path: {config.execution.bids_dir}.",
-        f"Participant list: {subject_list}.",
-        f"Run identifier: {config.execution.run_uuid}.",
-        f"Output spaces: {config.execution.output_spaces}.",
+        f'BIDS dataset path: {config.execution.bids_dir}.',
+        f'Participant list: {subject_list}.',
+        f'Run identifier: {config.execution.run_uuid}.',
+        f'Output spaces: {config.execution.output_spaces}.',
     ]

     if config.execution.derivatives:
-        init_msg += [f"Searching for derivatives: {config.execution.derivatives}."]
+        init_msg += [f'Searching for derivatives: {config.execution.derivatives}.']

     if config.execution.fs_subjects_dir:
         init_msg += [f"Pre-run FreeSurfer's SUBJECTS_DIR: {config.execution.fs_subjects_dir}."]

     build_log.log(25, f"\n{' ' * 11}* ".join(init_msg))

-    retval["workflow"] = init_fmripost_aroma_wf()
+    retval['workflow'] = init_fmripost_aroma_wf()

     # Check for FS license after building the workflow
     if not check_valid_fs_license():
@@ -140,25 +140,25 @@ def build_workflow(config_file, retval):
 3) the ``$FREESURFER_HOME/license.txt`` path. \
 Get it (for free) by registering at https://surfer.nmr.mgh.harvard.edu/registration.html"""
         )
-        retval["return_code"] = 126  # 126 == Command invoked cannot execute.
+        retval['return_code'] = 126  # 126 == Command invoked cannot execute.
         return retval

     # Check workflow for missing commands
-    missing = check_deps(retval["workflow"])
+    missing = check_deps(retval['workflow'])
     if missing:
         build_log.critical(
-            "Cannot run fMRIPost-AROMA. Missing dependencies:%s",
-            "\n\t* ".join([""] + [f"{cmd} (Interface: {iface})" for iface, cmd in missing]),
+            'Cannot run fMRIPost-AROMA. Missing dependencies:%s',
+            '\n\t* '.join([''] + [f'{cmd} (Interface: {iface})' for iface, cmd in missing]),
         )
-        retval["return_code"] = 127  # 127 == command not found.
+        retval['return_code'] = 127  # 127 == command not found.
         return retval

     config.to_filename(config_file)
     build_log.info(
-        "fMRIPost-AROMA workflow graph with %d nodes built successfully.",
-        len(retval["workflow"]._get_all_nodes()),
+        'fMRIPost-AROMA workflow graph with %d nodes built successfully.',
+        len(retval['workflow']._get_all_nodes()),
     )
-    retval["return_code"] = 0
+    retval['return_code'] = 0

     return retval

@@ -167,11 +167,9 @@ def build_boilerplate(config_file, workflow):
     from fmripost_aroma import config

     config.load(config_file)
-    logs_path = config.execution.fmripost_aroma_dir / "logs"
+    logs_path = config.execution.fmripost_aroma_dir / 'logs'
     boilerplate = workflow.visit_desc()
-    citation_files = {
-        ext: logs_path / ("CITATION.%s" % ext) for ext in ("bib", "tex", "md", "html")
-    }
+    citation_files = {ext: logs_path / f'CITATION.{ext}' for ext in ('bib', 'tex', 'md', 'html')}

     if boilerplate:
         # To please git-annex users and also to guarantee consistency
@@ -183,55 +181,55 @@ def build_boilerplate(config_file, workflow):
         except FileNotFoundError:
             pass

-        citation_files["md"].write_text(boilerplate)
+        citation_files['md'].write_text(boilerplate)

-    if not config.execution.md_only_boilerplate and citation_files["md"].exists():
+    if not config.execution.md_only_boilerplate and citation_files['md'].exists():
         from pathlib import Path
         from subprocess import CalledProcessError, TimeoutExpired, check_call

         from pkg_resources import resource_filename as pkgrf

-        bib_text = Path(pkgrf("fmripost_aroma", "data/boilerplate.bib")).read_text()
-        citation_files["bib"].write_text(
+        bib_text = Path(pkgrf('fmripost_aroma', 'data/boilerplate.bib')).read_text()
+        citation_files['bib'].write_text(
             bib_text.replace(
-                "fMRIPost-AROMA ",
-                f"fMRIPost-AROMA {config.environment.version}",
+                'fMRIPost-AROMA ',
+                f'fMRIPost-AROMA {config.environment.version}',
             )
         )

         # Generate HTML file resolving citations
         cmd = [
-            "pandoc",
-            "-s",
-            "--bibliography",
-            str(citation_files["bib"]),
-            "--citeproc",
-            "--metadata",
+            'pandoc',
+            '-s',
+            '--bibliography',
+            str(citation_files['bib']),
+            '--citeproc',
+            '--metadata',
             'pagetitle="fMRIPost-AROMA citation boilerplate"',
-            str(citation_files["md"]),
-            "-o",
-            str(citation_files["html"]),
+            str(citation_files['md']),
+            '-o',
+            str(citation_files['html']),
         ]

-        config.loggers.cli.info("Generating an HTML version of the citation boilerplate...")
+        config.loggers.cli.info('Generating an HTML version of the citation boilerplate...')
         try:
             check_call(cmd, timeout=10)
         except (FileNotFoundError, CalledProcessError, TimeoutExpired):
-            config.loggers.cli.warning("Could not generate CITATION.html file:\n%s", " ".join(cmd))
+            config.loggers.cli.warning('Could not generate CITATION.html file:\n%s', ' '.join(cmd))

         # Generate LaTex file resolving citations
citations cmd = [ - "pandoc", - "-s", - "--bibliography", - str(citation_files["bib"]), - "--natbib", - str(citation_files["md"]), - "-o", - str(citation_files["tex"]), + 'pandoc', + '-s', + '--bibliography', + str(citation_files['bib']), + '--natbib', + str(citation_files['md']), + '-o', + str(citation_files['tex']), ] - config.loggers.cli.info("Generating a LaTeX version of the citation boilerplate...") + config.loggers.cli.info('Generating a LaTeX version of the citation boilerplate...') try: check_call(cmd, timeout=10) except (FileNotFoundError, CalledProcessError, TimeoutExpired): - config.loggers.cli.warning("Could not generate CITATION.tex file:\n%s", " ".join(cmd)) + config.loggers.cli.warning('Could not generate CITATION.tex file:\n%s', ' '.join(cmd)) diff --git a/src/fmripost_aroma/config.py b/src/fmripost_aroma/config.py index 8615933..2e6284a 100644 --- a/src/fmripost_aroma/config.py +++ b/src/fmripost_aroma/config.py @@ -94,14 +94,14 @@ from templateflow.conf import TF_LAYOUT # Disable NiPype etelemetry always -_disable_et = bool(os.getenv("NO_ET") is not None or os.getenv("NIPYPE_NO_ET") is not None) -os.environ["NIPYPE_NO_ET"] = "1" -os.environ["NO_ET"] = "1" +_disable_et = bool(os.getenv('NO_ET') is not None or os.getenv('NIPYPE_NO_ET') is not None) +os.environ['NIPYPE_NO_ET'] = '1' +os.environ['NO_ET'] = '1' -CONFIG_FILENAME = "fmripost_aroma.toml" +CONFIG_FILENAME = 'fmripost_aroma.toml' try: - set_start_method("forkserver") + set_start_method('forkserver') except RuntimeError: pass # context has been already set finally: @@ -118,28 +118,28 @@ from . import __version__ -if not hasattr(sys, "_is_pytest_session"): +if not hasattr(sys, '_is_pytest_session'): sys._is_pytest_session = False # Trick to avoid sklearn's FutureWarnings # Disable all warnings in main and children processes only on production versions if not any( ( - "+" in __version__, - __version__.endswith(".dirty"), - os.getenv("FMRIPREP_DEV", "0").lower() in ("1", "on", "true", "y", "yes"), + '+' in __version__, + __version__.endswith('.dirty'), + os.getenv('FMRIPREP_DEV', '0').lower() in ('1', 'on', 'true', 'y', 'yes'), ) ): from ._warnings import logging - os.environ["PYTHONWARNINGS"] = "ignore" -elif os.getenv("FMRIPREP_WARNINGS", "0").lower() in ("1", "on", "true", "y", "yes"): + os.environ['PYTHONWARNINGS'] = 'ignore' +elif os.getenv('FMRIPREP_WARNINGS', '0').lower() in ('1', 'on', 'true', 'y', 'yes'): # allow disabling warnings on development versions # https://github.com/nipreps/fmripost_aroma/pull/2080#discussion_r409118765 from ._warnings import logging else: import logging -logging.addLevelName(25, "IMPORTANT") # Add a new level between INFO and WARNING -logging.addLevelName(15, "VERBOSE") # Add a new level between INFO and DEBUG +logging.addLevelName(25, 'IMPORTANT') # Add a new level between INFO and WARNING +logging.addLevelName(15, 'VERBOSE') # Add a new level between INFO and DEBUG DEFAULT_MEMORY_MIN_GB = 0.01 @@ -153,29 +153,29 @@ from requests import get as _get_url with suppress((ConnectionError, ReadTimeout)): - _get_url("https://rig.mit.edu/et/projects/nipy/nipype", timeout=0.05) + _get_url('https://rig.mit.edu/et/projects/nipy/nipype', timeout=0.05) # Execution environment _exec_env = os.name _docker_ver = None # special variable set in the container -if os.getenv("IS_DOCKER_8395080871"): - _exec_env = "singularity" - _cgroup = Path("/proc/1/cgroup") - if _cgroup.exists() and "docker" in _cgroup.read_text(): - _docker_ver = os.getenv("DOCKER_VERSION_8395080871") - _exec_env = 
"fmripost_aroma-docker" if _docker_ver else "docker" +if os.getenv('IS_DOCKER_8395080871'): + _exec_env = 'singularity' + _cgroup = Path('/proc/1/cgroup') + if _cgroup.exists() and 'docker' in _cgroup.read_text(): + _docker_ver = os.getenv('DOCKER_VERSION_8395080871') + _exec_env = 'fmripost_aroma-docker' if _docker_ver else 'docker' del _cgroup -_fs_license = os.getenv("FS_LICENSE") -if not _fs_license and os.getenv("FREESURFER_HOME"): - _fs_home = os.getenv("FREESURFER_HOME") - if _fs_home and (Path(_fs_home) / "license.txt").is_file(): - _fs_license = str(Path(_fs_home) / "license.txt") +_fs_license = os.getenv('FS_LICENSE') +if not _fs_license and os.getenv('FREESURFER_HOME'): + _fs_home = os.getenv('FREESURFER_HOME') + if _fs_home and (Path(_fs_home) / 'license.txt').is_file(): + _fs_license = str(Path(_fs_home) / 'license.txt') del _fs_home _templateflow_home = Path( - os.getenv("TEMPLATEFLOW_HOME", os.path.join(os.getenv("HOME"), ".cache", "templateflow")) + os.getenv('TEMPLATEFLOW_HOME', os.path.join(os.getenv('HOME'), '.cache', 'templateflow')) ) try: @@ -185,28 +185,28 @@ except ImportError: _free_mem_at_start = None -_oc_limit = "n/a" -_oc_policy = "n/a" +_oc_limit = 'n/a' +_oc_policy = 'n/a' try: # Memory policy may have a large effect on types of errors experienced - _proc_oc_path = Path("/proc/sys/vm/overcommit_memory") + _proc_oc_path = Path('/proc/sys/vm/overcommit_memory') if _proc_oc_path.exists(): - _oc_policy = {"0": "heuristic", "1": "always", "2": "never"}.get( - _proc_oc_path.read_text().strip(), "unknown" + _oc_policy = {'0': 'heuristic', '1': 'always', '2': 'never'}.get( + _proc_oc_path.read_text().strip(), 'unknown' ) - if _oc_policy != "never": - _proc_oc_kbytes = Path("/proc/sys/vm/overcommit_kbytes") + if _oc_policy != 'never': + _proc_oc_kbytes = Path('/proc/sys/vm/overcommit_kbytes') if _proc_oc_kbytes.exists(): _oc_limit = _proc_oc_kbytes.read_text().strip() - if _oc_limit in ("0", "n/a") and Path("/proc/sys/vm/overcommit_ratio").exists(): - _oc_limit = "{}%".format(Path("/proc/sys/vm/overcommit_ratio").read_text().strip()) + if _oc_limit in ('0', 'n/a') and Path('/proc/sys/vm/overcommit_ratio').exists(): + _oc_limit = '{}%'.format(Path('/proc/sys/vm/overcommit_ratio').read_text().strip()) except Exception: # noqa: S110, BLE001 pass # Debug modes are names that influence the exposure of internal details to # the user, either through additional derivatives or increased verbosity -DEBUG_MODES = ("compcor", "fieldmaps", "pdb") +DEBUG_MODES = ('compcor', 'fieldmaps', 'pdb') class _Config: @@ -216,7 +216,7 @@ class _Config: def __init__(self): """Avert instantiation.""" - raise RuntimeError("Configuration type is not instantiable.") + raise RuntimeError('Configuration type is not instantiable.') @classmethod def load(cls, settings, init=True, ignore=None): @@ -248,7 +248,7 @@ def get(cls): out = {} for k, v in cls.__dict__.items(): - if k.startswith("_") or v is None: + if k.startswith('_') or v is None: continue if callable(getattr(cls, k)): continue @@ -258,7 +258,7 @@ def get(cls): else: v = str(v) if isinstance(v, SpatialReferences): - v = " ".join(str(s) for s in v.references) or None + v = ' '.join(str(s) for s in v.references) or None if isinstance(v, Reference): v = str(v) or None out[k] = v @@ -302,7 +302,7 @@ class environment(_Config): class nipype(_Config): """Nipype settings.""" - crashfile_format = "txt" + crashfile_format = 'txt' """The file format for crashfiles, either text (txt) or pickle (pklz).""" get_linked_libs = False """Run NiPype's tool 
to enlist linked libraries for every interface.""" @@ -312,11 +312,11 @@ class nipype(_Config): """Number of processes (compute tasks) that can be run in parallel (multiprocessing only).""" omp_nthreads = None """Number of CPUs a single process can access for multithreaded execution.""" - plugin = "MultiProc" + plugin = 'MultiProc' """NiPype's execution plugin.""" plugin_args = { - "maxtasksperchild": 1, - "raise_insufficient": False, + 'maxtasksperchild': 1, + 'raise_insufficient': False, } """Settings for NiPype's execution plugin.""" remove_unnecessary_outputs = True @@ -330,13 +330,13 @@ class nipype(_Config): def get_plugin(cls): """Format a dictionary for Nipype consumption.""" out = { - "plugin": cls.plugin, - "plugin_args": cls.plugin_args, + 'plugin': cls.plugin, + 'plugin_args': cls.plugin_args, } - if cls.plugin in ("MultiProc", "LegacyMultiProc"): - out["plugin_args"]["n_procs"] = int(cls.nprocs) + if cls.plugin in ('MultiProc', 'LegacyMultiProc'): + out['plugin_args']['n_procs'] = int(cls.nprocs) if cls.memory_gb: - out["plugin_args"]["memory_gb"] = float(cls.memory_gb) + out['plugin_args']['memory_gb'] = float(cls.memory_gb) return out @classmethod @@ -348,10 +348,10 @@ def init(cls): if cls.resource_monitor: ncfg.update_config( { - "monitoring": { - "enabled": cls.resource_monitor, - "sample_frequency": "0.5", - "summary_append": True, + 'monitoring': { + 'enabled': cls.resource_monitor, + 'sample_frequency': '0.5', + 'summary_append': True, } } ) @@ -360,13 +360,13 @@ def init(cls): # Nipype config (logs and execution) ncfg.update_config( { - "execution": { - "crashdump_dir": str(execution.log_dir), - "crashfile_format": cls.crashfile_format, - "get_linked_libs": cls.get_linked_libs, - "remove_unnecessary_outputs": cls.remove_unnecessary_outputs, - "stop_on_first_crash": cls.stop_on_first_crash, - "check_version": False, # disable future telemetry + 'execution': { + 'crashdump_dir': str(execution.log_dir), + 'crashfile_format': cls.crashfile_format, + 'get_linked_libs': cls.get_linked_libs, + 'remove_unnecessary_outputs': cls.remove_unnecessary_outputs, + 'stop_on_first_crash': cls.stop_on_first_crash, + 'check_version': False, # disable future telemetry } } ) @@ -410,7 +410,7 @@ class execution(_Config): """Do not collect telemetry information for *fMRIPost-AROMA*.""" track_carbon = False """Tracks power draws using CodeCarbon package.""" - country_code = "CAN" + country_code = 'CAN' """Country ISO code used by carbon trackers.""" output_dir = None """Folder where derivatives will be stored.""" @@ -429,7 +429,7 @@ class execution(_Config): """Select a particular task from all available in the dataset.""" templateflow_home = _templateflow_home """The root folder of the TemplateFlow client.""" - work_dir = Path("work").absolute() + work_dir = Path('work').absolute() """Path to a working directory where intermediate results will be available.""" write_graph = False """Write out the computational graph corresponding to the planned preprocessing.""" @@ -437,22 +437,22 @@ class execution(_Config): _layout = None _paths = ( - "bids_dir", - "derivatives", - "bids_database_dir", - "fmripost_aroma_dir", - "layout", - "log_dir", - "output_dir", - "templateflow_home", - "work_dir", + 'bids_dir', + 'derivatives', + 'bids_database_dir', + 'fmripost_aroma_dir', + 'layout', + 'log_dir', + 'output_dir', + 'templateflow_home', + 'work_dir', ) @classmethod def init(cls): """Create a new BIDS Layout accessible with :attr:`~execution.layout`.""" if cls.fs_license_file and 
Path(cls.fs_license_file).is_file(): - os.environ["FS_LICENSE"] = str(cls.fs_license_file) + os.environ['FS_LICENSE'] = str(cls.fs_license_file) if cls._layout is None: import re @@ -460,20 +460,20 @@ def init(cls): from bids.layout import BIDSLayout from bids.layout.index import BIDSLayoutIndexer - _db_path = cls.bids_database_dir or (cls.work_dir / cls.run_uuid / "bids_db") + _db_path = cls.bids_database_dir or (cls.work_dir / cls.run_uuid / 'bids_db') _db_path.mkdir(exist_ok=True, parents=True) # Recommended after PyBIDS 12.1 _indexer = BIDSLayoutIndexer( validate=False, ignore=( - "code", - "stimuli", - "sourcedata", - "models", - re.compile(r"^\."), + 'code', + 'stimuli', + 'sourcedata', + 'models', + re.compile(r'^\.'), re.compile( - r"sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/(beh|dwi|eeg|ieeg|meg|perf)" + r'sub-[a-zA-Z0-9]+(/ses-[a-zA-Z0-9]+)?/(beh|dwi|eeg|ieeg|meg|perf)' ), ), ) @@ -495,7 +495,7 @@ def _process_value(value): else: return ( getattr(Query, value[7:-4]) - if not isinstance(value, Query) and "Query" in value + if not isinstance(value, Query) and 'Query' in value else value ) @@ -505,14 +505,14 @@ def _process_value(value): cls.bids_filters[acq][k] = _process_value(v) dataset_links = { - "raw": cls.bids_dir, - "templateflow": Path(TF_LAYOUT.root), + 'raw': cls.bids_dir, + 'templateflow': Path(TF_LAYOUT.root), } for deriv_name, deriv_path in cls.derivatives.items(): dataset_links[deriv_name] = deriv_path cls.dataset_links = dataset_links - if "all" in cls.debug: + if 'all' in cls.debug: cls.debug = list(DEBUG_MODES) @@ -547,18 +547,18 @@ class workflow(_Config): class loggers: """Keep loggers easily accessible (see :py:func:`init`).""" - _fmt = "%(asctime)s,%(msecs)d %(name)-2s %(levelname)-2s:\n\t %(message)s" - _datefmt = "%y%m%d-%H:%M:%S" + _fmt = '%(asctime)s,%(msecs)d %(name)-2s %(levelname)-2s:\n\t %(message)s' + _datefmt = '%y%m%d-%H:%M:%S' default = logging.getLogger() """The root logger.""" - cli = logging.getLogger("cli") + cli = logging.getLogger('cli') """Command-line interface logging.""" - workflow = logging.getLogger("nipype.workflow") + workflow = logging.getLogger('nipype.workflow') """NiPype's workflow logger.""" - interface = logging.getLogger("nipype.interface") + interface = logging.getLogger('nipype.interface') """NiPype's interface logger.""" - utils = logging.getLogger("nipype.utils") + utils = logging.getLogger('nipype.utils') """NiPype's utils logger.""" @classmethod @@ -583,7 +583,7 @@ def init(cls): cls.workflow.setLevel(execution.log_level) cls.utils.setLevel(execution.log_level) ncfg.update_config( - {"logging": {"log_directory": str(execution.log_dir), "log_to_file": True}} + {'logging': {'log_directory': str(execution.log_dir), 'log_to_file': True}} ) @@ -613,7 +613,7 @@ def init(cls): def _set_ants_seed(): """Fix random seed for antsRegistration, antsAI, antsMotionCorr""" val = random.randint(1, 65536) - os.environ["ANTS_RANDOM_SEED"] = str(val) + os.environ['ANTS_RANDOM_SEED'] = str(val) return val @@ -643,10 +643,10 @@ def from_dict(settings, init=True, ignore=None): def initialize(x): return init if init in (True, False) else x in init - nipype.load(settings, init=initialize("nipype"), ignore=ignore) - execution.load(settings, init=initialize("execution"), ignore=ignore) - workflow.load(settings, init=initialize("workflow"), ignore=ignore) - seeds.load(settings, init=initialize("seeds"), ignore=ignore) + nipype.load(settings, init=initialize('nipype'), ignore=ignore) + execution.load(settings, init=initialize('execution'), ignore=ignore) + 
workflow.load(settings, init=initialize('workflow'), ignore=ignore) + seeds.load(settings, init=initialize('seeds'), ignore=ignore) loggers.init() @@ -674,7 +674,7 @@ def initialize(x): filename = Path(filename) settings = loads(filename.read_text()) for sectionname, configs in settings.items(): - if sectionname != "environment": + if sectionname != 'environment': section = getattr(sys.modules[__name__], sectionname) ignore = skip.get(sectionname) section.load(configs, ignore=ignore, init=initialize(sectionname)) @@ -684,17 +684,17 @@ def initialize(x): def get(flat=False): """Get config as a dict.""" settings = { - "environment": environment.get(), - "execution": execution.get(), - "workflow": workflow.get(), - "nipype": nipype.get(), - "seeds": seeds.get(), + 'environment': environment.get(), + 'execution': execution.get(), + 'workflow': workflow.get(), + 'nipype': nipype.get(), + 'seeds': seeds.get(), } if not flat: return settings return { - ".".join((section, k)): v + '.'.join((section, k)): v for section, configs in settings.items() for k, v in configs.items() } @@ -720,15 +720,15 @@ def init_spaces(checkpoint=True): spaces = execution.output_spaces or SpatialReferences() if not isinstance(spaces, SpatialReferences): spaces = SpatialReferences( - [ref for s in spaces.split(" ") for ref in Reference.from_string(s)] + [ref for s in spaces.split(' ') for ref in Reference.from_string(s)] ) if checkpoint and not spaces.is_cached(): spaces.checkpoint() # Add the default standard space if not already present (required by several sub-workflows) - if "MNI152NLin2009cAsym" not in spaces.get_spaces(nonstandard=False, dim=(3,)): - spaces.add(Reference("MNI152NLin2009cAsym", {})) + if 'MNI152NLin2009cAsym' not in spaces.get_spaces(nonstandard=False, dim=(3,)): + spaces.add(Reference('MNI152NLin2009cAsym', {})) # Ensure user-defined spatial references for outputs are correctly parsed. # Certain options require normalization to a space not explicitly defined by users. @@ -736,8 +736,8 @@ def init_spaces(checkpoint=True): cifti_output = workflow.cifti_output if cifti_output: # CIFTI grayordinates to corresponding FSL-MNI resolutions. - vol_res = "2" if cifti_output == "91k" else "1" - spaces.add(Reference("MNI152NLin6Asym", {"res": vol_res})) + vol_res = '2' if cifti_output == '91k' else '1' + spaces.add(Reference('MNI152NLin6Asym', {'res': vol_res})) # Make the SpatialReferences object available workflow.spaces = spaces diff --git a/src/fmripost_aroma/data/__init__.py b/src/fmripost_aroma/data/__init__.py index 7cb3508..98d9574 100644 --- a/src/fmripost_aroma/data/__init__.py +++ b/src/fmripost_aroma/data/__init__.py @@ -16,15 +16,9 @@ import atexit import os from contextlib import AbstractContextManager, ExitStack -from functools import cached_property +from functools import cache, cached_property from pathlib import Path from types import ModuleType -from typing import Union - -try: - from functools import cache -except ImportError: # PY38 - from functools import lru_cache as cache try: # Prefer backport to leave consistency to dependency spec from importlib_resources import as_file, files @@ -36,7 +30,7 @@ except ImportError: from importlib_resources.abc import Traversable -__all__ = ["load"] +__all__ = ['load'] class Loader: @@ -111,7 +105,7 @@ class Loader: .. 
automethod:: cached """ - def __init__(self, anchor: Union[str, ModuleType]): + def __init__(self, anchor: str | ModuleType): self._anchor = anchor self.files = files(anchor) self.exit_stack = ExitStack() @@ -128,19 +122,19 @@ def _doc(self): directory. """ top_level = sorted( - os.path.relpath(p, self.files) + "/"[: p.is_dir()] + os.path.relpath(p, self.files) + '/'[: p.is_dir()] for p in self.files.iterdir() - if p.name[0] not in (".", "_") and p.name != "tests" + if p.name[0] not in ('.', '_') and p.name != 'tests' ) doclines = [ - f"Load package files relative to ``{self._anchor}``.", - "", - "This package contains the following (top-level) files/directories:", - "", - *(f"* ``{path}``" for path in top_level), + f'Load package files relative to ``{self._anchor}``.', + '', + 'This package contains the following (top-level) files/directories:', + '', + *(f'* ``{path}``' for path in top_level), ] - return "\n".join(doclines) + return '\n'.join(doclines) def readable(self, *segments) -> Traversable: """Provide read access to a resource through a Path-like interface. @@ -164,7 +158,7 @@ def as_path(self, *segments) -> AbstractContextManager[Path]: """ return as_file(self.files.joinpath(*segments)) - @cache + @cache # noqa: B019 def cached(self, *segments) -> Path: """Ensure data is available as a :class:`~pathlib.Path`. diff --git a/src/fmripost_aroma/interfaces/bids.py b/src/fmripost_aroma/interfaces/bids.py index d620acd..8a7fe1f 100644 --- a/src/fmripost_aroma/interfaces/bids.py +++ b/src/fmripost_aroma/interfaces/bids.py @@ -3,7 +3,7 @@ from nipype import logging from niworkflows.interfaces.bids import DerivativesDataSink as BaseDerivativesDataSink -LOGGER = logging.getLogger("nipype.interface") +LOGGER = logging.getLogger('nipype.interface') class DerivativesDataSink(BaseDerivativesDataSink): @@ -12,4 +12,4 @@ class DerivativesDataSink(BaseDerivativesDataSink): A child class of the niworkflows DerivativesDataSink, using xcp_d's configuration files. 
""" - out_path_base = "" + out_path_base = '' diff --git a/src/fmripost_aroma/interfaces/confounds.py b/src/fmripost_aroma/interfaces/confounds.py index 659a79e..23bfb30 100644 --- a/src/fmripost_aroma/interfaces/confounds.py +++ b/src/fmripost_aroma/interfaces/confounds.py @@ -21,19 +21,19 @@ class _ICAConfoundsInputSpec(BaseInterfaceInputSpec): in_directory = Directory( mandatory=True, - desc="directory where ICA derivatives are found", + desc='directory where ICA derivatives are found', ) - skip_vols = traits.Int(desc="number of non steady state volumes identified") - err_on_aroma_warn = traits.Bool(False, usedefault=True, desc="raise error if aroma fails") + skip_vols = traits.Int(desc='number of non steady state volumes identified') + err_on_aroma_warn = traits.Bool(False, usedefault=True, desc='raise error if aroma fails') class _ICAConfoundsOutputSpec(TraitedSpec): aroma_confounds = traits.Either( - None, File(exists=True, desc="output confounds file extracted from ICA-AROMA") + None, File(exists=True, desc='output confounds file extracted from ICA-AROMA') ) - aroma_noise_ics = File(exists=True, desc="ICA-AROMA noise components") - melodic_mix = File(exists=True, desc="melodic mix file") - aroma_metadata = File(exists=True, desc="tabulated ICA-AROMA metadata") + aroma_noise_ics = File(exists=True, desc='ICA-AROMA noise components') + melodic_mix = File(exists=True, desc='melodic mix file') + aroma_metadata = File(exists=True, desc='tabulated ICA-AROMA metadata') class ICAConfounds(SimpleInterface): @@ -48,13 +48,13 @@ def _run_interface(self, runtime): ) if self.inputs.err_on_aroma_warn and aroma_confounds is None: - raise RuntimeError("ICA-AROMA failed") + raise RuntimeError('ICA-AROMA failed') - aroma_confounds = self._results["aroma_confounds"] = aroma_confounds + aroma_confounds = self._results['aroma_confounds'] = aroma_confounds - self._results["aroma_noise_ics"] = motion_ics_out - self._results["melodic_mix"] = melodic_mix_out - self._results["aroma_metadata"] = aroma_metadata + self._results['aroma_noise_ics'] = motion_ics_out + self._results['melodic_mix'] = melodic_mix_out + self._results['aroma_metadata'] = aroma_metadata return runtime @@ -64,21 +64,21 @@ def _get_ica_confounds(ica_out_dir, skip_vols, newpath=None): newpath = os.getcwd() # load the txt files from ICA-AROMA - melodic_mix = os.path.join(ica_out_dir, "melodic.ica/melodic_mix") - motion_ics = os.path.join(ica_out_dir, "classified_motion_ICs.txt") - aroma_metadata = os.path.join(ica_out_dir, "classification_overview.txt") - aroma_icstats = os.path.join(ica_out_dir, "melodic.ica/melodic_ICstats") + melodic_mix = os.path.join(ica_out_dir, 'melodic.ica/melodic_mix') + motion_ics = os.path.join(ica_out_dir, 'classified_motion_ICs.txt') + aroma_metadata = os.path.join(ica_out_dir, 'classification_overview.txt') + aroma_icstats = os.path.join(ica_out_dir, 'melodic.ica/melodic_ICstats') # Change names of motion_ics and melodic_mix for output - melodic_mix_out = os.path.join(newpath, "MELODICmix.tsv") - motion_ics_out = os.path.join(newpath, "AROMAnoiseICs.csv") - aroma_metadata_out = os.path.join(newpath, "classification_overview.tsv") + melodic_mix_out = os.path.join(newpath, 'MELODICmix.tsv') + motion_ics_out = os.path.join(newpath, 'AROMAnoiseICs.csv') + aroma_metadata_out = os.path.join(newpath, 'classification_overview.tsv') # copy metion_ics file to derivatives name shutil.copyfile(motion_ics, motion_ics_out) # -1 since python lists start at index 0 - motion_ic_indices = np.loadtxt(motion_ics, dtype=int, 
delimiter=",", ndmin=1) - 1 + motion_ic_indices = np.loadtxt(motion_ics, dtype=int, delimiter=',', ndmin=1) - 1 melodic_mix_arr = np.loadtxt(melodic_mix, ndmin=2) # pad melodic_mix_arr with rows of zeros corresponding to number non steadystate volumes @@ -87,24 +87,24 @@ def _get_ica_confounds(ica_out_dir, skip_vols, newpath=None): melodic_mix_arr = np.vstack([zeros, melodic_mix_arr]) # save melodic_mix_arr - np.savetxt(melodic_mix_out, melodic_mix_arr, delimiter="\t") + np.savetxt(melodic_mix_out, melodic_mix_arr, delimiter='\t') # process the metadata so that the IC column entries match the BIDS name of # the regressor - aroma_metadata = pd.read_csv(aroma_metadata, sep="\t") - aroma_metadata["IC"] = [f"aroma_motion_{name}" for name in aroma_metadata["IC"]] - aroma_metadata.columns = [re.sub(r"[ |\-|\/]", "_", c) for c in aroma_metadata.columns] + aroma_metadata = pd.read_csv(aroma_metadata, sep='\t') + aroma_metadata['IC'] = [f'aroma_motion_{name}' for name in aroma_metadata['IC']] + aroma_metadata.columns = [re.sub(r'[ |\-|\/]', '_', c) for c in aroma_metadata.columns] # Add variance statistics to metadata - aroma_icstats = pd.read_csv(aroma_icstats, header=None, sep=" ")[[0, 1]] / 100 - aroma_icstats.columns = ["model_variance_explained", "total_variance_explained"] + aroma_icstats = pd.read_csv(aroma_icstats, header=None, sep=' ')[[0, 1]] / 100 + aroma_icstats.columns = ['model_variance_explained', 'total_variance_explained'] aroma_metadata = pd.concat([aroma_metadata, aroma_icstats], axis=1) - aroma_metadata.to_csv(aroma_metadata_out, sep="\t", index=False) + aroma_metadata.to_csv(aroma_metadata_out, sep='\t', index=False) # Return dummy list of ones if no noise components were found if motion_ic_indices.size == 0: - config.loggers.interfaces.warning("No noise components were classified") + config.loggers.interfaces.warning('No noise components were classified') return None, motion_ics_out, melodic_mix_out, aroma_metadata_out # the "good" ics, (e.g., not motion related) @@ -112,17 +112,17 @@ def _get_ica_confounds(ica_out_dir, skip_vols, newpath=None): # return dummy lists of zeros if no signal components were found if good_ic_arr.size == 0: - config.loggers.interfaces.warning("No signal components were classified") + config.loggers.interfaces.warning('No signal components were classified') return None, motion_ics_out, melodic_mix_out, aroma_metadata_out # transpose melodic_mix_arr so x refers to the correct dimension aggr_confounds = np.asarray([melodic_mix_arr.T[x] for x in motion_ic_indices]) # add one to motion_ic_indices to match melodic report. 
-    aroma_confounds = os.path.join(newpath, "AROMAAggrCompAROMAConfounds.tsv")
+    aroma_confounds = os.path.join(newpath, 'AROMAAggrCompAROMAConfounds.tsv')
     pd.DataFrame(
         aggr_confounds.T,
-        columns=[f"aroma_motion_{x + 1:02d}" for x in motion_ic_indices],
-    ).to_csv(aroma_confounds, sep="\t", index=None)
+        columns=[f'aroma_motion_{x + 1:02d}' for x in motion_ic_indices],
+    ).to_csv(aroma_confounds, sep='\t', index=None)

     return aroma_confounds, motion_ics_out, melodic_mix_out, aroma_metadata_out
diff --git a/src/fmripost_aroma/interfaces/reportlets.py b/src/fmripost_aroma/interfaces/reportlets.py
index b6207c8..c50eb3b 100644
--- a/src/fmripost_aroma/interfaces/reportlets.py
+++ b/src/fmripost_aroma/interfaces/reportlets.py
@@ -82,7 +82,7 @@ class SummaryOutputSpec(TraitedSpec):
-    out_report = File(exists=True, desc="HTML segment containing summary")
+    out_report = File(exists=True, desc='HTML segment containing summary')


 class SummaryInterface(SimpleInterface):
@@ -90,10 +90,10 @@
     def _run_interface(self, runtime):
         segment = self._generate_segment()
-        fname = os.path.join(runtime.cwd, "report.html")
-        with open(fname, "w") as fobj:
+        fname = os.path.join(runtime.cwd, 'report.html')
+        with open(fname, 'w') as fobj:
             fobj.write(segment)
-        self._results["out_report"] = fname
+        self._results['out_report'] = fname
         return runtime

     def _generate_segment(self):
@@ -102,14 +102,14 @@
 class _MELODICInputSpecRPT(nrb._SVGReportCapableInputSpec, fsl.model.MELODICInputSpec):
     out_report = File(
-        "melodic_reportlet.svg",
+        'melodic_reportlet.svg',
         usedefault=True,
-        desc="Filename for the visual report generated by Nipype.",
+        desc='Filename for the visual report generated by Nipype.',
     )
     report_mask = File(
         desc=(
-            "Mask used to draw the outline on the reportlet. "
-            "If not set the mask will be derived from the data."
+            'Mask used to draw the outline on the reportlet. '
+            'If not set the mask will be derived from the data.'
         ),
     )
@@ -140,7 +140,7 @@
     def _post_run_hook(self, runtime):
         if not self.generate_report:
             return runtime

-        NIWORKFLOWS_LOG.info("Generating report for MELODIC.")
+        NIWORKFLOWS_LOG.info('Generating report for MELODIC.')
         _melodic_dir = runtime.cwd
         if isdefined(self.inputs.out_dir):
             _melodic_dir = self.inputs.out_dir
@@ -150,12 +150,12 @@
         if not os.path.isabs(self._out_report):
             self._out_report = os.path.abspath(os.path.join(runtime.cwd, self._out_report))

-        mix = os.path.join(self._melodic_dir, "melodic_mix")
+        mix = os.path.join(self._melodic_dir, 'melodic_mix')
         if not os.path.exists(mix):
             NIWORKFLOWS_LOG.warning("MELODIC outputs not found, assuming it didn't converge.")
-            self._out_report = self._out_report.replace(".svg", ".html")
-            snippet = "<h3>MELODIC did not converge, no output</h3>"
-            with open(self._out_report, "w") as fobj:
+            self._out_report = self._out_report.replace('.svg', '.html')
+            snippet = '<h3>MELODIC did not converge, no output</h3>'
+            with open(self._out_report, 'w') as fobj:
                 fobj.write(snippet)

         return runtime
@@ -168,7 +168,7 @@
     def _list_outputs(self):
         try:
             outputs = super()._list_outputs()
         except NotImplementedError:
             outputs = {}
         if self._out_report is not None:
-            outputs["out_report"] = self._out_report
+            outputs['out_report'] = self._out_report
         return outputs

     def _generate_report(self):
@@ -189,14 +189,14 @@
 class _ICAAROMAInputSpecRPT(
     fsl.aroma.ICA_AROMAInputSpec,
 ):
     out_report = File(
-        "ica_aroma_reportlet.svg",
+        'ica_aroma_reportlet.svg',
         usedefault=True,
-        desc="Filename for the visual" " report generated " "by Nipype.",
+        desc='Filename for the visual report generated by Nipype.',
     )
     report_mask = File(
         desc=(
-            "Mask used to draw the outline on the reportlet. "
-            "If not set the mask will be derived from the data."
+            'Mask used to draw the outline on the reportlet. '
+            'If not set the mask will be derived from the data.'
         ),
     )
@@ -228,30 +228,30 @@ def _generate_report(self):
     def _post_run_hook(self, runtime):
         outputs = self.aggregate_outputs(runtime=runtime)
-        self._noise_components_file = os.path.join(outputs.out_dir, "classified_motion_ICs.txt")
+        self._noise_components_file = os.path.join(outputs.out_dir, 'classified_motion_ICs.txt')

-        NIWORKFLOWS_LOG.info("Generating report for ICA AROMA")
+        NIWORKFLOWS_LOG.info('Generating report for ICA AROMA')

         return super()._post_run_hook(runtime)


 class SubjectSummaryInputSpec(BaseInterfaceInputSpec):
-    t1w = InputMultiObject(File(exists=True), desc="T1w structural images")
-    t2w = InputMultiObject(File(exists=True), desc="T2w structural images")
-    subjects_dir = Directory(desc="FreeSurfer subjects directory")
-    subject_id = Str(desc="Subject ID")
+    t1w = InputMultiObject(File(exists=True), desc='T1w structural images')
+    t2w = InputMultiObject(File(exists=True), desc='T2w structural images')
+    subjects_dir = Directory(desc='FreeSurfer subjects directory')
+    subject_id = Str(desc='Subject ID')
     bold = InputMultiObject(
         traits.Either(File(exists=True), traits.List(File(exists=True))),
-        desc="BOLD functional series",
+        desc='BOLD functional series',
     )
-    std_spaces = traits.List(Str, desc="list of standard spaces")
-    nstd_spaces = traits.List(Str, desc="list of non-standard spaces")
+    std_spaces = traits.List(Str, desc='list of standard spaces')
+    nstd_spaces = traits.List(Str, desc='list of non-standard spaces')


 class SubjectSummaryOutputSpec(SummaryOutputSpec):
     # This exists to ensure that the summary is run prior to the first ReconAll
     # call, allowing a determination whether there is a pre-existing directory
-    subject_id = Str(desc="FreeSurfer subject ID")
+    subject_id = Str(desc='FreeSurfer subject ID')


 class SubjectSummary(SummaryInterface):
@@ -260,57 +260,57 @@
     def _run_interface(self, runtime):
         if isdefined(self.inputs.subject_id):
-            self._results["subject_id"] = self.inputs.subject_id
+            self._results['subject_id'] = self.inputs.subject_id
         return super()._run_interface(runtime)

     def _generate_segment(self):
         BIDS_NAME = re.compile(
-            r"^(.*\/)?"
-            "(?P<subject_id>sub-[a-zA-Z0-9]+)"
-            "(_(?P<session_id>ses-[a-zA-Z0-9]+))?"
-            "(_(?P<task_id>task-[a-zA-Z0-9]+))?"
-            "(_(?P<acq_id>acq-[a-zA-Z0-9]+))?"
-            "(_(?P<rec_id>rec-[a-zA-Z0-9]+))?"
-            "(_(?P<run_id>run-[a-zA-Z0-9]+))?"
+            r'^(.*\/)?'
+            '(?P<subject_id>sub-[a-zA-Z0-9]+)'
+            '(_(?P<session_id>ses-[a-zA-Z0-9]+))?'
+            '(_(?P<task_id>task-[a-zA-Z0-9]+))?'
+            '(_(?P<acq_id>acq-[a-zA-Z0-9]+))?'
+            '(_(?P<rec_id>rec-[a-zA-Z0-9]+))?'
+            '(_(?P<run_id>run-[a-zA-Z0-9]+))?'
         )

         if not isdefined(self.inputs.subjects_dir):
-            freesurfer_status = "Not run"
+            freesurfer_status = 'Not run'
         else:
             recon = ReconAll(
                 subjects_dir=self.inputs.subjects_dir,
-                subject_id="sub-" + self.inputs.subject_id,
+                subject_id='sub-' + self.inputs.subject_id,
                 T1_files=self.inputs.t1w,
-                flags="-noskullstrip",
+                flags='-noskullstrip',
             )
-            if recon.cmdline.startswith("echo"):
-                freesurfer_status = "Pre-existing directory"
+            if recon.cmdline.startswith('echo'):
+                freesurfer_status = 'Pre-existing directory'
             else:
-                freesurfer_status = "Run by fMRIPost-AROMA"
+                freesurfer_status = 'Run by fMRIPost-AROMA'

-        t2w_seg = ""
+        t2w_seg = ''
         if self.inputs.t2w:
-            t2w_seg = f"(+ {len(self.inputs.t2w):d} T2-weighted)"
+            t2w_seg = f'(+ {len(self.inputs.t2w):d} T2-weighted)'

         # Add list of tasks with number of runs
         bold_series = self.inputs.bold if isdefined(self.inputs.bold) else []
         bold_series = [s[0] if isinstance(s, list) else s for s in bold_series]
         counts = Counter(
-            BIDS_NAME.search(series).groupdict()["task_id"][5:] for series in bold_series
+            BIDS_NAME.search(series).groupdict()['task_id'][5:] for series in bold_series
         )

-        tasks = ""
+        tasks = ''
         if counts:
             header = '\t\t<ul class="elem-desc">'
-            footer = "\t\t</ul>"
+            footer = '\t\t</ul>'
             lines = [
-                "\t\t\t<li>Task: {task_id} ({n_runs:d} run{s})</li>".format(
-                    task_id=task_id, n_runs=n_runs, s="" if n_runs == 1 else "s"
-                )
+                '\t\t\t<li>Task: {task_id} ({n_runs:d} run{s})</li>'.format(
+                    task_id=task_id, n_runs=n_runs, s='' if n_runs == 1 else 's'
+                )
                 for task_id, n_runs in sorted(counts.items())
             ]
-            tasks = "\n".join([header] + lines + [footer])
+            tasks = '\n'.join([header] + lines + [footer])

         return SUBJECT_TEMPLATE.format(
             subject_id=self.inputs.subject_id,
@@ -318,15 +318,15 @@ def _generate_segment(self):
             t2w=t2w_seg,
             n_bold=len(bold_series),
             tasks=tasks,
-            std_spaces=", ".join(self.inputs.std_spaces),
-            nstd_spaces=", ".join(self.inputs.nstd_spaces),
+            std_spaces=', '.join(self.inputs.std_spaces),
+            nstd_spaces=', '.join(self.inputs.nstd_spaces),
             freesurfer_status=freesurfer_status,
         )


 class AboutSummaryInputSpec(BaseInterfaceInputSpec):
-    version = Str(desc="FMRIPREP version")
-    command = Str(desc="FMRIPREP command")
+    version = Str(desc='FMRIPREP version')
+    command = Str(desc='FMRIPREP command')
     # Date not included - update timestamp only if version or command changes
@@ -337,5 +337,5 @@ def _generate_segment(self):
         return ABOUT_TEMPLATE.format(
             version=self.inputs.version,
             command=self.inputs.command,
-            date=time.strftime("%Y-%m-%d %H:%M:%S %z"),
+            date=time.strftime('%Y-%m-%d %H:%M:%S %z'),
         )
diff --git a/src/fmripost_aroma/utils/bids.py b/src/fmripost_aroma/utils/bids.py
index 7ca2369..f0f9560 100644
--- a/src/fmripost_aroma/utils/bids.py
+++ b/src/fmripost_aroma/utils/bids.py
@@ -3,7 +3,6 @@
 from __future__ import annotations

 import json
-import typing as ty
 from collections import defaultdict
 from pathlib import Path
@@ -18,12 +17,12 @@ def collect_derivatives(
     entities: dict,
     fieldmap_id: str | None,
     spec: dict | None = None,
-    patterns: ty.List[str] | None = None,
+    patterns: list[str] | None = None,
 ):
     """Gather existing derivatives and compose a cache."""
     if spec is None or patterns is None:
         _spec, _patterns = tuple(
-            json.loads(load_data.readable("io_spec.json").read_text()).values()
+            json.loads(load_data.readable('io_spec.json').read_text()).values()
         )

         if spec is None:
@@ -33,34 +32,34 @@
     derivs_cache = defaultdict(list, {})

-    layout = BIDSLayout(derivatives_dir, config=["bids", "derivatives"], validate=False)
+    layout = BIDSLayout(derivatives_dir, config=['bids', 'derivatives'], validate=False)
     derivatives_dir = Path(derivatives_dir)

     # Search for preprocessed BOLD data
-    for k, q in spec["baseline"]["derivatives"].items():
+    for k, q in spec['baseline']['derivatives'].items():
         query = {**q, **entities}
-        item = layout.get(return_type="filename", **query)
+        item = layout.get(return_type='filename', **query)
         if not item:
             continue

         derivs_cache[k] = item[0] if len(item) == 1 else item

     # Search for raw BOLD data
     if not derivs_cache and raw_dir is not None:
-        raw_layout = BIDSLayout(raw_dir, config=["bids"], validate=False)
+        raw_layout = BIDSLayout(raw_dir, config=['bids'], validate=False)
         raw_dir = Path(raw_dir)

-        for k, q in spec["baseline"]["raw"].items():
+        for k, q in spec['baseline']['raw'].items():
             query = {**q, **entities}
-            item = raw_layout.get(return_type="filename", **query)
+            item = raw_layout.get(return_type='filename', **query)
             if not item:
                 continue

             derivs_cache[k] = item[0] if len(item) == 1 else item

-    for xfm, q in spec["transforms"].items():
+    for xfm, q in spec['transforms'].items():
         query = {**q, **entities}
-        if xfm == "boldref2fmap":
-            query["to"] = fieldmap_id
-        item = layout.get(return_type="filename", **q)
+        if xfm == 'boldref2fmap':
+            query['to'] = fieldmap_id
+        item = layout.get(return_type='filename', **q)
         if not item:
             continue

         derivs_cache[xfm] = item[0] if len(item) == 1 else item
@@
-75,17 +74,17 @@ def collect_derivatives_old( ): """Collect preprocessing derivatives.""" subj_data = { - "bold_raw": "", - "" "bold_boldref": "", - "bold_MNI152NLin6": "", + 'bold_raw': '', + 'bold_boldref': '', + 'bold_MNI152NLin6': '', } query = { - "bold": { - "space": "MNI152NLin6Asym", - "res": 2, - "desc": "preproc", - "suffix": "bold", - "extension": [".nii", ".nii.gz"], + 'bold': { + 'space': 'MNI152NLin6Asym', + 'res': 2, + 'desc': 'preproc', + 'suffix': 'bold', + 'extension': ['.nii', '.nii.gz'], } } subj_data = layout.get(subject=subject_id, **query) @@ -99,8 +98,8 @@ def collect_run_data( """Collect files and metadata related to a given BOLD file.""" queries = {} run_data = { - "mask": {"desc": "brain", "suffix": "mask", "extension": [".nii", ".nii.gz"]}, - "confounds": {"desc": "confounds", "suffix": "timeseries", "extension": ".tsv"}, + 'mask': {'desc': 'brain', 'suffix': 'mask', 'extension': ['.nii', '.nii.gz']}, + 'confounds': {'desc': 'confounds', 'suffix': 'timeseries', 'extension': '.tsv'}, } for k, v in queries.items(): run_data[k] = layout.get_nearest(bold_file, **v) diff --git a/src/fmripost_aroma/workflows/__init__.py b/src/fmripost_aroma/workflows/__init__.py index 658bfbb..e637066 100644 --- a/src/fmripost_aroma/workflows/__init__.py +++ b/src/fmripost_aroma/workflows/__init__.py @@ -2,4 +2,4 @@ from fmripost_aroma.workflows import aroma, base -__all__ = ["aroma", "base"] +__all__ = ['aroma', 'base'] diff --git a/src/fmripost_aroma/workflows/aroma.py b/src/fmripost_aroma/workflows/aroma.py index e3d58f0..d120459 100644 --- a/src/fmripost_aroma/workflows/aroma.py +++ b/src/fmripost_aroma/workflows/aroma.py @@ -133,7 +133,7 @@ def init_ica_aroma_wf( from fmripost_aroma.interfaces.confounds import ICAConfounds from fmripost_aroma.interfaces.reportlets import ICAAROMARPT - workflow = Workflow(name=_get_wf_name(bold_file, "aroma")) + workflow = Workflow(name=_get_wf_name(bold_file, 'aroma')) workflow.__postdesc__ = """\ Automatic removal of motion artifacts using independent component analysis [ICA-AROMA, @aroma] was performed on the *preprocessed BOLD on MNI space* @@ -148,164 +148,164 @@ def init_ica_aroma_wf( inputnode = pe.Node( niu.IdentityInterface( fields=[ - "bold_std", - "bold_mask_std", - "confounds", - "name_source", - "skip_vols", - "spatial_reference", + 'bold_std', + 'bold_mask_std', + 'confounds', + 'name_source', + 'skip_vols', + 'spatial_reference', ], ), - name="inputnode", + name='inputnode', ) outputnode = pe.Node( niu.IdentityInterface( fields=[ - "aroma_confounds", - "aroma_noise_ics", - "melodic_mix", - "nonaggr_denoised_file", - "aroma_metadata", + 'aroma_confounds', + 'aroma_noise_ics', + 'melodic_mix', + 'nonaggr_denoised_file', + 'aroma_metadata', ], ), - name="outputnode", + name='outputnode', ) # Convert confounds to FSL motpars file. ... 
rm_non_steady_state = pe.Node( - niu.Function(function=_remove_volumes, output_names=["bold_cut"]), - name="rm_nonsteady", + niu.Function(function=_remove_volumes, output_names=['bold_cut']), + name='rm_nonsteady', ) # fmt:off workflow.connect([ (inputnode, rm_non_steady_state, [ - ("skip_vols", "skip_vols"), - ("bold_std", "bold_file"), + ('skip_vols', 'skip_vols'), + ('bold_std', 'bold_file'), ]), ]) # fmt:on calc_median_val = pe.Node( - fsl.ImageStats(op_string="-k %s -p 50"), - name="calc_median_val", + fsl.ImageStats(op_string='-k %s -p 50'), + name='calc_median_val', ) calc_bold_mean = pe.Node( fsl.MeanImage(), - name="calc_bold_mean", + name='calc_bold_mean', ) getusans = pe.Node( - niu.Function(function=_getusans_func, output_names=["usans"]), - name="getusans", + niu.Function(function=_getusans_func, output_names=['usans']), + name='getusans', mem_gb=0.01, ) smooth = pe.Node( fsl.SUSAN( fwhm=susan_fwhm, - output_type="NIFTI" if config.execution.low_mem else "NIFTI_GZ", + output_type='NIFTI' if config.execution.low_mem else 'NIFTI_GZ', ), - name="smooth", + name='smooth', ) # melodic node melodic = pe.Node( fsl.MELODIC( no_bet=True, - tr_sec=float(metadata["RepetitionTime"]), + tr_sec=float(metadata['RepetitionTime']), mm_thresh=0.5, out_stats=True, dim=aroma_melodic_dim, ), - name="melodic", + name='melodic', ) # ica_aroma node ica_aroma = pe.Node( ICAAROMARPT( - denoise_type="nonaggr", + denoise_type='nonaggr', generate_report=True, - TR=metadata["RepetitionTime"], - args="-np", + TR=metadata['RepetitionTime'], + args='-np', ), - name="ica_aroma", + name='ica_aroma', ) add_non_steady_state = pe.Node( - niu.Function(function=_add_volumes, output_names=["bold_add"]), - name="add_nonsteady", + niu.Function(function=_add_volumes, output_names=['bold_add']), + name='add_nonsteady', ) # extract the confound ICs from the results ica_aroma_confound_extraction = pe.Node( ICAConfounds(err_on_aroma_warn=err_on_aroma_warn), - name="ica_aroma_confound_extraction", + name='ica_aroma_confound_extraction', ) ica_aroma_metadata_fmt = pe.Node( TSV2JSON( - index_column="IC", + index_column='IC', output=None, enforce_case=True, additional_metadata={ - "Method": { - "Name": "ICA-AROMA", - "Version": os.getenv("AROMA_VERSION", "n/a"), + 'Method': { + 'Name': 'ICA-AROMA', + 'Version': os.getenv('AROMA_VERSION', 'n/a'), }, }, ), - name="ica_aroma_metadata_fmt", + name='ica_aroma_metadata_fmt', ) ds_report_ica_aroma = pe.Node( - DerivativesDataSink(desc="aroma", datatype="figures", dismiss_entities=("echo",)), - name="ds_report_ica_aroma", + DerivativesDataSink(desc='aroma', datatype='figures', dismiss_entities=('echo',)), + name='ds_report_ica_aroma', run_without_submitting=True, mem_gb=config.DEFAULT_MEMORY_MIN_GB, ) # fmt:off workflow.connect([ - (inputnode, ica_aroma, [("movpar_file", "motion_parameters")]), - (inputnode, calc_median_val, [("bold_mask_std", "mask_file")]), - (rm_non_steady_state, calc_median_val, [("bold_cut", "in_file")]), - (rm_non_steady_state, calc_bold_mean, [("bold_cut", "in_file")]), - (calc_bold_mean, getusans, [("out_file", "image")]), - (calc_median_val, getusans, [("out_stat", "thresh")]), + (inputnode, ica_aroma, [('movpar_file', 'motion_parameters')]), + (inputnode, calc_median_val, [('bold_mask_std', 'mask_file')]), + (rm_non_steady_state, calc_median_val, [('bold_cut', 'in_file')]), + (rm_non_steady_state, calc_bold_mean, [('bold_cut', 'in_file')]), + (calc_bold_mean, getusans, [('out_file', 'image')]), + (calc_median_val, getusans, [('out_stat', 'thresh')]), # Connect 
input nodes to complete smoothing - (rm_non_steady_state, smooth, [("bold_cut", "in_file")]), - (getusans, smooth, [("usans", "usans")]), - (calc_median_val, smooth, [(("out_stat", _getbtthresh), "brightness_threshold")]), + (rm_non_steady_state, smooth, [('bold_cut', 'in_file')]), + (getusans, smooth, [('usans', 'usans')]), + (calc_median_val, smooth, [(('out_stat', _getbtthresh), 'brightness_threshold')]), # connect smooth to melodic - (smooth, melodic, [("smoothed_file", "in_files")]), - (inputnode, melodic, [("bold_mask_std", "mask")]), + (smooth, melodic, [('smoothed_file', 'in_files')]), + (inputnode, melodic, [('bold_mask_std', 'mask')]), # connect nodes to ICA-AROMA - (smooth, ica_aroma, [("smoothed_file", "in_file")]), + (smooth, ica_aroma, [('smoothed_file', 'in_file')]), (inputnode, ica_aroma, [ - ("bold_mask_std", "report_mask"), - ("bold_mask_std", "mask")]), - (melodic, ica_aroma, [("out_dir", "melodic_dir")]), + ('bold_mask_std', 'report_mask'), + ('bold_mask_std', 'mask')]), + (melodic, ica_aroma, [('out_dir', 'melodic_dir')]), # generate tsvs from ICA-AROMA - (ica_aroma, ica_aroma_confound_extraction, [("out_dir", "in_directory")]), - (inputnode, ica_aroma_confound_extraction, [("skip_vols", "skip_vols")]), - (ica_aroma_confound_extraction, ica_aroma_metadata_fmt, [("aroma_metadata", "in_file")]), + (ica_aroma, ica_aroma_confound_extraction, [('out_dir', 'in_directory')]), + (inputnode, ica_aroma_confound_extraction, [('skip_vols', 'skip_vols')]), + (ica_aroma_confound_extraction, ica_aroma_metadata_fmt, [('aroma_metadata', 'in_file')]), # output for processing and reporting (ica_aroma_confound_extraction, outputnode, [ - ("aroma_confounds", "aroma_confounds"), - ("aroma_noise_ics", "aroma_noise_ics"), - ("melodic_mix", "melodic_mix"), + ('aroma_confounds', 'aroma_confounds'), + ('aroma_noise_ics', 'aroma_noise_ics'), + ('melodic_mix', 'melodic_mix'), ]), - (ica_aroma_metadata_fmt, outputnode, [("output", "aroma_metadata")]), - (ica_aroma, add_non_steady_state, [("nonaggr_denoised_file", "bold_cut_file")]), + (ica_aroma_metadata_fmt, outputnode, [('output', 'aroma_metadata')]), + (ica_aroma, add_non_steady_state, [('nonaggr_denoised_file', 'bold_cut_file')]), (inputnode, add_non_steady_state, [ - ("bold_std", "bold_file"), - ("skip_vols", "skip_vols"), + ('bold_std', 'bold_file'), + ('skip_vols', 'skip_vols'), ]), - (add_non_steady_state, outputnode, [("bold_add", "nonaggr_denoised_file")]), - (ica_aroma, ds_report_ica_aroma, [("out_report", "in_file")]), + (add_non_steady_state, outputnode, [('bold_add', 'nonaggr_denoised_file')]), + (ica_aroma, ds_report_ica_aroma, [('out_report', 'in_file')]), ]) # fmt:on return workflow @@ -316,7 +316,7 @@ def _getbtthresh(medianval): def _getusans_func(image, thresh): - return [tuple([image, thresh])] + return [(image, thresh)] def _remove_volumes(bold_file, skip_vols): @@ -327,7 +327,7 @@ def _remove_volumes(bold_file, skip_vols): if skip_vols == 0: return bold_file - out = fname_presuffix(bold_file, suffix="_cut") + out = fname_presuffix(bold_file, suffix='_cut') bold_img = nb.load(bold_file) bold_img.__class__( bold_img.dataobj[..., skip_vols:], bold_img.affine, bold_img.header @@ -349,7 +349,7 @@ def _add_volumes(bold_file, bold_cut_file, skip_vols): bold_data = np.concatenate((bold_img.dataobj[..., :skip_vols], bold_cut_img.dataobj), axis=3) - out = fname_presuffix(bold_cut_file, suffix="_addnonsteady") + out = fname_presuffix(bold_cut_file, suffix='_addnonsteady') bold_img.__class__(bold_data, bold_img.affine, 
bold_img.header).to_filename(out) return out @@ -370,5 +370,5 @@ def _get_wf_name(bold_fname, prefix): from nipype.utils.filemanip import split_filename fname = split_filename(bold_fname)[1] - fname_nosub = "_".join(fname.split("_")[1:-1]) + fname_nosub = '_'.join(fname.split('_')[1:-1]) return f"{prefix}_{fname_nosub.replace('-', '_')}_wf" diff --git a/src/fmripost_aroma/workflows/base.py b/src/fmripost_aroma/workflows/base.py index de31dfe..46cdd42 100644 --- a/src/fmripost_aroma/workflows/base.py +++ b/src/fmripost_aroma/workflows/base.py @@ -68,7 +68,7 @@ def init_fmripost_aroma_wf(): ver = Version(config.environment.version) - fmripost_aroma_wf = Workflow(name=f"fmripost_aroma_{ver.major}_{ver.minor}_wf") + fmripost_aroma_wf = Workflow(name=f'fmripost_aroma_{ver.major}_{ver.minor}_wf') fmripost_aroma_wf.base_dir = config.execution.work_dir freesurfer = config.workflow.run_reconall @@ -76,9 +76,9 @@ def init_fmripost_aroma_wf(): fsdir = pe.Node( BIDSFreeSurferDir( derivatives=config.execution.output_dir, - freesurfer_home=os.getenv("FREESURFER_HOME"), + freesurfer_home=os.getenv('FREESURFER_HOME'), spaces=config.workflow.spaces.get_fs_spaces(), - minimum_fs_version="7.0.0", + minimum_fs_version='7.0.0', ), name=f"fsdir_run_{config.execution.run_uuid.replace('-', '_')}", run_without_submitting=True, @@ -89,10 +89,10 @@ def init_fmripost_aroma_wf(): for subject_id in config.execution.participant_label: single_subject_wf = init_single_subject_wf(subject_id) - single_subject_wf.config["execution"]["crashdump_dir"] = str( + single_subject_wf.config['execution']['crashdump_dir'] = str( config.execution.fmripost_aroma_dir - / f"sub-{subject_id}" - / "log" + / f'sub-{subject_id}' + / 'log' / config.execution.run_uuid ) for node in single_subject_wf._get_all_nodes(): @@ -101,9 +101,9 @@ def init_fmripost_aroma_wf(): if freesurfer: fmripost_aroma_wf.connect( fsdir, - "subjects_dir", + 'subjects_dir', single_subject_wf, - "inputnode.subjects_dir", + 'inputnode.subjects_dir', ) else: fmripost_aroma_wf.add_nodes([single_subject_wf]) @@ -111,12 +111,12 @@ def init_fmripost_aroma_wf(): # Dump a copy of the config file into the log directory log_dir = ( config.execution.fmripost_aroma_dir - / f"sub-{subject_id}" - / "log" + / f'sub-{subject_id}' + / 'log' / config.execution.run_uuid ) log_dir.mkdir(exist_ok=True, parents=True) - config.to_filename(log_dir / "fmripost_aroma.toml") + config.to_filename(log_dir / 'fmripost_aroma.toml') return fmripost_aroma_wf @@ -183,7 +183,7 @@ def init_single_subject_wf(subject_id: str): from fmripost_aroma.utils.bids import collect_derivatives from fmripost_aroma.workflows.aroma import init_ica_aroma_wf - workflow = Workflow(name=f"sub_{subject_id}_wf") + workflow = Workflow(name=f'sub_{subject_id}_wf') workflow.__desc__ = f""" Results included in this manuscript come from preprocessing performed using *fMRIPost-AROMA* {config.environment.version} @@ -222,14 +222,14 @@ def init_single_subject_wf(subject_id: str): bids_filters=config.execution.bids_filters, ) - if "flair" in config.workflow.ignore: - subject_data["flair"] = [] - if "t2w" in config.workflow.ignore: - subject_data["t2w"] = [] + if 'flair' in config.workflow.ignore: + subject_data['flair'] = [] + if 't2w' in config.workflow.ignore: + subject_data['t2w'] = [] anat_only = config.workflow.anat_only # Make sure we always go through these two checks - if not anat_only and not subject_data["bold"]: + if not anat_only and not subject_data['bold']: task_id = config.execution.task_id raise RuntimeError( f"No BOLD 
images found for participant {subject_id} and " @@ -237,15 +237,16 @@ def init_single_subject_wf(subject_id: str): "All workflows require BOLD images." ) - if subject_data["roi"]: + if subject_data['roi']: warnings.warn( f"Lesion mask {subject_data['roi']} found. " "Future versions of fMRIPost-AROMA will use alternative conventions. " "Please refer to the documentation before upgrading.", FutureWarning, + stacklevel=1, ) - inputnode = pe.Node(niu.IdentityInterface(fields=["subjects_dir"]), name="inputnode") + inputnode = pe.Node(niu.IdentityInterface(fields=['subjects_dir']), name='inputnode') bidssrc = pe.Node( BIDSDataGrabber( @@ -253,47 +254,47 @@ def init_single_subject_wf(subject_id: str): anat_only=config.workflow.anat_only, subject_id=subject_id, ), - name="bidssrc", + name='bidssrc', ) bids_info = pe.Node( - BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False), name="bids_info" + BIDSInfo(bids_dir=config.execution.bids_dir, bids_validate=False), name='bids_info' ) summary = pe.Node( SubjectSummary( - std_spaces=["MNI152NLin6Asym"], + std_spaces=['MNI152NLin6Asym'], nstd_spaces=None, ), - name="summary", + name='summary', run_without_submitting=True, ) about = pe.Node( - AboutSummary(version=config.environment.version, command=" ".join(sys.argv)), - name="about", + AboutSummary(version=config.environment.version, command=' '.join(sys.argv)), + name='about', run_without_submitting=True, ) ds_report_summary = pe.Node( DerivativesDataSink( base_directory=config.execution.fmripost_aroma_dir, - desc="summary", - datatype="figures", - dismiss_entities=("echo",), + desc='summary', + datatype='figures', + dismiss_entities=('echo',), ), - name="ds_report_summary", + name='ds_report_summary', run_without_submitting=True, ) ds_report_about = pe.Node( DerivativesDataSink( base_directory=config.execution.fmripost_aroma_dir, - desc="about", - datatype="figures", - dismiss_entities=("echo",), + desc='about', + datatype='figures', + dismiss_entities=('echo',), ), - name="ds_report_about", + name='ds_report_about', run_without_submitting=True, ) @@ -318,7 +319,7 @@ def init_single_subject_wf(subject_id: str): (across all tasks and sessions), the following postprocessing was performed. 
""" - for bold_file in subject_data["bold"]: + for bold_file in subject_data['bold']: functional_cache = {} if config.execution.derivatives: # Collect native-space derivatives and warp them to MNI152NLin6Asym @@ -343,17 +344,17 @@ def init_single_subject_wf(subject_id: str): bold_file=bold_file, precomputed=functional_cache, ) - ica_aroma_wf.__desc__ = func_pre_desc + (ica_aroma_wf.__desc__ or "") + ica_aroma_wf.__desc__ = func_pre_desc + (ica_aroma_wf.__desc__ or '') # fmt:off workflow.connect([ (inputnode, ica_aroma_wf, [ ('bold_std', 'inputnode.bold_std'), - ("bold_mask_std", "inputnode.bold_mask_std"), - ("movpar_file", "inputnode.movpar_file"), - ("name_source", "inputnode.name_source"), - ("skip_vols", "inputnode.skip_vols"), - ("spatial_reference", "inputnode.spatial_reference"), + ('bold_mask_std', 'inputnode.bold_mask_std'), + ('movpar_file', 'inputnode.movpar_file'), + ('name_source', 'inputnode.name_source'), + ('skip_vols', 'inputnode.skip_vols'), + ('spatial_reference', 'inputnode.spatial_reference'), ]), ]) # fmt:on @@ -362,12 +363,12 @@ def init_single_subject_wf(subject_id: str): def _prefix(subid): - return subid if subid.startswith("sub-") else f"sub-{subid}" + return subid if subid.startswith('sub-') else f'sub-{subid}' def clean_datasinks(workflow: pe.Workflow) -> pe.Workflow: """Overwrite ``out_path_base`` of smriprep's DataSinks.""" for node in workflow.list_node_names(): - if node.split(".")[-1].startswith("ds_"): - workflow.get_node(node).interface.out_path_base = "" + if node.split('.')[-1].startswith('ds_'): + workflow.get_node(node).interface.out_path_base = '' return workflow diff --git a/src/fmripost_aroma/workflows/tests/__init__.py b/src/fmripost_aroma/workflows/tests/__init__.py index 5f96efc..74d2074 100644 --- a/src/fmripost_aroma/workflows/tests/__init__.py +++ b/src/fmripost_aroma/workflows/tests/__init__.py @@ -38,13 +38,13 @@ def mock_config(bids_dir=None): """Create a mock config for documentation and testing purposes.""" from ... 
import config - _old_fs = os.getenv("FREESURFER_HOME") + _old_fs = os.getenv('FREESURFER_HOME') if not _old_fs: - os.environ["FREESURFER_HOME"] = mkdtemp() + os.environ['FREESURFER_HOME'] = mkdtemp() - settings = loads(data.load.readable("tests/config.toml").read_text()) + settings = loads(data.load.readable('tests/config.toml').read_text()) for sectionname, configs in settings.items(): - if sectionname != "environment": + if sectionname != 'environment': section = getattr(config, sectionname) section.load(configs, init=False) config.nipype.omp_nthreads = 1 @@ -52,7 +52,7 @@ def mock_config(bids_dir=None): config.loggers.init() config.init_spaces() - bids_dir = bids_dir or data.load("tests/ds000005").absolute() + bids_dir = bids_dir or data.load('tests/ds000005').absolute() config.execution.work_dir = Path(mkdtemp()) config.execution.bids_dir = bids_dir @@ -67,4 +67,4 @@ def mock_config(bids_dir=None): shutil.rmtree(config.execution.fmriprep_dir) if not _old_fs: - del os.environ["FREESURFER_HOME"] + del os.environ['FREESURFER_HOME'] diff --git a/src/fmripost_aroma/workflows/tests/test_base.py b/src/fmripost_aroma/workflows/tests/test_base.py index da4b17c..7c5fa1d 100644 --- a/src/fmripost_aroma/workflows/tests/test_base.py +++ b/src/fmripost_aroma/workflows/tests/test_base.py @@ -4,5 +4,5 @@ def test_init_single_subject_wf(): from fmripost_aroma.workflows.base import init_single_subject_wf - wf = init_single_subject_wf(subject_id="01") - assert wf.name == "sub_01_wf" + wf = init_single_subject_wf(subject_id='01') + assert wf.name == 'sub_01_wf'
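
The zero-padding step in _get_ica_confounds (interfaces/confounds.py above) is the subtlest logic the reformatted hunks touch, so a minimal standalone sketch of it follows; the shapes and IC indices below are invented for illustration, and nothing here is part of the patch itself.

import numpy as np

skip_vols = 4                              # non-steady-state volumes dropped before ICA
melodic_mix_arr = np.random.rand(196, 10)  # (timepoints after cut) x (components)

# Pad with rows of zeros so each regressor lines up with the full-length,
# uncut BOLD series, as _get_ica_confounds does before writing MELODICmix.tsv.
zeros = np.zeros([skip_vols, melodic_mix_arr.shape[1]])
melodic_mix_arr = np.vstack([zeros, melodic_mix_arr])

# classified_motion_ICs.txt is 1-based, hence the -1 before indexing columns.
motion_ic_indices = np.array([4, 8]) - 1
aggr_confounds = np.asarray([melodic_mix_arr.T[x] for x in motion_ic_indices])
print(aggr_confounds.shape)  # (2, 200): one zero-padded regressor per noise IC

The point is that row i of the padded mixing matrix stays aligned with volume i of the uncut series, so the extracted regressors can be concatenated directly with other confounds.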
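
Likewise, the workflow-naming rule exercised by _get_wf_name (workflows/aroma.py above) can be checked in isolation. This sketch only approximates nipype's split_filename for .nii.gz paths, and the example path is hypothetical:

from pathlib import Path

def wf_name(bold_fname: str, prefix: str) -> str:
    # split_filename(...)[1] returns the basename without .nii/.nii.gz
    fname = Path(bold_fname).name.removesuffix('.gz').removesuffix('.nii')
    # drop the sub-* entity and the trailing suffix (e.g. "bold")
    fname_nosub = '_'.join(fname.split('_')[1:-1])
    return f"{prefix}_{fname_nosub.replace('-', '_')}_wf"

print(wf_name('/data/sub-01_task-rest_run-1_bold.nii.gz', 'aroma'))
# aroma_task_rest_run_1_wf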
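
Finally, a short illustration of get(flat=True) from config.py: section dictionaries collapse into dotted keys. The values here are invented:

settings = {'nipype': {'plugin': 'MultiProc'}, 'execution': {'debug': ['pdb']}}
flat = {
    '.'.join((section, k)): v
    for section, configs in settings.items()
    for k, v in configs.items()
}
print(flat)  # {'nipype.plugin': 'MultiProc', 'execution.debug': ['pdb']}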