From 009a972652fadbf98dc9fc8ae274d862df276e9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= <117362283+bclenet@users.noreply.github.com> Date: Mon, 26 Feb 2024 17:01:38 +0100 Subject: [PATCH] Pytest fixture to use temporary directories (#182) * [TEST] Using temp_dir folder [DOC] paths inside container [LINT] pylint config file * Updating conftest file * Updating pipeline tests --- .gitignore | 2 +- .pylintrc | 631 ++++++++++++++++++ INSTALL.md | 10 +- docs/environment.md | 4 +- .../utils/configuration/default_config.toml | 6 +- .../utils/configuration/testing_config.toml | 10 +- tests/conftest.py | 12 +- tests/core/test_common.py | 65 +- tests/pipelines/test_team_J7F9.py | 21 +- tests/pipelines/test_team_T54A.py | 21 +- tests/pipelines/test_team_U26C.py | 60 +- tests/pipelines/test_team_X19V.py | 40 +- tests/test_conftest.py | 58 +- 13 files changed, 746 insertions(+), 194 deletions(-) create mode 100755 .pylintrc diff --git a/.gitignore b/.gitignore index 5b1b525d..d8afe952 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,6 @@ # to avoid commiting data -./data/ +data/ # neuro user in docker image neuro diff --git a/.pylintrc b/.pylintrc new file mode 100755 index 00000000..3bfeb5ee --- /dev/null +++ b/.pylintrc @@ -0,0 +1,631 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. +fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. 
+ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules=pytest + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.10 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# Add paths to the list of the source roots. Supports globbing patterns. The +# source root is an absolute path or a path relative to the current working +# directory used to determine a package namespace for modules located under the +# source root. +source-roots= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. +class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. 
+#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _ + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type alias names. If left empty, type +# alias names will be checked with the set naming style. +#typealias-rgx= + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. 
+defining-attr-methods=__init__,
+                      __new__,
+                      setUp,
+                      asyncSetUp,
+                      __post_init__
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make,os._exit
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+
+[DESIGN]
+
+# List of regular expressions of class ancestor names to ignore when counting
+# public methods (see R0903)
+exclude-too-few-public-methods=
+
+# List of qualified class names to ignore when counting class parents (see
+# R0901)
+ignored-parents=
+
+# Maximum number of arguments for function / method.
+max-args=5
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Maximum number of boolean expressions in an if statement (see R0916).
+max-bool-expr=5
+
+# Maximum number of branch for function / method body.
+max-branches=12
+
+# Maximum number of locals for function / method body.
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of return / yield for function / method body.
+max-returns=6
+
+# Maximum number of statements in function / method body.
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when caught.
+overgeneral-exceptions=builtins.BaseException,builtins.Exception
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Maximum number of lines in a module.
+max-module-lines=1000
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+single-line-class-stmt=no
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+
+[IMPORTS]
+
+# List of modules that can be imported at any level, not just the top level
+# one.
+allow-any-import-level=
+
+# Allow explicit reexports by alias from a package __init__.
+allow-reexport-from-package=no
+
+# Allow wildcard imports from modules that define __all__.
+allow-wildcard-with-all=no
+
+# Deprecated modules which should not be used, separated by a comma.
+deprecated-modules=
+
+# Output a graph (.gv or any supported image format) of external dependencies
+# to the given file (report RP0402 must not be disabled).
+ext-import-graph=
+
+# Output a graph (.gv or any supported image format) of all (i.e. internal and
+# external) dependencies to the given file (report RP0402 must not be
+# disabled).
+import-graph=
+
+# Output a graph (.gv or any supported image format) of internal dependencies
+# to the given file (report RP0402 must not be disabled).
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable=c-extension-no-member + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. 
+msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. No available dictionaries : You need to install +# both the python package and the system dependency for enchant to work.. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. 
+ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace,nipype.interfaces.base.support.Bunch + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins= + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/INSTALL.md b/INSTALL.md index 18de2747..e9f124ba 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -63,7 +63,7 @@ Start a Docker container from the Docker image : ```bash # Replace PATH_TO_THE_REPOSITORY in the following command (e.g.: with /home/user/dev/narps_open_pipelines/) -docker run -it -v PATH_TO_THE_REPOSITORY:/home/neuro/code/ nipype/nipype:py38 +docker run -it -v PATH_TO_THE_REPOSITORY:/work/ nipype/nipype:py38 ``` Optionally edit the configuration file `narps_open/utils/configuration/default_config.toml` so that the referred paths match the ones inside the container. E.g.: if using the previous command line, the `directories` part of the configuration file should be : @@ -73,9 +73,9 @@ Optionally edit the configuration file `narps_open/utils/configuration/default_c # ... [directories] -dataset = "/home/neuro/code/data/original/ds001734/" -reproduced_results = "/home/neuro/code/data/reproduced/" -narps_results = "/home/neuro/code/data/results/" +dataset = "/work/data/original/ds001734/" +reproduced_results = "/work/data/reproduced/" +narps_results = "/work/data/results/" # ... ``` @@ -87,7 +87,7 @@ Install NARPS Open Pipelines inside the container : ```bash source activate neuro -cd /home/neuro/code/ +cd /work/ pip install . 
``` diff --git a/docs/environment.md b/docs/environment.md index a345e94f..31c029b4 100644 --- a/docs/environment.md +++ b/docs/environment.md @@ -15,7 +15,7 @@ From this command line, you need to add volumes to be able to link with your loc ```bash # Replace PATH_TO_THE_REPOSITORY in the following command (e.g.: with /home/user/dev/narps_open_pipelines/) docker run -it \ - -v PATH_TO_THE_REPOSITORY:/home/neuro/code/ \ + -v PATH_TO_THE_REPOSITORY:/work/ \ nipype/nipype:py38 ``` @@ -25,7 +25,7 @@ If you wish to use [Jupyter](https://jupyter.org/) to run the code, a port forwa ```bash docker run -it \ - -v PATH_TO_THE_REPOSITORY:/home/neuro/code/ \ + -v PATH_TO_THE_REPOSITORY:/work/ \ -p 8888:8888 \ nipype/nipype:py38 ``` diff --git a/narps_open/utils/configuration/default_config.toml b/narps_open/utils/configuration/default_config.toml index 81f312a9..119e743f 100644 --- a/narps_open/utils/configuration/default_config.toml +++ b/narps_open/utils/configuration/default_config.toml @@ -3,9 +3,9 @@ title = "Default configuration for the NARPS open pipelines project" config_type = "default" [directories] -dataset = "data/original/ds001734/" -reproduced_results = "data/reproduced/" -narps_results = "data/results/" +dataset = "/work/data/original/ds001734/" +reproduced_results = "/work/run/reproduced/" +narps_results = "/work/data/results/" [runner] nb_procs = 8 # Maximum number of threads executed by the runner diff --git a/narps_open/utils/configuration/testing_config.toml b/narps_open/utils/configuration/testing_config.toml index 86ba77b8..943ae1aa 100644 --- a/narps_open/utils/configuration/testing_config.toml +++ b/narps_open/utils/configuration/testing_config.toml @@ -3,11 +3,11 @@ title = "Testing configuration for the NARPS open pipelines project" config_type = "testing" [directories] -dataset = "data/original/ds001734/" -reproduced_results = "run/data/reproduced/" -narps_results = "data/results/" -test_data = "tests/test_data/" -test_runs = "run/" +dataset = "/work/data/original/ds001734/" +reproduced_results = "/work/run/reproduced/" +narps_results = "/work/data/results/" +test_data = "/work/tests/test_data/" +test_runs = "/work/run/" [runner] nb_procs = 8 # Maximum number of threads executed by the runner diff --git a/tests/conftest.py b/tests/conftest.py index 73dd095e..2223518e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -6,12 +6,13 @@ pytest on (a) test file(s) in the same directory. 
""" -from os import remove +from os import remove, mkdir from os.path import join, isfile +from tempfile import mkdtemp from shutil import rmtree from numpy import isclose -from pytest import helpers +from pytest import helpers, fixture from pathvalidate import is_valid_filepath from narps_open.pipelines import Pipeline @@ -24,6 +25,13 @@ # Init configuration, to ensure it is in testing mode Configuration(config_type='testing') +@fixture +def temporary_data_dir(): + """ A fixture to create and remove a temporary directory for the tests """ + data_dir = mkdtemp() + yield data_dir + rmtree(data_dir, ignore_errors = True) + @helpers.register def compare_float_2d_arrays(array_1, array_2): """ Assert array_1 and array_2 are close enough """ diff --git a/tests/core/test_common.py b/tests/core/test_common.py index 1c362023..e430b795 100644 --- a/tests/core/test_common.py +++ b/tests/core/test_common.py @@ -10,38 +10,25 @@ pytest -q test_common.py pytest -q test_common.py -k """ -from os import mkdir, makedirs +from os import makedirs from os.path import join, exists, abspath -from shutil import rmtree from pathlib import Path -from pytest import mark, fixture +from pytest import mark from nipype import Node, Function, Workflow -from narps_open.utils.configuration import Configuration import narps_open.core.common as co -TEMPORARY_DIR = join(Configuration()['directories']['test_runs'], 'test_common') - -@fixture -def remove_test_dir(): - """ A fixture to remove temporary directory created by tests """ - - rmtree(TEMPORARY_DIR, ignore_errors = True) - mkdir(TEMPORARY_DIR) - yield # test runs here - rmtree(TEMPORARY_DIR, ignore_errors = True) - class TestCoreCommon: """ A class that contains all the unit tests for the common module.""" @staticmethod @mark.unit_test - def test_remove_file(remove_test_dir): + def test_remove_file(temporary_data_dir): """ Test the remove_file function """ # Create a single file - test_file_path = abspath(join(TEMPORARY_DIR, 'file1.txt')) + test_file_path = abspath(join(temporary_data_dir, 'file1.txt')) Path(test_file_path).touch() # Check file exist @@ -62,15 +49,15 @@ def test_remove_file(remove_test_dir): @staticmethod @mark.unit_test - def test_remove_directory(remove_test_dir): + def test_remove_directory(temporary_data_dir): """ Test the remove_directory function """ # Create a single inside dir tree - dir_path = abspath(join(TEMPORARY_DIR, 'dir_1', 'dir_2')) + dir_path = abspath(join(temporary_data_dir, 'dir_1', 'dir_2')) makedirs(dir_path) - file_path = abspath(join(TEMPORARY_DIR, 'dir_1', 'dir_2', 'file1.txt')) + file_path = abspath(join(temporary_data_dir, 'dir_1', 'dir_2', 'file1.txt')) Path(file_path).touch() - test_dir_path = abspath(join(TEMPORARY_DIR, 'dir_1')) + test_dir_path = abspath(join(temporary_data_dir, 'dir_1')) # Check file exist assert exists(file_path) @@ -90,13 +77,13 @@ def test_remove_directory(remove_test_dir): @staticmethod @mark.unit_test - def test_remove_parent_directory(remove_test_dir): + def test_remove_parent_directory(temporary_data_dir): """ Test the remove_parent_directory function """ # Create a single inside dir tree - dir_path = abspath(join(TEMPORARY_DIR, 'dir_1', 'dir_2')) + dir_path = abspath(join(temporary_data_dir, 'dir_1', 'dir_2')) makedirs(dir_path) - file_path = abspath(join(TEMPORARY_DIR, 'dir_1', 'dir_2', 'file1.txt')) + file_path = abspath(join(temporary_data_dir, 'dir_1', 'dir_2', 'file1.txt')) Path(file_path).touch() # Check file exist @@ -151,7 +138,7 @@ def test_node_elements_in_string(): @staticmethod 
@mark.unit_test - def test_connect_elements_in_string(remove_test_dir): + def test_connect_elements_in_string(temporary_data_dir): """ Test the elements_in_string function as evaluated in a connect """ # Inputs @@ -180,7 +167,7 @@ def test_connect_elements_in_string(remove_test_dir): # Create Workflow test_workflow = Workflow( - base_dir = TEMPORARY_DIR, + base_dir = temporary_data_dir, name = 'test_workflow' ) test_workflow.connect([ @@ -193,11 +180,13 @@ def test_connect_elements_in_string(remove_test_dir): test_workflow.run() - test_file_t = join(TEMPORARY_DIR, 'test_workflow', 'node_true', '_report', 'report.rst') + test_file_t = join(temporary_data_dir, + 'test_workflow', 'node_true', '_report', 'report.rst') with open(test_file_t, 'r', encoding = 'utf-8') as file: assert '* out_value : test_string' in file.read() - test_file_f = join(TEMPORARY_DIR, 'test_workflow', 'node_false', '_report', 'report.rst') + test_file_f = join(temporary_data_dir, + 'test_workflow', 'node_false', '_report', 'report.rst') with open(test_file_f, 'r', encoding = 'utf-8') as file: assert '* out_value : None' in file.read() @@ -238,7 +227,7 @@ def test_node_clean_list(): @staticmethod @mark.unit_test - def test_connect_clean_list(remove_test_dir): + def test_connect_clean_list(temporary_data_dir): """ Test the clean_list function as evaluated in a connect """ # Inputs @@ -269,7 +258,7 @@ def test_connect_clean_list(remove_test_dir): # Create Workflow test_workflow = Workflow( - base_dir = TEMPORARY_DIR, + base_dir = temporary_data_dir, name = 'test_workflow' ) test_workflow.connect([ @@ -279,11 +268,13 @@ def test_connect_clean_list(remove_test_dir): ]) test_workflow.run() - test_file_1 = join(TEMPORARY_DIR, 'test_workflow', 'node_1', '_report', 'report.rst') + test_file_1 = join(temporary_data_dir, + 'test_workflow', 'node_1', '_report', 'report.rst') with open(test_file_1, 'r', encoding = 'utf-8') as file: assert f'* out_value : {output_list_1}' in file.read() - test_file_2 = join(TEMPORARY_DIR, 'test_workflow', 'node_2', '_report', 'report.rst') + test_file_2 = join(temporary_data_dir, + 'test_workflow', 'node_2', '_report', 'report.rst') with open(test_file_2, 'r', encoding = 'utf-8') as file: assert f'* out_value : {output_list_2}' in file.read() @@ -324,7 +315,7 @@ def test_node_list_intersection(): @staticmethod @mark.unit_test - def test_connect_list_intersection(remove_test_dir): + def test_connect_list_intersection(temporary_data_dir): """ Test the list_intersection function as evaluated in a connect """ # Inputs / outputs @@ -355,7 +346,7 @@ def test_connect_list_intersection(remove_test_dir): # Create Workflow test_workflow = Workflow( - base_dir = TEMPORARY_DIR, + base_dir = temporary_data_dir, name = 'test_workflow' ) test_workflow.connect([ @@ -365,11 +356,13 @@ def test_connect_list_intersection(remove_test_dir): ]) test_workflow.run() - test_file_1 = join(TEMPORARY_DIR, 'test_workflow', 'node_1', '_report', 'report.rst') + test_file_1 = join(temporary_data_dir, + 'test_workflow', 'node_1', '_report', 'report.rst') with open(test_file_1, 'r', encoding = 'utf-8') as file: assert f'* out_value : {output_list_1}' in file.read() - test_file_2 = join(TEMPORARY_DIR, 'test_workflow', 'node_2', '_report', 'report.rst') + test_file_2 = join(temporary_data_dir, + 'test_workflow', 'node_2', '_report', 'report.rst') with open(test_file_2, 'r', encoding = 'utf-8') as file: assert f'* out_value : {output_list_2}' in file.read() diff --git a/tests/pipelines/test_team_J7F9.py 
b/tests/pipelines/test_team_J7F9.py
index da37e990..bf3aeafc 100644
--- a/tests/pipelines/test_team_J7F9.py
+++ b/tests/pipelines/test_team_J7F9.py
@@ -10,29 +10,16 @@
     pytest -q test_team_J7F9.py
     pytest -q test_team_J7F9.py -k <selected_test>
 """
-from os import mkdir
 from os.path import join, exists
-from shutil import rmtree
 from filecmp import cmp
 
-from pytest import helpers, mark, fixture
+from pytest import helpers, mark
 from nipype import Workflow
 from nipype.interfaces.base import Bunch
 
 from narps_open.utils.configuration import Configuration
 from narps_open.pipelines.team_J7F9 import PipelineTeamJ7F9
 
-TEMPORARY_DIR = join(Configuration()['directories']['test_runs'], 'test_J7F9')
-
-@fixture
-def remove_test_dir():
-    """ A fixture to remove temporary directory created by tests """
-
-    rmtree(TEMPORARY_DIR, ignore_errors = True)
-    mkdir(TEMPORARY_DIR)
-    yield # test runs here
-    rmtree(TEMPORARY_DIR, ignore_errors = True)
-
 class TestPipelinesTeamJ7F9:
     """ A class that contains all the unit tests for the PipelineTeamJ7F9 class."""
 
@@ -188,7 +175,7 @@ def test_subject_information():
 
     @staticmethod
     @mark.unit_test
-    def test_confounds_file(remove_test_dir):
+    def test_confounds_file(temporary_data_dir):
         """ Test the get_confounds_file method """
 
         confounds_file = join(
@@ -197,11 +184,11 @@
             Configuration()['directories']['test_data'], 'pipelines', 'team_J7F9', 'confounds.tsv')
 
         # Get new confounds file
-        PipelineTeamJ7F9.get_confounds_file(confounds_file, 'sid', 'rid', TEMPORARY_DIR)
+        PipelineTeamJ7F9.get_confounds_file(confounds_file, 'sid', 'rid', temporary_data_dir)
 
         # Check confounds file was created
         created_confounds_file = join(
-            TEMPORARY_DIR, 'confounds_files', 'confounds_file_sub-sid_run-rid.tsv')
+            temporary_data_dir, 'confounds_files', 'confounds_file_sub-sid_run-rid.tsv')
         assert exists(created_confounds_file)
 
         # Check contents
diff --git a/tests/pipelines/test_team_T54A.py b/tests/pipelines/test_team_T54A.py
index 2199cf40..954ca1bb 100644
--- a/tests/pipelines/test_team_T54A.py
+++ b/tests/pipelines/test_team_T54A.py
@@ -10,28 +10,15 @@
     pytest -q test_team_T54A.py
     pytest -q test_team_T54A.py -k <selected_test>
 """
-from os import mkdir
 from os.path import exists, join
-from shutil import rmtree
 
-from pytest import helpers, mark, fixture
+from pytest import helpers, mark
 from nipype import Workflow
 from nipype.interfaces.base import Bunch
 
 from narps_open.pipelines.team_T54A import PipelineTeamT54A
 from narps_open.utils.configuration import Configuration
 
-TEMPORARY_DIR = join(Configuration()['directories']['test_runs'], 'test_T54A')
-
-@fixture
-def remove_test_dir():
-    """ A fixture to remove temporary directory created by tests """
-
-    rmtree(TEMPORARY_DIR, ignore_errors = True)
-    mkdir(TEMPORARY_DIR)
-    yield # test runs here
-    rmtree(TEMPORARY_DIR, ignore_errors = True)
-
 class TestPipelinesTeamT54A:
     """ A class that contains all the unit tests for the PipelineTeamT54A class."""
 
@@ -148,7 +135,7 @@ def test_subject_information():
 
     @staticmethod
     @mark.unit_test
-    def test_parameters_file(remove_test_dir):
+    def test_parameters_file(temporary_data_dir):
         """ Test the get_parameters_file method """
 
         confounds_file_path = join(
@@ -158,12 +145,12 @@
             confounds_file_path,
             'fake_subject_id',
             'fake_run_id',
-            TEMPORARY_DIR
+            temporary_data_dir
         )
 
         # Check parameter file was created
         assert exists(join(
-            TEMPORARY_DIR,
+            temporary_data_dir,
             'parameters_file',
             'parameters_file_sub-fake_subject_id_run-fake_run_id.tsv')
         )
diff --git a/tests/pipelines/test_team_U26C.py b/tests/pipelines/test_team_U26C.py
index c670b520..163d005e 100644
--- a/tests/pipelines/test_team_U26C.py
+++ b/tests/pipelines/test_team_U26C.py
@@ -10,38 +10,16 @@
     pytest -q test_team_U26C.py
     pytest -q test_team_U26C.py -k <selected_test>
 """
-from os import mkdir
 from os.path import join, exists
-from shutil import rmtree
 from filecmp import cmp
 
-from pytest import helpers, mark, fixture
-from numpy import isclose
+from pytest import helpers, mark
 from nipype import Workflow
 from nipype.interfaces.base import Bunch
 
 from narps_open.utils.configuration import Configuration
 from narps_open.pipelines.team_U26C import PipelineTeamU26C
 
-TEMPORARY_DIR = join(Configuration()['directories']['test_runs'], 'test_U26C')
-
-@fixture
-def remove_test_dir():
-    """ A fixture to remove temporary directory created by tests """
-
-    rmtree(TEMPORARY_DIR, ignore_errors = True)
-    mkdir(TEMPORARY_DIR)
-    yield # test runs here
-    #rmtree(TEMPORARY_DIR, ignore_errors = True)
-
-def compare_float_2d_arrays(array_1, array_2):
-    """ Assert array_1 and array_2 are close enough """
-
-    assert len(array_1) == len(array_2)
-    for reference_array, test_array in zip(array_1, array_2):
-        assert len(reference_array) == len(test_array)
-        assert isclose(reference_array, test_array).all()
-
 class TestPipelinesTeamU26C:
     """ A class that contains all the unit tests for the PipelineTeamU26C class."""
 
@@ -91,32 +69,34 @@ def test_subject_information():
         bunch = info[0]
         assert isinstance(bunch, Bunch)
         assert bunch.conditions == ['gamble_run1']
-        compare_float_2d_arrays(bunch.onsets, [[4.071, 11.834, 19.535, 27.535, 36.435]])
-        compare_float_2d_arrays(bunch.durations, [[4.0, 4.0, 4.0, 4.0, 4.0]])
-        assert bunch.amplitudes == None
-        assert bunch.tmod == None
+        helpers.compare_float_2d_arrays(bunch.onsets, [[4.071, 11.834, 19.535, 27.535, 36.435]])
+        helpers.compare_float_2d_arrays(bunch.durations, [[4.0, 4.0, 4.0, 4.0, 4.0]])
+        assert bunch.amplitudes is None
+        assert bunch.tmod is None
         assert bunch.pmod[0].name == ['gain_run1', 'loss_run1']
         assert bunch.pmod[0].poly == [1, 1]
-        compare_float_2d_arrays(bunch.pmod[0].param, [[14.0, 34.0, 38.0, 10.0, 16.0], [6.0, 14.0, 19.0, 15.0, 17.0]])
-        assert bunch.regressor_names == None
-        assert bunch.regressors == None
+        helpers.compare_float_2d_arrays(bunch.pmod[0].param,
+            [[14.0, 34.0, 38.0, 10.0, 16.0], [6.0, 14.0, 19.0, 15.0, 17.0]])
+        assert bunch.regressor_names is None
+        assert bunch.regressors is None
 
         bunch = info[1]
         assert isinstance(bunch, Bunch)
         assert bunch.conditions == ['gamble_run2']
-        compare_float_2d_arrays(bunch.onsets, [[4.071, 11.834, 19.535, 27.535, 36.435]])
-        compare_float_2d_arrays(bunch.durations, [[4.0, 4.0, 4.0, 4.0, 4.0]])
-        assert bunch.amplitudes == None
-        assert bunch.tmod == None
+        helpers.compare_float_2d_arrays(bunch.onsets, [[4.071, 11.834, 19.535, 27.535, 36.435]])
+        helpers.compare_float_2d_arrays(bunch.durations, [[4.0, 4.0, 4.0, 4.0, 4.0]])
+        assert bunch.amplitudes is None
+        assert bunch.tmod is None
         assert bunch.pmod[0].name == ['gain_run2', 'loss_run2']
         assert bunch.pmod[0].poly == [1, 1]
-        compare_float_2d_arrays(bunch.pmod[0].param, [[14.0, 34.0, 38.0, 10.0, 16.0], [6.0, 14.0, 19.0, 15.0, 17.0]])
-        assert bunch.regressor_names == None
-        assert bunch.regressors == None
+        helpers.compare_float_2d_arrays(bunch.pmod[0].param,
+            [[14.0, 34.0, 38.0, 10.0, 16.0], [6.0, 14.0, 19.0, 15.0, 17.0]])
+        assert bunch.regressor_names is None
+        assert bunch.regressors is None
 
     @staticmethod
     @mark.unit_test
-    def test_confounds_file(remove_test_dir):
+    def test_confounds_file(temporary_data_dir):
         """ Test the get_confounds_file method """
 
         confounds_file = join(
@@ -125,11 +105,11 @@
             Configuration()['directories']['test_data'], 'pipelines', 'team_U26C', 'confounds.tsv')
 
         # Get new confounds file
-        PipelineTeamU26C.get_confounds_file(confounds_file, 'sid', 'rid', TEMPORARY_DIR)
+        PipelineTeamU26C.get_confounds_file(confounds_file, 'sid', 'rid', temporary_data_dir)
 
         # Check confounds file was created
         created_confounds_file = join(
-            TEMPORARY_DIR, 'confounds_files', 'confounds_file_sub-sid_run-rid.tsv')
+            temporary_data_dir, 'confounds_files', 'confounds_file_sub-sid_run-rid.tsv')
         assert exists(created_confounds_file)
 
         # Check contents
diff --git a/tests/pipelines/test_team_X19V.py b/tests/pipelines/test_team_X19V.py
index e0a1a7f3..c83595f3 100644
--- a/tests/pipelines/test_team_X19V.py
+++ b/tests/pipelines/test_team_X19V.py
@@ -10,38 +10,16 @@
    pytest -q test_team_X19V.py
    pytest -q test_team_X19V.py -k <selected_test>
 """
-from os import mkdir
 from os.path import join, exists
-from shutil import rmtree
 from filecmp import cmp
 
-from pytest import helpers, mark, fixture
-from numpy import isclose
+from pytest import helpers, mark
 from nipype import Workflow
 from nipype.interfaces.base import Bunch
 
 from narps_open.utils.configuration import Configuration
 from narps_open.pipelines.team_X19V import PipelineTeamX19V
 
-TEMPORARY_DIR = join(Configuration()['directories']['test_runs'], 'test_X19V')
-
-@fixture
-def remove_test_dir():
-    """ A fixture to remove temporary directory created by tests """
-
-    rmtree(TEMPORARY_DIR, ignore_errors = True)
-    mkdir(TEMPORARY_DIR)
-    yield # test runs here
-    #rmtree(TEMPORARY_DIR, ignore_errors = True)
-
-def compare_float_2d_arrays(array_1, array_2):
-    """ Assert array_1 and array_2 are close enough """
-
-    assert len(array_1) == len(array_2)
-    for reference_array, test_array in zip(array_1, array_2):
-        assert len(reference_array) == len(test_array)
-        assert isclose(reference_array, test_array).all()
-
 class TestPipelinesTeamX19V:
     """ A class that contains all the unit tests for the PipelineTeamX19V class."""
 
@@ -95,24 +73,24 @@ def test_subject_information():
         bunch = info_ok[0]
         assert isinstance(bunch, Bunch)
         assert bunch.conditions == ['trial', 'gain', 'loss']
-        compare_float_2d_arrays(bunch.onsets, [
+        helpers.compare_float_2d_arrays(bunch.onsets, [
            [4.071, 11.834, 19.535, 27.535, 36.435],
            [4.071, 11.834, 19.535, 27.535, 36.435],
            [4.071, 11.834, 19.535, 27.535, 36.435]])
-        compare_float_2d_arrays(bunch.durations, [
+        helpers.compare_float_2d_arrays(bunch.durations, [
            [4.0, 4.0, 4.0, 4.0, 4.0],
            [4.0, 4.0, 4.0, 4.0, 4.0],
            [4.0, 4.0, 4.0, 4.0, 4.0]])
-        compare_float_2d_arrays(bunch.amplitudes, [
+        helpers.compare_float_2d_arrays(bunch.amplitudes, [
            [1.0, 1.0, 1.0, 1.0, 1.0],
            [-8.4, 11.6, 15.6, -12.4, -6.4],
            [-8.2, -0.2, 4.8, 0.8, 2.8]])
-        assert bunch.regressor_names == None
-        assert bunch.regressors == None
+        assert bunch.regressor_names is None
+        assert bunch.regressors is None
 
     @staticmethod
     @mark.unit_test
-    def test_confounds_file(remove_test_dir):
+    def test_confounds_file(temporary_data_dir):
         """ Test the get_confounds_file method """
 
         confounds_file = join(
@@ -121,11 +99,11 @@
             Configuration()['directories']['test_data'], 'pipelines', 'team_X19V', 'confounds.tsv')
 
         # Get new confounds file
-        PipelineTeamX19V.get_confounds_file(confounds_file, 'sid', 'rid', TEMPORARY_DIR)
+        PipelineTeamX19V.get_confounds_file(confounds_file, 'sid', 'rid', temporary_data_dir)
 
         # Check confounds file was created
         created_confounds_file = join(
-            TEMPORARY_DIR, 'confounds_files', 'confounds_file_sub-sid_run-rid.tsv')
+            temporary_data_dir, 'confounds_files', 'confounds_file_sub-sid_run-rid.tsv')
         assert exists(created_confounds_file)
 
         # Check contents
diff --git a/tests/test_conftest.py b/tests/test_conftest.py
index 4ea92fb5..398a9a70 100644
--- a/tests/test_conftest.py
+++ b/tests/test_conftest.py
@@ -11,13 +11,12 @@
     pytest -q test_conftest.py -k <selected_test>
 """
 
-from os import makedirs, remove
-from os.path import join, abspath, isdir, isfile
-from shutil import rmtree
+from os import remove
+from os.path import join, isdir, isfile
 
 from datetime import datetime
 
-from pytest import mark, helpers, fixture, raises
+from pytest import mark, helpers, raises
 from nipype import Node, Workflow
 from nipype.interfaces.utility import Function
 
@@ -26,24 +25,13 @@
 from narps_open.runner import PipelineRunner
 from narps_open.pipelines import Pipeline
 
-TEST_DIR = abspath(join(Configuration()['directories']['test_runs'], 'test_conftest'))
-
-@fixture
-def set_test_directory(scope = 'function'):
-    """ A fixture to remove temporary directory created by tests """
-
-    rmtree(TEST_DIR, ignore_errors = True)
-    makedirs(TEST_DIR, exist_ok = True)
-    yield
-    # Comment this line for debugging
-    rmtree(TEST_DIR, ignore_errors = True)
-
 class MockupPipeline(Pipeline):
     """ A simple Pipeline class for test purposes """
 
-    def __init__(self):
+    def __init__(self, base_dir: str):
         super().__init__()
-        self.test_file = join(TEST_DIR, 'test_conftest.txt')
+        self.base_dir = base_dir
+        self.test_file = join(base_dir, 'test_conftest.txt')
 
         # Init the test_file : write a number of execution set to zero
         with open(self.test_file, 'w', encoding = 'utf-8') as file:
@@ -126,7 +114,7 @@
         node_files.inputs.file_list = file_list
 
         workflow = Workflow(
-            base_dir = TEST_DIR,
+            base_dir = self.base_dir,
             name = workflow_name
             )
         workflow.add_nodes([node_count, node_decide, node_files])
@@ -166,21 +154,21 @@
     def get_preprocessing_outputs(self):
         """ Return a list of templates of the output files generated by the preprocessing """
-        template = join(TEST_DIR, 'subject_id_{subject_id}_output_preprocessing_1.md')
+        template = join(self.base_dir, 'subject_id_{subject_id}_output_preprocessing_1.md')
         return [template.format(subject_id = s) for s in self.subject_list]
 
     def get_run_level_outputs(self):
         """ Return a list of templates of the output files generated by the run level analysis.
             Templates are expressed relatively to the self.directories.output_dir.
         """
-        template = join(TEST_DIR, 'subject_id_{subject_id}_output_run_1.md')
+        template = join(self.base_dir, 'subject_id_{subject_id}_output_run_1.md')
         return [template.format(subject_id = s) for s in self.subject_list]
 
     def get_subject_level_outputs(self):
         """ Return a list of templates of the output files generated by the subject level analysis.
             Templates are expressed relatively to the self.directories.output_dir.
         """
-        template = join(TEST_DIR, 'subject_id_{subject_id}_output_analysis_1.md')
+        template = join(self.base_dir, 'subject_id_{subject_id}_output_analysis_1.md')
         return [template.format(subject_id = s) for s in self.subject_list]
 
@@ -188,19 +176,19 @@
     def get_group_level_outputs(self):
         """ Return a list of templates of the output files generated by the group level analysis.
             Templates are expressed relatively to the self.directories.output_dir.
""" templates = [ - join(TEST_DIR, 'group_{nb_subjects}_output_a.md'), - join(TEST_DIR, 'group_{nb_subjects}_output_b.md') + join(self.base_dir, 'group_{nb_subjects}_output_a.md'), + join(self.base_dir, 'group_{nb_subjects}_output_b.md') ] return_list = [t.format(nb_subjects = len(self.subject_list)) for t in templates] - template = join(TEST_DIR, 'hypothesis_{id}.md') + template = join(self.base_dir, 'hypothesis_{id}.md') return_list += [template.format(id = i) for i in range(1,19)] return return_list def get_hypotheses_outputs(self): """ Return the names of the files used by the team to answer the hypotheses of NARPS. """ - template = join(TEST_DIR, 'hypothesis_{id}.md') + template = join(self.base_dir, 'hypothesis_{id}.md') return [template.format(id = i) for i in range(1,19)] class MockupResultsCollection(): @@ -263,11 +251,11 @@ def test_compare_float_2d_arrays(): @staticmethod @mark.unit_test - def test_test_outputs(set_test_directory): + def test_test_outputs(temporary_data_dir): """ Test the test_pipeline_outputs helper """ # Test pipeline - pipeline = MockupPipeline() + pipeline = MockupPipeline(temporary_data_dir) pipeline.subject_list = ['001', '002'] # Wrong length for nb_of_outputs @@ -343,7 +331,7 @@ def test_test_correlation_results(mocker): @staticmethod @mark.unit_test - def test_test_pipeline_execution(mocker, set_test_directory): + def test_test_pipeline_execution(mocker, temporary_data_dir): """ Test the test_pipeline_execution helper """ # Set subgroups of subjects @@ -352,7 +340,7 @@ def test_test_pipeline_execution(mocker, set_test_directory): # Create mocks mocker.patch('conftest.get_correlation_coefficient', return_value = 1.0) fake_runner = PipelineRunner('2T6S') - fake_runner._pipeline = MockupPipeline() + fake_runner._pipeline = MockupPipeline(temporary_data_dir) mocker.patch('conftest.PipelineRunner', return_value = fake_runner) mocker.patch('conftest.ResultsCollection', return_value = MockupResultsCollection('2T6S')) @@ -360,13 +348,13 @@ def test_test_pipeline_execution(mocker, set_test_directory): helpers.test_pipeline_execution('test_conftest', 7) # Check outputs - assert isdir(join(TEST_DIR, 'TestConftest_preprocessing_workflow')) - assert isdir(join(TEST_DIR, 'TestConftest_run_level_workflow')) - assert isdir(join(TEST_DIR, 'TestConftest_subject_level_workflow')) - assert isdir(join(TEST_DIR, 'TestConftest_group_level_workflow')) + assert isdir(join(temporary_data_dir, 'TestConftest_preprocessing_workflow')) + assert isdir(join(temporary_data_dir, 'TestConftest_run_level_workflow')) + assert isdir(join(temporary_data_dir, 'TestConftest_subject_level_workflow')) + assert isdir(join(temporary_data_dir, 'TestConftest_group_level_workflow')) # Check executions - with open(join(TEST_DIR, 'test_conftest.txt'), 'r', encoding = 'utf-8') as file: + with open(join(temporary_data_dir, 'test_conftest.txt'), 'r', encoding = 'utf-8') as file: assert file.readline() == '0\n' # First exec of preprocessing creates an exception (execution counter == 1) assert file.readline() == 'TestConftest_preprocessing_workflow 4 1\n'