From 6986e90351bca4071ea61387bee1f52f1125aceb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 31 Aug 2023 14:35:10 +0200 Subject: [PATCH 01/41] [BUG] inside unit_tests workflow --- .github/workflows/unit_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 20f20ea3..d0097882 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -34,7 +34,7 @@ jobs: - name: Checkout repository uses: actions/checkout@v3 - - name: Load configuration for self-hosted runner + - name: Load configuration for self-hosted runner run: cp /home/neuro/local_testing_config.toml narps_open/utils/configuration/testing_config.toml - name: Install dependencies From 08dd3410f17a8b18487fc6f8d26859848e95887f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 22 Nov 2023 12:07:46 +0100 Subject: [PATCH 02/41] [DOC] runner help --- narps_open/runner.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/narps_open/runner.py b/narps_open/runner.py index 7a9594bf..0776c4aa 100644 --- a/narps_open/runner.py +++ b/narps_open/runner.py @@ -159,12 +159,12 @@ def get_missing_group_level_outputs(self): parser.add_argument('-t', '--team', type=str, required=True, help='the team ID') subjects = parser.add_mutually_exclusive_group(required=True) - subjects.add_argument('-r', '--rsubjects', type=str, - help='the number of subjects to be randomly selected') subjects.add_argument('-s', '--subjects', nargs='+', type=str, action='extend', - help='a list of subjects') + help='a list of subjects to be selected') subjects.add_argument('-n', '--nsubjects', type=str, - help='the number of subjects to be randomly selected') + help='the number of subjects to be selected') + subjects.add_argument('-r', '--rsubjects', type=str, + help='the number of subjects to be selected randomly') levels = parser.add_mutually_exclusive_group(required=False) levels.add_argument('-g', '--group', action='store_true', default=False, help='run the group level only') From 4c6c89447bd3df3d748d2e730941edcc1e5f64bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 10 Jan 2024 11:58:38 +0100 Subject: [PATCH 03/41] Creating entry-points for the project --- narps_open/data/description/__main__.py | 96 +++++++++++++------------ narps_open/data/results/__main__.py | 55 +++++++------- narps_open/runner.py | 7 +- narps_open/utils/status.py | 11 ++- setup.py | 10 ++- 5 files changed, 104 insertions(+), 75 deletions(-) diff --git a/narps_open/data/description/__main__.py b/narps_open/data/description/__main__.py index e538ff4d..b6c9ead3 100644 --- a/narps_open/data/description/__main__.py +++ b/narps_open/data/description/__main__.py @@ -8,49 +8,55 @@ from narps_open.data.description import TeamDescription -# Parse arguments -parser = ArgumentParser(description='Get description of a NARPS pipeline.') -parser.add_argument('-t', '--team', type=str, required=True, - help='the team ID') -parser.add_argument('-d', '--dictionary', type=str, required=False, - choices=[ - 'general', - 'exclusions', - 'preprocessing', - 'analysis', - 'categorized_for_analysis', - 'derived', - 'comments' - ], - help='the sub dictionary of team description') -formats = parser.add_mutually_exclusive_group(required = False) -formats.add_argument('--json', action='store_true', help='output team description as JSON') -formats.add_argument('--md', action='store_true', help='output team description as Markdown') -arguments = parser.parse_args() - -# Initialize a TeamDescription -information = TeamDescription(team_id = arguments.team) - -# Output description -if arguments.md and arguments.dictionary is not None: - print('Sub dictionaries cannot be exported as Markdown yet.') - print('Print the whole description instead.') -elif arguments.md: - print(information.markdown()) -else: - if arguments.dictionary == 'general': - print(dumps(information.general, indent = 4)) - elif arguments.dictionary == 'exclusions': - print(dumps(information.exclusions, indent = 4)) - elif arguments.dictionary == 'preprocessing': - print(dumps(information.preprocessing, indent = 4)) - elif arguments.dictionary == 'analysis': - print(dumps(information.analysis, indent = 4)) - elif arguments.dictionary == 'categorized_for_analysis': - print(dumps(information.categorized_for_analysis, indent = 4)) - elif arguments.dictionary == 'derived': - print(dumps(information.derived, indent = 4)) - elif arguments.dictionary == 'comments': - print(dumps(information.comments, indent = 4)) +def main(): + """ Entry-point for the command line tool narps_description """ + + # Parse arguments + parser = ArgumentParser(description='Get description of a NARPS pipeline.') + parser.add_argument('-t', '--team', type=str, required=True, + help='the team ID') + parser.add_argument('-d', '--dictionary', type=str, required=False, + choices=[ + 'general', + 'exclusions', + 'preprocessing', + 'analysis', + 'categorized_for_analysis', + 'derived', + 'comments' + ], + help='the sub dictionary of team description') + formats = parser.add_mutually_exclusive_group(required = False) + formats.add_argument('--json', action='store_true', help='output team description as JSON') + formats.add_argument('--md', action='store_true', help='output team description as Markdown') + arguments = parser.parse_args() + + # Initialize a TeamDescription + information = TeamDescription(team_id = arguments.team) + + # Output description + if arguments.md and arguments.dictionary is not None: + print('Sub dictionaries cannot be exported as Markdown yet.') + print('Print the whole description instead.') + elif arguments.md: + print(information.markdown()) else: - print(dumps(information, indent = 4)) + if arguments.dictionary == 'general': + print(dumps(information.general, indent = 4)) + elif arguments.dictionary == 'exclusions': + print(dumps(information.exclusions, indent = 4)) + elif arguments.dictionary == 'preprocessing': + print(dumps(information.preprocessing, indent = 4)) + elif arguments.dictionary == 'analysis': + print(dumps(information.analysis, indent = 4)) + elif arguments.dictionary == 'categorized_for_analysis': + print(dumps(information.categorized_for_analysis, indent = 4)) + elif arguments.dictionary == 'derived': + print(dumps(information.derived, indent = 4)) + elif arguments.dictionary == 'comments': + print(dumps(information.comments, indent = 4)) + else: + print(dumps(information, indent = 4)) + +if __name__ == '__main__': + main() diff --git a/narps_open/data/results/__main__.py b/narps_open/data/results/__main__.py index b9f1d728..88111b87 100644 --- a/narps_open/data/results/__main__.py +++ b/narps_open/data/results/__main__.py @@ -8,27 +8,34 @@ from narps_open.data.results import ResultsCollectionFactory from narps_open.pipelines import implemented_pipelines -# Parse arguments -parser = ArgumentParser(description='Get Neurovault collection of results from NARPS teams.') -group = parser.add_mutually_exclusive_group(required = True) -group.add_argument('-t', '--teams', nargs='+', type=str, action='extend', - help='a list of team IDs') -group.add_argument('-a', '--all', action='store_true', help='download results from all teams') -parser.add_argument('-r', '--rectify', action='store_true', default = False, required = False, - help='rectify the results') -arguments = parser.parse_args() - -factory = ResultsCollectionFactory() - -if arguments.all: - for team_id, _ in implemented_pipelines.items(): - collection = factory.get_collection(team_id) - collection.download() - if arguments.rectify: - collection.rectify() -else: - for team in arguments.teams: - collection = factory.get_collection(team) - collection.download() - if arguments.rectify: - collection.rectify() + +def main(): + """ Entry-point for the command line tool narps_results """ + + # Parse arguments + parser = ArgumentParser(description='Get Neurovault collection of results from NARPS teams.') + group = parser.add_mutually_exclusive_group(required = True) + group.add_argument('-t', '--teams', nargs='+', type=str, action='extend', + help='a list of team IDs') + group.add_argument('-a', '--all', action='store_true', help='download results from all teams') + parser.add_argument('-r', '--rectify', action='store_true', default = False, required = False, + help='rectify the results') + arguments = parser.parse_args() + + factory = ResultsCollectionFactory() + + if arguments.all: + for team_id, _ in implemented_pipelines.items(): + collection = factory.get_collection(team_id) + collection.download() + if arguments.rectify: + collection.rectify() + else: + for team in arguments.teams: + collection = factory.get_collection(team) + collection.download() + if arguments.rectify: + collection.rectify() + +if __name__ == '__main__': + main() diff --git a/narps_open/runner.py b/narps_open/runner.py index 0776c4aa..32c80180 100644 --- a/narps_open/runner.py +++ b/narps_open/runner.py @@ -152,7 +152,8 @@ def get_missing_group_level_outputs(self): return [f for f in files if not isfile(f)] -if __name__ == '__main__': +def main(): + """ Entry-point for the command line tool narps_open_runner """ # Parse arguments parser = ArgumentParser(description='Run the pipelines from NARPS.') @@ -191,7 +192,6 @@ def get_missing_group_level_outputs(self): # Check data if arguments.check: - missing_files = [] print('Missing files for team', arguments.team, 'after running', len(runner.pipeline.subject_list), 'subjects:') if not arguments.group: @@ -202,3 +202,6 @@ def get_missing_group_level_outputs(self): # Start the runner else: runner.start(arguments.first, arguments.group) + +if __name__ == '__main__': + main() diff --git a/narps_open/utils/status.py b/narps_open/utils/status.py index 0058b40b..4f80b11f 100644 --- a/narps_open/utils/status.py +++ b/narps_open/utils/status.py @@ -22,7 +22,6 @@ def get_opened_issues(): request_url = 'https://api.github.com/repos/Inria-Empenn/narps_open_pipelines' response = get(request_url, timeout = 2) response.raise_for_status() - nb_issues = response.json()['open_issues'] # Get all opened issues request_url = 'https://api.github.com/repos/Inria-Empenn/narps_open_pipelines/issues' @@ -185,11 +184,14 @@ def markdown(self): reproducibility_ranking += ':star:' for _ in range(4-team_values['reproducibility']): reproducibility_ranking += ':black_small_square:' - output_markdown += f'| {reproducibility_ranking}
{team_values["reproducibility_comment"]} |\n' + output_markdown += f'| {reproducibility_ranking}
' + output_markdown += f'{team_values["reproducibility_comment"]} |\n' return output_markdown -if __name__ == '__main__': +def main(): + """ Entry-point for the command line tool narps_open_status """ + # Parse arguments parser = ArgumentParser(description='Get a work progress status report for pipelines.') formats = parser.add_mutually_exclusive_group(required = False) @@ -204,3 +206,6 @@ def markdown(self): print(report.markdown()) else: print(report) + +if __name__ == '__main__': + main() diff --git a/setup.py b/setup.py index 2c6c9b06..d28a3dab 100644 --- a/setup.py +++ b/setup.py @@ -63,5 +63,13 @@ ('narps_open/data/description', ['narps_open/data/description/analysis_pipelines_comments.tsv']), ('narps_open/data/description', ['narps_open/data/description/analysis_pipelines_derived_descriptions.tsv']), ('narps_open/data/description', ['narps_open/data/description/analysis_pipelines_full_descriptions.tsv']) - ] + ], + entry_points = { + 'console_scripts': [ + 'narps_open_runner = narps_open.runner:main', + 'narps_open_status = narps_open.utils.status:main', + 'narps_description = narps_open.data.description.__main__:main', + 'narps_results = narps_open.data.results.__main__:main' + ] + } ) From 0f71b3194ba59911a71788fc42c6069e2d9bec5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 10 Jan 2024 13:43:23 +0100 Subject: [PATCH 04/41] [DOC] command line tools --- docs/data.md | 27 +++++++++++++++------------ docs/description.md | 11 +++++++---- docs/running.md | 18 +++++++++++------- docs/status.md | 9 ++++++--- 4 files changed, 39 insertions(+), 26 deletions(-) diff --git a/docs/data.md b/docs/data.md index c5b55fba..3471729a 100644 --- a/docs/data.md +++ b/docs/data.md @@ -67,28 +67,31 @@ for team in teams: collection.rectify() # Rectified versions are created ``` +> [! TIP] +> In the following examples, use `narps_results` or `python narps_open/data/results` indifferently to launch the command line tool. + ```bash # From the command line -$ python narps_open/data/results -h -usage: results [-h] (-t TEAMS [TEAMS ...] | -a) [-r] +narps_results -h + usage: results [-h] (-t TEAMS [TEAMS ...] | -a) [-r] -Get Neurovault collection of results from NARPS teams. + Get Neurovault collection of results from NARPS teams. -options: - -h, --help show this help message and exit - -t TEAMS [TEAMS ...], --teams TEAMS [TEAMS ...] - a list of team IDs - -a, --all download results from all teams - -r, --rectify rectify the results + options: + -h, --help show this help message and exit + -t TEAMS [TEAMS ...], --teams TEAMS [TEAMS ...] + a list of team IDs + -a, --all download results from all teams + -r, --rectify rectify the results # Either download all collections -python narps_open/utils/results -a +narps_results -a # Or select the ones you need -python narps_open/utils/results -t 2T6S C88N L1A8 +narps_results -t 2T6S C88N L1A8 # Download and rectify the collections -python narps_open/utils/results -r -t 2T6S C88N L1A8 +narps_results -r -t 2T6S C88N L1A8 ``` The collections are also available [here](https://zenodo.org/record/3528329/) as one release on Zenodo that you can download. diff --git a/docs/description.md b/docs/description.md index ac17f588..1723f64f 100644 --- a/docs/description.md +++ b/docs/description.md @@ -12,8 +12,11 @@ The class `TeamDescription` of module `narps_open.data.description` acts as a pa You can use the command-line tool as so. Option `-t` is for the team id, option `-d` allows to print only one of the sub parts of the description among : `general`, `exclusions`, `preprocessing`, `analysis`, `categorized_for_analysis`, `derived`, and `comments`. Options `--json` and `--md` allow to choose the export format you prefer between JSON and Markdown. +> [! TIP] +> In the following examples, use `narps_description` or `python narps_open/data/description` indifferently to launch the command line tool. + ```bash -python narps_open/data/description -h +narps_description -h # usage: __init__.py [-h] -t TEAM [-d {general,exclusions,preprocessing,analysis,categorized_for_analysis,derived,comments}] # # Get description of a NARPS pipeline. @@ -26,7 +29,7 @@ python narps_open/data/description -h # --json output team description as JSON # --md output team description as Markdown -python narps_open/data/description -t 2T6S --json +narps_description -t 2T6S --json # { # "general.teamID": "2T6S", # "general.NV_collection_link": "https://neurovault.org/collections/4881/", @@ -41,7 +44,7 @@ python narps_open/data/description -t 2T6S --json # "preprocessing.preprocessing_order": "We used the provided preprocessed data by fMRIPprep 1.1.4 (Esteban, Markiewicz, et al. (2018); Esteban, Blair, et al. (2018); RRID:SCR_016216), which is based on Nipype 1.1.1 (Gorgolewski et al. (2011); Gorgolewski et al. (2018); RRID:SCR_002502) and we additionally conducted a spatial smoothing using the provided preprocessed data set and SPM12. Here, we attach the preprocessing steps described in the provided data set. \nAnatomical data preprocessing\nThe T1-weighted (T1w) image was corrected for intensity non-uniformity (INU) using N4BiasFieldCorrection (Tustison et al. 2010, ANTs 2.2.0), and used as T1w-reference throughout the workflow. The T1w-reference was then skull-stripped using antsBrainExtraction.sh (ANTs 2.2.0), using OASIS as target template. Brain surfaces we # ... -python narps_open/data/description -t 2T6S -d general --json +narps_description -t 2T6S -d general --json # { # "teamID": "2T6S", # "NV_collection_link": "https://neurovault.org/collections/4881/", @@ -53,7 +56,7 @@ python narps_open/data/description -t 2T6S -d general --json # "general_comments": "NA" # } -python narps_open/data/description -t 2T6S --md +narps_description -t 2T6S --md # # NARPS team description : 2T6S # ## General # * `teamID` : 2T6S diff --git a/docs/running.md b/docs/running.md index eb614eef..edf94171 100644 --- a/docs/running.md +++ b/docs/running.md @@ -2,10 +2,13 @@ ## Using the runner application -The `narps_open.runner` module allows to run pipelines from the command line : +The `narps_open.runner` module allows to run pipelines from the command line. + +> [! TIP] +> In the following examples, use `narps_open_runner` or `python narps_open/runner.py` indifferently to launch the command line tool. ```bash -python narps_open/runner.py -h +narps_open_runner -h usage: runner.py [-h] -t TEAM (-r RANDOM | -s SUBJECTS [SUBJECTS ...]) [-g | -f] Run the pipelines from NARPS. @@ -19,13 +22,14 @@ python narps_open/runner.py -h -f, --first run the first levels only (preprocessing + subjects + runs) -c, --check check pipeline outputs (runner is not launched) -python narps_open/runner.py -t 2T6S -s 001 006 020 100 -python narps_open/runner.py -t 2T6S -r 4 -python narps_open/runner.py -t 2T6S -r 4 -f -python narps_open/runner.py -t 2T6S -r 4 -f -c # Check the output files without launching the runner +narps_open_runner -t 2T6S -s 001 006 020 100 +narps_open_runner -t 2T6S -r 4 +narps_open_runner -t 2T6S -r 4 -f +narps_open_runner -t 2T6S -r 4 -f -c # Check the output files without launching the runner ``` -In this usecase, the paths where to store the outputs and to the dataset are picked by the runner from the [configuration](docs/configuration.md). +> [! NOTE] +> In this usecase, the paths where to store the outputs and to the dataset are picked by the runner from the [configuration](docs/configuration.md). ## Using the `PipelineRunner` object diff --git a/docs/status.md b/docs/status.md index 28492390..8ffc7beb 100644 --- a/docs/status.md +++ b/docs/status.md @@ -46,8 +46,11 @@ report.markdown() # Returns a string containing the markdown You can also use the command-line tool as so. +> [! TIP] +> In the following examples, use `narps_open_status` or `python narps_open/utils/status.py` indifferently to launch the command line tool. + ```bash -python narps_open/utils/status -h +narps_open_status -h # usage: status.py [-h] [--json | --md] # # Get a work progress status report for pipelines. @@ -57,7 +60,7 @@ python narps_open/utils/status -h # --json output the report as JSON # --md output the report as Markdown -python narps_open/utils/status --json +narps_open_status --json # { # "08MQ": { # "softwares": "FSL", @@ -83,7 +86,7 @@ python narps_open/utils/status --json # }, # ... -python narps_open/utils/status --md +narps_open_status --md # ... # | team_id | status | main software | fmriprep used ? | related issues | related pull requests | excluded from NARPS analysis | reproducibility | # | --- |:---:| --- | --- | --- | --- | --- | --- | From 7adb57f565c6f67f352f64e517a7ea8b3d83bd43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 10 Jan 2024 14:02:26 +0100 Subject: [PATCH 05/41] [DOC] command line tools --- INSTALL.md | 26 ++++++++++++++++++++++---- docs/data.md | 2 +- docs/description.md | 2 +- docs/running.md | 4 ++-- docs/status.md | 2 +- 5 files changed, 27 insertions(+), 9 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index 3d78cd3b..872e0b44 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -74,12 +74,30 @@ cd /home/neuro/code/ pip install . ``` -Finally, you are able to run pipelines : +Finally, you are able to use the scripts of the project : + +* `narps_open_runner`: run pipelines +* `narps_description`: get the textual description made by a team +* `narps_results`: download the original results from teams +* `narps_open_status`: get status information about the development process of the pipelines ```bash -python narps_open/runner.py - usage: runner.py [-h] -t TEAM (-r RSUBJECTS | -s SUBJECTS [SUBJECTS ...] | -n NSUBJECTS) [-g | -f] [-c] +# Run the pipeline for team 2T6S, with 40 subjects +narps_open_runner -t 2T6S -n 40 + +# Get the description of team C88N in markdown formatting +narps_description -t C88N --md + +# Download the results from all teams +narps_results -a + +# Get the pipeline work status information in json formatting +narps_open_status --json ``` > [!NOTE] -> For further information, read this documentation page [docs/running.md](docs/running.md). +> For further information about these command line tools, read the corresponding documentation pages. +> * `narps_open_runner` : [docs/running.md](docs/running.md) +> * `narps_description` : [docs/description.md](docs/description.md) +> * `narps_results` : [docs/data.md](docs/data.md) +> * `narps_open_status` : [docs/status.md](docs/status.md) diff --git a/docs/data.md b/docs/data.md index 3471729a..3a68b32e 100644 --- a/docs/data.md +++ b/docs/data.md @@ -67,7 +67,7 @@ for team in teams: collection.rectify() # Rectified versions are created ``` -> [! TIP] +> [!TIP] > In the following examples, use `narps_results` or `python narps_open/data/results` indifferently to launch the command line tool. ```bash diff --git a/docs/description.md b/docs/description.md index 1723f64f..82f78097 100644 --- a/docs/description.md +++ b/docs/description.md @@ -12,7 +12,7 @@ The class `TeamDescription` of module `narps_open.data.description` acts as a pa You can use the command-line tool as so. Option `-t` is for the team id, option `-d` allows to print only one of the sub parts of the description among : `general`, `exclusions`, `preprocessing`, `analysis`, `categorized_for_analysis`, `derived`, and `comments`. Options `--json` and `--md` allow to choose the export format you prefer between JSON and Markdown. -> [! TIP] +> [!TIP] > In the following examples, use `narps_description` or `python narps_open/data/description` indifferently to launch the command line tool. ```bash diff --git a/docs/running.md b/docs/running.md index edf94171..b2f7da77 100644 --- a/docs/running.md +++ b/docs/running.md @@ -4,7 +4,7 @@ The `narps_open.runner` module allows to run pipelines from the command line. -> [! TIP] +> [!TIP] > In the following examples, use `narps_open_runner` or `python narps_open/runner.py` indifferently to launch the command line tool. ```bash @@ -28,7 +28,7 @@ narps_open_runner -t 2T6S -r 4 -f narps_open_runner -t 2T6S -r 4 -f -c # Check the output files without launching the runner ``` -> [! NOTE] +> [!NOTE] > In this usecase, the paths where to store the outputs and to the dataset are picked by the runner from the [configuration](docs/configuration.md). ## Using the `PipelineRunner` object diff --git a/docs/status.md b/docs/status.md index 8ffc7beb..d461b1ea 100644 --- a/docs/status.md +++ b/docs/status.md @@ -46,7 +46,7 @@ report.markdown() # Returns a string containing the markdown You can also use the command-line tool as so. -> [! TIP] +> [!TIP] > In the following examples, use `narps_open_status` or `python narps_open/utils/status.py` indifferently to launch the command line tool. ```bash From 13c0393687d62613f2020e83f25f47dc4aadb2b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 10 Jan 2024 16:43:32 +0100 Subject: [PATCH 06/41] Adding a tester command line tool --- INSTALL.md | 8 +++++++- docs/testing.md | 24 +++++++++++++++++++++++- narps_open/tester.py | 29 +++++++++++++++++++++++++++++ setup.py | 1 + 4 files changed, 60 insertions(+), 2 deletions(-) create mode 100644 narps_open/tester.py diff --git a/INSTALL.md b/INSTALL.md index 872e0b44..0ad3bdfa 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -77,6 +77,7 @@ pip install . Finally, you are able to use the scripts of the project : * `narps_open_runner`: run pipelines +* `narps_open_tester`: run a pipeline and test its results against original ones from the team * `narps_description`: get the textual description made by a team * `narps_results`: download the original results from teams * `narps_open_status`: get status information about the development process of the pipelines @@ -85,6 +86,10 @@ Finally, you are able to use the scripts of the project : # Run the pipeline for team 2T6S, with 40 subjects narps_open_runner -t 2T6S -n 40 +# Run the pipeline for team 08MQ, compare results with original ones, +# and produces a report with correlation values. +narps_open_tester -t 08MQ + # Get the description of team C88N in markdown formatting narps_description -t C88N --md @@ -98,6 +103,7 @@ narps_open_status --json > [!NOTE] > For further information about these command line tools, read the corresponding documentation pages. > * `narps_open_runner` : [docs/running.md](docs/running.md) +> * `narps_open_tester` : [docs/testing.md](docs/testing.md#command-line-tool) > * `narps_description` : [docs/description.md](docs/description.md) -> * `narps_results` : [docs/data.md](docs/data.md) +> * `narps_results` : [docs/data.md](docs/data.md#results-from-narps-teams) > * `narps_open_status` : [docs/status.md](docs/status.md) diff --git a/docs/testing.md b/docs/testing.md index 5294ea9b..1ea3b66c 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -2,6 +2,13 @@ :mega: This file describes the test suite and features for the project. +## Test dependencies + +Before using the test suite, make sure you installed all the dependencies, after step 5 of the [installation process](docs/install.md), run this command: +```bash +pip install .[tests] +``` + ## Static analysis We use [*pylint*](http://pylint.pycqa.org/en/latest/) to run static code analysis. @@ -24,7 +31,7 @@ black ./narps_open/runner.py ## Automatic tests -Use [*pytest*](https://docs.pytest.org/en/6.2.x/contents.html) to run automatic testing and its [*pytest-cov*](https://pytest-cov.readthedocs.io/en/latest/) plugin to control code coverage. Furthermore, [*pytest-helpers-namespace*](https://pypi.org/project/pytest-helpers-namespace/) enables to register helper functions. +We use [*pytest*](https://docs.pytest.org/en/6.2.x/contents.html) to run automatic testing and its [*pytest-cov*](https://pytest-cov.readthedocs.io/en/latest/) plugin to control code coverage. Furthermore, [*pytest-helpers-namespace*](https://pypi.org/project/pytest-helpers-namespace/) enables to register helper functions. > The pytest framework makes it easy to write small tests, yet scales to support complex functional testing for applications and libraries. @@ -36,6 +43,21 @@ Tests can be launched manually or while using CI (Continuous Integration). * To run a tests with a given mark 'mark' : `pytest -m 'mark'` * To create code coverage data : `coverage run -m pytest ./tests` then `coverage report` to see the code coverage result or `coverage xml` to output a .xml report file +## Command line tool + +We created the simple command line tool `narps_open_tester` to help testing the outcome of one pipeline. + +> [!WARNING] +> This command must be launched from inside the repository's root directory, because it needs to access the `tests` directory relatively to the current/working directory. + +```bash +narps_open_tester -t 08MQ +``` + +This will run the pipeline for the requested team -here 08MQ- on subsets of subjects (20, 40, 60, 80 and 108). For each subset, the outputs of the pipeline (statistical maps for each of the 9 hypotheses) will be compared with original results from the team using a Pearson correlation computation. At each step, if one of the correlation score is below the threshold (see `correlation_thresholds` defined in `narps_open/utils/configuration/testing_config.toml`), the tests ends. Otherwise, it proceeds to the next step, i.e.: the next subset of subjects. + +Once finished, a text file report (`test_pipeline-*.txt`) is created, containing all the computed correlation values. + ## Configuration files for testing * `pytest.ini` is a global configuration files for using pytest (see reference [here](https://docs.pytest.org/en/7.1.x/reference/customize.html)). It allows to [register markers](https://docs.pytest.org/en/7.1.x/example/markers.html) that help to better identify tests. Note that `pytest.ini` could be replaced by data inside `pyproject.toml` in the next versions. diff --git a/narps_open/tester.py b/narps_open/tester.py new file mode 100644 index 00000000..1a2cf284 --- /dev/null +++ b/narps_open/tester.py @@ -0,0 +1,29 @@ +#!/usr/bin/python +# coding: utf-8 + +""" This module allows to compare pipeline output with original team results """ + +import sys +from argparse import ArgumentParser + +import pytest + +def main(): + """ Entry-point for the command line tool narps_open_tester """ + + # Parse arguments + parser = ArgumentParser(description='Test the pipelines from NARPS.') + parser.add_argument('-t', '--team', type=str, required=True, + help='the team ID') + arguments = parser.parse_args() + + sys.exit(pytest.main([ + '-s', + '-q', + '-x', + f'tests/pipelines/test_team_{arguments.team}.py', + '-m', + 'pipeline_test'])) + +if __name__ == '__main__': + main() diff --git a/setup.py b/setup.py index d28a3dab..91a2d63a 100644 --- a/setup.py +++ b/setup.py @@ -67,6 +67,7 @@ entry_points = { 'console_scripts': [ 'narps_open_runner = narps_open.runner:main', + 'narps_open_tester = narps_open.tester:main', 'narps_open_status = narps_open.utils.status:main', 'narps_description = narps_open.data.description.__main__:main', 'narps_results = narps_open.data.results.__main__:main' From a73f3c3346eb589b43ebdaa756367d8fb7e45b72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 11 Jan 2024 14:53:15 +0100 Subject: [PATCH 07/41] [DATALAD] change results url --- .gitmodules | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitmodules b/.gitmodules index d3eaeb2f..364a3345 100644 --- a/.gitmodules +++ b/.gitmodules @@ -5,6 +5,6 @@ datalad-url = https://github.com/OpenNeuroDatasets/ds001734.git [submodule "data/results"] path = data/results - url = https://gin.g-node.org/RemiGau/neurovault_narps_open_pipeline.git - datalad-url = https://gin.g-node.org/RemiGau/neurovault_narps_open_pipeline.git + url = https://gin.g-node.org/RemiGau/neurovault_narps_open_pipeline + datalad-url = https://gin.g-node.org/RemiGau/neurovault_narps_open_pipeline datalad-id = b7b70790-7b0c-40d3-976f-c7dd49df3b86 From 90aee19661c44cdc59a78e8445f266c67c824e10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 22 Jan 2024 11:48:09 +0100 Subject: [PATCH 08/41] First version of U26C --- narps_open/pipelines/team_U26C.py | 665 ++++++++++++++++++++++++++++++ 1 file changed, 665 insertions(+) create mode 100755 narps_open/pipelines/team_U26C.py diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py new file mode 100755 index 00000000..a6f434f8 --- /dev/null +++ b/narps_open/pipelines/team_U26C.py @@ -0,0 +1,665 @@ +#!/usr/bin/python +# coding: utf-8 + +""" Write the work of NARPS' team U26C using Nipype """ + +from os.path import join +from itertools import product + +from nipype import Workflow, Node, MapNode +from nipype.interfaces.utility import IdentityInterface, Function +from nipype.interfaces.io import SelectFiles, DataSink +from nipype.interfaces.spm import ( + Smooth, + OneSampleTTestDesign, EstimateModel, EstimateContrast, + Level1Design, TwoSampleTTestDesign, Threshold + ) +from nipype.algorithms.modelgen import SpecifySPMModel +from nipype.algorithms.misc import Gunzip + +from narps_open.pipelines import Pipeline +from narps_open.data.task import TaskInformation +from narps_open.data.participants import get_group +from narps_open.core.common import remove_file, list_intersection, elements_in_string, clean_list + +class PipelineTeamU26C(Pipeline): + """ A class that defines the pipeline of team U26C. """ + + def __init__(self): + super().__init__() + self.fwhm = 5.0 + self.team_id = 'U26C' + + gamble = [f'gamble_run{r}' for r in range(1, len(self.run_list) + 1)] + gain = [f'gamble_run{r}xgain_run{r}^1' for r in range(1, len(self.run_list) + 1)] + loss = [f'gamble_run{r}xloss_run{r}^1' for r in range(1, len(self.run_list) + 1)] + + self.subject_level_contrasts = [ + ['gamble', 'T', gamble, [1, 1, 1, 1]], + ['gain', 'T', gain, [1, 1, 1, 1]], + ['loss', 'T', loss, [1, 1, 1, 1]] + ] + + def get_preprocessing(self): + """ No preprocessing has been done by team U26C """ + return None + + def get_run_level_analysis(self): + """ No run level analysis has been done by team U26C """ + return None + + # @staticmethod # Starting python 3.10, staticmethod should be used here + # Otherwise it produces a TypeError: 'staticmethod' object is not callable + def get_subject_information(event_files: list, model: str): + """ Create Bunchs for SpecifySPMModel. + + Parameters : + - event_files: list of str, list of events files (one per run) for the subject + - model: str, either 'gain' or 'loss' + + Returns : + - subject_information : list of Bunch for 1st level analysis. + """ + + '''Picks onsets and durations per condition and adds them to lists. + This function specifically picks onsets for the speech vs speaker + where the presentation is clear or in noise. + The function accepts event files. + + 'subject_id' is a string, i.e., sub-001 + ''' + + cond_names = ['gamble'] + onset = {} + duration = {} + weights_gain = {} + weights_loss = {} + runs = ['01', '02', '03', '04'] + + for r in range(len(runs)): # Loop over number of runs. + onset.update({s + '_run' + str(r+1): [] for s in cond_names}) + duration.update({s + '_run' + str(r+1): [] for s in cond_names}) + weights_gain.update({'gain_run' + str(r+1): []}) + weights_loss.update({'loss_run' + str(r+1): []}) + + base_name = '/data/pt_nmc002/other/narps/event_tsvs/' + # subject_id = 'sub-001' + for ir, run in enumerate(runs): + f_events = base_name + subject_id + \ + '_task-MGT_run-' + runs[ir] + '_events.tsv' + with open(f_events, 'rt') as f: + next(f) # skip the header + for line in f: + info = line.strip().split() + for cond in cond_names: + val = cond + '_run' + str(ir+1) + val_gain = 'gain_run' + str(ir+1) + val_loss = 'loss_run' + str(ir+1) + onset[val].append(float(info[0])) + duration[val].append(float(info[1])) + weights_gain[val_gain].append(float(info[2])) + weights_loss[val_loss].append(float(info[3])) + # if cond == 'gain': + # weights[val].append(float(info[2])) + # elif cond == 'loss': + # weights[val].append(float(info[3])) + # elif cond == 'task-activ': + # weights[val].append(float(1)) + from nipype.interfaces.base import Bunch + + # Bunching is done per run, i.e. cond1_run1, cond2_run1, etc. + subjectinfo = [] + for r in range(len(runs)): + + cond = [c + '_run' + str(r+1) for c in cond_names] + gain = 'gain_run' + str(r+1) + loss = 'loss_run' + str(r+1) + + subjectinfo.insert(r, + Bunch(conditions=cond, + onsets=[onset[k] for k in cond], + durations=[duration[k] for k in cond], + amplitudes=None, + tmod=None, + pmod=[Bunch(name=[gain, loss], + poly=[1, 1], + param=[weights_gain[gain], + weights_loss[loss]])], + regressor_names=None, + regressors=None)) + + return subjectinfo + + # @staticmethod # Starting python 3.10, staticmethod should be used here + # Otherwise it produces a TypeError: 'staticmethod' object is not callable + def get_confounds_file(filepath, subject_id, run_id, working_dir): + """ + Create a new tsv files with only desired confounds per subject per run. + Also computes the first derivative of the motion parameters. + + Parameters : + - filepath : path to the subject confounds file + - subject_id : related subject id + - run_id : related run id + - working_dir: str, name of the directory for intermediate results + + Return : + - confounds_file : path to new file containing only desired confounds + """ + from os import makedirs + from os.path import join + + from pandas import DataFrame, read_csv + from numpy import array, transpose, insert, diff + + # Open original confounds file + data_frame = read_csv(filepath, sep = '\t', header=0) + + # Extract confounds we want to use for the model + retained_parameters = DataFrame(transpose(array([ + data_frame['CSF'], data_frame['WhiteMatter'], + data_frame['X'], data_frame['Y'], data_frame['Z'], + data_frame['RotX'], data_frame['RotY'], data_frame['RotZ'], + insert(diff(data_frame['X']), 0, 0), + insert(diff(data_frame['Y']), 0, 0), + insert(diff(data_frame['Z']), 0, 0), + insert(diff(data_frame['RotX']), 0, 0), + insert(diff(data_frame['RotY']), 0, 0), + insert(diff(data_frame['RotZ']), 0, 0) + ] + ))) + + # Write confounds to a file + confounds_file = join(working_dir, 'confounds_files', + f'confounds_file_sub-{subject_id}_run-{run_id}.tsv') + + makedirs(join(working_dir, 'confounds_files'), exist_ok = True) + + with open(confounds_file, 'w', encoding = 'utf-8') as writer: + writer.write(retained_parameters.to_csv( + sep = '\t', index = False, header = False, na_rep = '0.0')) + + return confounds_file + + def get_subject_level_analysis(self): + """ + Create the subject level analysis workflow. + + Returns: + - subject_level_analysis : nipype.WorkFlow + """ + # Identitiy interface Node - to iterate over subject_id and run + infosource = Node(interface=IdentityInterface(fields=['subject_id']), + name = 'infosource') + infosource.iterables = [('subject_id', subs)] + + # Select files from derivatives + templates = { + 'func': join('derivatives', 'fmriprep', '{subject_id}', 'func', + 'sub-{subject_id}_task-MGT_run-*_bold_space-MNI152NLin2009cAsym_preproc.nii.gz'), + 'confounds' : join('derivatives', 'fmriprep', 'sub-{subject_id}', 'func', + 'sub-{subject_id}_task-MGT_run-*_bold_confounds.tsv'), + 'events': join('derivatives', 'fmriprep', + '{subject_id}_task-MGT_run-*_events.tsv') + } + selectderivs = Node(SelectFiles(templates), name = 'selectderivs') + selectderivs.inputs.sort_filelist = True + + # Gunzip - gunzip files because SPM do not use .nii.gz files + gunzip = MapNode(Gunzip(), name='gunzip', iterfield=['in_file']) + + # Smooth warped functionals. + smooth = Node(Smooth(), name = 'smooth') + smooth.inputs.overwrite = False + smooth.iterables = ('fwhm', fwhmlist) + + # Function node get_subject_information - get subject specific condition information + getsubinforuns = Node(Function( + function = pick_onsets, + input_names = ['subject_id'], + output_names = ['subject_info'] + ), + name = 'getsubinforuns') + + # Function node get_confounds_file - get confounds files + confounds = MapNode(Function( + function = self.get_confounds_file, + input_names = ['filepath', 'subject_id', 'run_id', 'working_dir'], + output_names = ['confounds_file']), + name = 'confounds', iterfield = ['filepath', 'run_id']) + confounds.inputs.working_dir = self.directories.working_dir + confounds.inputs.run_id = self.run_list + + modelspec = Node(SpecifySPMModel(), name = 'modelspec') + modelspec.inputs.overwrite = False + modelspec.inputs.concatenate_runs = False + modelspec.inputs.input_units = 'secs' + modelspec.inputs.output_units = 'secs' + modelspec.inputs.time_repetition = TaskInformation()['RepetitionTime'] + modelspec.inputs.high_pass_filter_cutoff = 128 + + level1design = Node(Level1Design(), name = 'level1design') + level1design.inputs.overwrite = False + level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}} + level1design.inputs.timing_units = 'secs' + level1design.inputs.interscan_interval = TaskInformation()['RepetitionTime'] + + level1estimate = Node(EstimateModel(), name = 'level1estimate') + level1estimate.inputs.overwrite = False + level1estimate.inputs.estimation_method = {'Classical': 1} + + contrast_estimate = Node(EstimateContrast(), name = 'contraste_estimate') + contrast_estimate.inputs.overwrite=False, + contrast_estimate.config = {'execution': {'remove_unnecessary_outputs': False}} + + contrasts = Node(Function( + function = con_setup, + input_names = ['subject_id'], + output_names = ['contrasts'] + ), + name = 'contrasts') + + subject_level_analysis = Workflow( + base_dir = self.directories.working_dir, name = 'subject_level_analysis' + ) + subject_level_analysis.connect([ + (infosource, selectderivs, [('subject_id', 'subject_id')]), + (infosource, contrasts, [('subject_id', 'subject_id')]), + (infosource, getsubinforuns, [('subject_id', 'subject_id')]), + (infosource, confounds, [('subject_id', 'subject_id')]), + (selectderivs, gunzip, [('func', 'in_file')]), + (selectderivs, confounds, [('confounds', 'filepath')]), + (gunzip, smooth, [('out_file', 'in_files')]), + (contrasts, contrast_estimate, [('contrasts', 'contrasts')]), + (contrast_estimate, selectcontrast, [('con_images', 'inlist')]), + (selectcontrast, overlaystats, [('out', 'stat_image')]), + (overlaystats, slicestats, [('out_file', 'in_file')]), + (getsubinforuns, modelspec, [('subject_info', 'subject_info')]), + (confounds, modelspec, [('confounds_file', 'realignment_parameters')]), + (smooth, modelspec, [('smoothed_files', 'functional_runs')]), + (modelspec, level1design, [('session_info', 'session_info')]), + (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]), + (level1estimate, contrast_estimate,[ + ('spm_mat_file', 'spm_mat_file'), + ('beta_images', 'beta_images'), + ('residual_image', 'residual_image')]) + ]) + + return subject_level_analysis + + def get_subject_level_outputs(self): + """ Return the names of the files the subject level analysis is supposed to generate. """ + + # Handle gain files + templates = [join( + self.directories.output_dir, + 'subject_level_analysis_gain', '_subject_id_{subject_id}', 'con_0001.nii')] + templates += [join( + self.directories.output_dir, + 'subject_level_analysis_gain', '_subject_id_{subject_id}', 'SPM.mat')] + templates += [join( + self.directories.output_dir, + 'subject_level_analysis_gain', '_subject_id_{subject_id}', 'spmT_0001.nii')] + + # Handle loss files + contrast_list = ['0001', '0002'] + templates += [join( + self.directories.output_dir, + 'subject_level_analysis_loss', '_subject_id_{subject_id}', f'con_{contrast_id}.nii')\ + for contrast_id in contrast_list] + templates += [join( + self.directories.output_dir, + 'subject_level_analysis_loss', '_subject_id_{subject_id}', 'SPM.mat')] + templates += [join( + self.directories.output_dir, + 'subject_level_analysis_loss', '_subject_id_{subject_id}', f'spmT_{contrast_id}.nii')\ + for contrast_id in contrast_list] + + # Format with subject_ids + return_list = [] + for template in templates: + return_list += [template.format(subject_id = s) for s in self.subject_list] + + return return_list + + def get_group_level_analysis(self): + """ + Return all workflows for the group level analysis. + + Returns; + - a list of nipype.WorkFlow + """ + return_list = [] + + self.model_list = ['gain', 'loss'] + self.contrast_list = ['0001'] + return_list.append(self.get_group_level_analysis_sub_workflow('equalRange')) + return_list.append(self.get_group_level_analysis_sub_workflow('equalIndifference')) + + self.model_list = ['loss'] + self.contrast_list = ['0001'] + return_list.append(self.get_group_level_analysis_sub_workflow('groupComp')) + + self.model_list = ['loss'] + self.contrast_list = ['0002'] + return_list.append(self.get_group_level_analysis_sub_workflow('equalRange')) + return_list.append(self.get_group_level_analysis_sub_workflow('equalIndifference')) + + return return_list + + def get_group_level_analysis_sub_workflow(self, method): + """ + Return a workflow for the group level analysis. + + Parameters: + - method: one of 'equalRange', 'equalIndifference' or 'groupComp' + + Returns: + - group_level_analysis: nipype.WorkFlow + """ + # Compute the number of participants used to do the analysis + nb_subjects = len(self.subject_list) + + # Infosource - iterate over the list of contrasts + information_source = Node(IdentityInterface( + fields = ['model_type', 'contrast_id']), + name = 'information_source') + information_source.iterables = [ + ('model_type', self.model_list), + ('contrast_id', self.contrast_list) + ] + + # SelectFiles Node + templates = { + # Contrast files for all participants + 'contrasts' : join(self.directories.output_dir, + 'subject_level_analysis_{model_type}', '_subject_id_*', 'con_{contrast_id}.nii' + ) + } + select_files = Node(SelectFiles(templates), name = 'select_files') + select_files.inputs.base_directory = self.directories.dataset_dir + select_files.inputs.force_list = True + + # Datasink - save important files + data_sink = Node(DataSink(), name = 'data_sink') + data_sink.inputs.base_directory = self.directories.output_dir + + # Function Node get_equal_range_subjects + # Get subjects in the equalRange group and in the subject_list + get_equal_range_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_equal_range_subjects' + ) + get_equal_range_subjects.inputs.list_1 = get_group('equalRange') + get_equal_range_subjects.inputs.list_2 = self.subject_list + + # Function Node get_equal_indifference_subjects + # Get subjects in the equalIndifference group and in the subject_list + get_equal_indifference_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_equal_indifference_subjects' + ) + get_equal_indifference_subjects.inputs.list_1 = get_group('equalIndifference') + get_equal_indifference_subjects.inputs.list_2 = self.subject_list + + # Create a function to complete the subject ids out from the get_equal_*_subjects nodes + # If not complete, subject id '001' in search patterns + # would match all contrast files with 'con_0001.nii'. + complete_subject_ids = lambda l : [f'_subject_id_{a}' for a in l] + + # Function Node elements_in_string + # Get contrast files for required subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_contrasts = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] + ), + name = 'get_contrasts', iterfield = 'input_str' + ) + + # Estimate model + estimate_model = Node(EstimateModel(), name = 'estimate_model') + estimate_model.inputs.estimation_method = {'Classical':1} + + # Estimate contrasts + estimate_contrast = Node(EstimateContrast(), name = 'estimate_contrast') + estimate_contrast.inputs.group_contrast = True + + # Create thresholded maps + threshold = MapNode(Threshold(), name = 'threshold', + iterfield = ['stat_image', 'contrast_index']) + threshold.inputs.contrast_index = 1 + threshold.inputs.use_topo_fdr = True + threshold.inputs.use_fwe_correction = False + threshold.inputs.extent_threshold = 0 + threshold.inputs.height_threshold = 0.001 + threshold.inputs.height_threshold_type = 'p-value' + threshold.synchronize = True + + group_level_analysis = Workflow( + base_dir = self.directories.working_dir, + name = f'group_level_analysis_{method}_nsub_{nb_subjects}') + group_level_analysis.connect([ + (information_source, select_files, [ + ('contrast_id', 'contrast_id'), + ('model_type', 'model_type')]), + (select_files, get_contrasts, [('contrasts', 'input_str')]), + (estimate_model, estimate_contrast, [ + ('spm_mat_file', 'spm_mat_file'), + ('residual_image', 'residual_image'), + ('beta_images', 'beta_images')]), + (estimate_contrast, threshold, [ + ('spm_mat_file', 'spm_mat_file'), + ('spmT_images', 'stat_image')]), + (estimate_model, data_sink, [ + ('mask_image', f'group_level_analysis_{method}_nsub_{nb_subjects}.@mask')]), + (estimate_contrast, data_sink, [ + ('spm_mat_file', f'group_level_analysis_{method}_nsub_{nb_subjects}.@spm_mat'), + ('spmT_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@T'), + ('con_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@con')]), + (threshold, data_sink, [ + ('thresholded_map', f'group_level_analysis_{method}_nsub_{nb_subjects}.@thresh')])]) + + if method in ('equalRange', 'equalIndifference'): + estimate_contrast.inputs.contrasts = [ + ('Group', 'T', ['mean'], [1]), + ('Group', 'T', ['mean'], [-1]) + ] + threshold.inputs.contrast_index = [1, 2] + + # Specify design matrix + one_sample_t_test_design = Node(OneSampleTTestDesign(), + name = 'one_sample_t_test_design') + group_level_analysis.connect([ + (get_contrasts, one_sample_t_test_design, [ + (('out_list', clean_list), 'in_files') + ]), + (one_sample_t_test_design, estimate_model, [('spm_mat_file', 'spm_mat_file')]) + ]) + + if method == 'equalRange': + group_level_analysis.connect([ + (get_equal_range_subjects, get_contrasts, [ + (('out_list', complete_subject_ids), 'elements') + ]) + ]) + + elif method == 'equalIndifference': + group_level_analysis.connect([ + (get_equal_indifference_subjects, get_contrasts, [ + (('out_list', complete_subject_ids), 'elements') + ]) + ]) + + elif method == 'groupComp': + estimate_contrast.inputs.contrasts = [ + ('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [1, -1]) + ] + threshold.inputs.contrast_index = [1] + + # Function Node elements_in_string + # Get contrast files for required subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_contrasts_2 = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] + ), + name = 'get_contrasts_2', iterfield = 'input_str' + ) + + # Specify design matrix + two_sample_t_test_design = Node(TwoSampleTTestDesign(), + name = 'two_sample_t_test_design') + + group_level_analysis.connect([ + (select_files, get_contrasts_2, [('contrasts', 'input_str')]), + (get_equal_range_subjects, get_contrasts, [ + (('out_list', complete_subject_ids), 'elements') + ]), + (get_equal_indifference_subjects, get_contrasts_2, [ + (('out_list', complete_subject_ids), 'elements') + ]), + (get_contrasts, two_sample_t_test_design, [ + (('out_list', clean_list), 'group1_files') + ]), + (get_contrasts_2, two_sample_t_test_design, [ + (('out_list', clean_list), 'group2_files') + ]), + (two_sample_t_test_design, estimate_model, [('spm_mat_file', 'spm_mat_file')]) + ]) + + return group_level_analysis + + def get_group_level_outputs(self): + """ Return all names for the files the group level analysis is supposed to generate. """ + + # Handle equalRange and equalIndifference + + ## Contrast id 0001 + parameters = { + 'method': ['equalRange', 'equalIndifference'], + 'file': [ + 'con_0001.nii', 'con_0002.nii', 'mask.nii', 'SPM.mat', + 'spmT_0001.nii', 'spmT_0002.nii', + join('_threshold0', 'spmT_0001_thr.nii'), join('_threshold1', 'spmT_0002_thr.nii') + ], + 'model_type' : ['gain', 'loss'], + 'nb_subjects' : [str(len(self.subject_list))] + } + + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'group_level_analysis_{method}_nsub_{nb_subjects}', + '_contrast_id_0001_model_type_{model_type}', + '{file}' + ) + + return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] + + ## Contrast id 0002 + parameters = { + 'method': ['equalRange', 'equalIndifference'], + 'file': [ + 'con_0001.nii', 'con_0002.nii', 'mask.nii', 'SPM.mat', + 'spmT_0001.nii', 'spmT_0002.nii', + join('_threshold0', 'spmT_0001_thr.nii'), join('_threshold1', 'spmT_0002_thr.nii') + ], + 'nb_subjects' : [str(len(self.subject_list))] + } + + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'group_level_analysis_{method}_nsub_{nb_subjects}', + '_contrast_id_0002_model_type_loss', + '{file}' + ) + + return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] + + # Handle groupComp + parameters = { + 'method': ['groupComp'], + 'file': [ + 'con_0001.nii', 'mask.nii', 'SPM.mat', 'spmT_0001.nii', + join('_threshold0', 'spmT_0001_thr.nii') + ], + 'nb_subjects' : [str(len(self.subject_list))] + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'group_level_analysis_{method}_nsub_{nb_subjects}', + '_contrast_id_0001_model_type_loss', + '{file}' + ) + + return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] + + return return_list + + def get_hypotheses_outputs(self): + """ Return all hypotheses output file names. """ + nb_sub = len(self.subject_list) + files = [ + # Hypothesis 1 + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0001_model_type_gain', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0001_model_type_gain', 'spmT_0001.nii'), + # Hypothesis 2 + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0001_model_type_gain', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0001_model_type_gain', 'spmT_0001.nii'), + # Hypothesis 3 + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0001_model_type_gain', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0001_model_type_gain', 'spmT_0001.nii'), + # Hypothesis 4 + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0001_model_type_gain', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0001_model_type_gain', 'spmT_0001.nii'), + # Hypothesis 5 + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0001_model_type_loss', '_threshold1', 'spmT_0002_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0001_model_type_loss', 'spmT_0002.nii'), + # Hypothesis 6 + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0001_model_type_loss', '_threshold1', 'spmT_0002_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0001_model_type_loss', 'spmT_0002.nii'), + # Hypothesis 7 + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0001_model_type_loss', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0001_model_type_loss', 'spmT_0001.nii'), + # Hypothesis 8 + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0001_model_type_loss', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0001_model_type_loss', 'spmT_0001.nii'), + # Hypothesis 9 + join(f'group_level_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_0001_model_type_loss', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_0001_model_type_loss', 'spmT_0001.nii') + ] + return [join(self.directories.output_dir, f) for f in files] From c7035ee985dc608afa4c14b3d067812a6cbec7e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 23 Jan 2024 17:48:53 +0100 Subject: [PATCH 09/41] First level of U26C --- narps_open/pipelines/team_U26C.py | 343 +++--------------- tests/pipelines/test_team_U26C.py | 151 ++++++++ .../pipelines/team_U26C/confounds.tsv | 3 + 3 files changed, 210 insertions(+), 287 deletions(-) create mode 100644 tests/pipelines/test_team_U26C.py create mode 100644 tests/test_data/pipelines/team_U26C/confounds.tsv diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index a6f434f8..58c1d2bf 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -50,85 +50,64 @@ def get_run_level_analysis(self): # @staticmethod # Starting python 3.10, staticmethod should be used here # Otherwise it produces a TypeError: 'staticmethod' object is not callable - def get_subject_information(event_files: list, model: str): + def get_subject_information(event_files: list): """ Create Bunchs for SpecifySPMModel. Parameters : - event_files: list of str, list of events files (one per run) for the subject - - model: str, either 'gain' or 'loss' Returns : - subject_information : list of Bunch for 1st level analysis. """ - '''Picks onsets and durations per condition and adds them to lists. - This function specifically picks onsets for the speech vs speaker - where the presentation is clear or in noise. - The function accepts event files. - - 'subject_id' is a string, i.e., sub-001 - ''' + from nipype.interfaces.base import Bunch - cond_names = ['gamble'] - onset = {} - duration = {} + onsets = {} + durations = {} weights_gain = {} weights_loss = {} - runs = ['01', '02', '03', '04'] - - for r in range(len(runs)): # Loop over number of runs. - onset.update({s + '_run' + str(r+1): [] for s in cond_names}) - duration.update({s + '_run' + str(r+1): [] for s in cond_names}) - weights_gain.update({'gain_run' + str(r+1): []}) - weights_loss.update({'loss_run' + str(r+1): []}) - - base_name = '/data/pt_nmc002/other/narps/event_tsvs/' - # subject_id = 'sub-001' - for ir, run in enumerate(runs): - f_events = base_name + subject_id + \ - '_task-MGT_run-' + runs[ir] + '_events.tsv' - with open(f_events, 'rt') as f: - next(f) # skip the header - for line in f: - info = line.strip().split() - for cond in cond_names: - val = cond + '_run' + str(ir+1) - val_gain = 'gain_run' + str(ir+1) - val_loss = 'loss_run' + str(ir+1) - onset[val].append(float(info[0])) - duration[val].append(float(info[1])) - weights_gain[val_gain].append(float(info[2])) - weights_loss[val_loss].append(float(info[3])) - # if cond == 'gain': - # weights[val].append(float(info[2])) - # elif cond == 'loss': - # weights[val].append(float(info[3])) - # elif cond == 'task-activ': - # weights[val].append(float(1)) - from nipype.interfaces.base import Bunch - # Bunching is done per run, i.e. cond1_run1, cond2_run1, etc. - subjectinfo = [] - for r in range(len(runs)): - - cond = [c + '_run' + str(r+1) for c in cond_names] - gain = 'gain_run' + str(r+1) - loss = 'loss_run' + str(r+1) - - subjectinfo.insert(r, - Bunch(conditions=cond, - onsets=[onset[k] for k in cond], - durations=[duration[k] for k in cond], - amplitudes=None, - tmod=None, - pmod=[Bunch(name=[gain, loss], - poly=[1, 1], - param=[weights_gain[gain], - weights_loss[loss]])], - regressor_names=None, - regressors=None)) - - return subjectinfo + subject_info = [] + + for run_id, event_file in enumerate(event_files): + + trial_key = f'gamble_run{run_id + 1}' + gain_key = f'gain_run{run_id + 1}' + loss_key = f'loss_run{run_id + 1}' + + onsets.update({trial_key: []}) + durations.update({trial_key: []}) + weights_gain.update({gain_key: []}) + weights_loss.update({loss_key: []}) + + with open(event_file, 'rt') as file: + next(file) # skip the header + + for line in file: + info = line.strip().split() + onsets[trial_key].append(float(info[0])) + durations[trial_key].append(float(info[1])) + weights_gain[gain_key].append(float(info[2])) + weights_loss[loss_key].append(float(info[3])) + + # Create a Bunch per run, i.e. cond1_run1, cond2_run1, etc. + subject_info.append( + Bunch( + conditions = [trial_key], + onsets = [onsets[trial_key]], + durations = [durations[trial_key]], + amplitudes = None, + tmod = None, + pmod = [Bunch( + name = [gain_key, loss_key], + poly = [1, 1], + param = [weights_gain[gain_key], weights_loss[loss_key]] + )], + regressor_names = None, + regressors = None + )) + + return subject_info # @staticmethod # Starting python 3.10, staticmethod should be used here # Otherwise it produces a TypeError: 'staticmethod' object is not callable @@ -191,7 +170,7 @@ def get_subject_level_analysis(self): # Identitiy interface Node - to iterate over subject_id and run infosource = Node(interface=IdentityInterface(fields=['subject_id']), name = 'infosource') - infosource.iterables = [('subject_id', subs)] + infosource.iterables = [('subject_id', self.subject_list)] # Select files from derivatives templates = { @@ -210,13 +189,13 @@ def get_subject_level_analysis(self): # Smooth warped functionals. smooth = Node(Smooth(), name = 'smooth') - smooth.inputs.overwrite = False - smooth.iterables = ('fwhm', fwhmlist) + smooth.inputs.fwhm = self.fwhm + smooth.overwrite = False # Function node get_subject_information - get subject specific condition information getsubinforuns = Node(Function( - function = pick_onsets, - input_names = ['subject_id'], + function = self.get_subject_information, + input_names = ['event_files'], output_names = ['subject_info'] ), name = 'getsubinforuns') @@ -231,49 +210,38 @@ def get_subject_level_analysis(self): confounds.inputs.run_id = self.run_list modelspec = Node(SpecifySPMModel(), name = 'modelspec') - modelspec.inputs.overwrite = False modelspec.inputs.concatenate_runs = False modelspec.inputs.input_units = 'secs' modelspec.inputs.output_units = 'secs' modelspec.inputs.time_repetition = TaskInformation()['RepetitionTime'] modelspec.inputs.high_pass_filter_cutoff = 128 + modelspec.overwrite = False level1design = Node(Level1Design(), name = 'level1design') - level1design.inputs.overwrite = False level1design.inputs.bases = {'hrf': {'derivs': [0, 0]}} level1design.inputs.timing_units = 'secs' level1design.inputs.interscan_interval = TaskInformation()['RepetitionTime'] + level1design.overwrite = False level1estimate = Node(EstimateModel(), name = 'level1estimate') - level1estimate.inputs.overwrite = False level1estimate.inputs.estimation_method = {'Classical': 1} + level1estimate.overwrite = False contrast_estimate = Node(EstimateContrast(), name = 'contraste_estimate') - contrast_estimate.inputs.overwrite=False, + contrast_estimate.inputs.contrasts = self.subject_level_contrasts contrast_estimate.config = {'execution': {'remove_unnecessary_outputs': False}} - - contrasts = Node(Function( - function = con_setup, - input_names = ['subject_id'], - output_names = ['contrasts'] - ), - name = 'contrasts') + contrast_estimate.overwrite = False subject_level_analysis = Workflow( base_dir = self.directories.working_dir, name = 'subject_level_analysis' ) subject_level_analysis.connect([ (infosource, selectderivs, [('subject_id', 'subject_id')]), - (infosource, contrasts, [('subject_id', 'subject_id')]), - (infosource, getsubinforuns, [('subject_id', 'subject_id')]), + (infosource, getsubinforuns, [('events', 'event_files')]), (infosource, confounds, [('subject_id', 'subject_id')]), (selectderivs, gunzip, [('func', 'in_file')]), (selectderivs, confounds, [('confounds', 'filepath')]), (gunzip, smooth, [('out_file', 'in_files')]), - (contrasts, contrast_estimate, [('contrasts', 'contrasts')]), - (contrast_estimate, selectcontrast, [('con_images', 'inlist')]), - (selectcontrast, overlaystats, [('out', 'stat_image')]), - (overlaystats, slicestats, [('out_file', 'in_file')]), (getsubinforuns, modelspec, [('subject_info', 'subject_info')]), (confounds, modelspec, [('confounds_file', 'realignment_parameters')]), (smooth, modelspec, [('smoothed_files', 'functional_runs')]), @@ -329,23 +297,6 @@ def get_group_level_analysis(self): Returns; - a list of nipype.WorkFlow """ - return_list = [] - - self.model_list = ['gain', 'loss'] - self.contrast_list = ['0001'] - return_list.append(self.get_group_level_analysis_sub_workflow('equalRange')) - return_list.append(self.get_group_level_analysis_sub_workflow('equalIndifference')) - - self.model_list = ['loss'] - self.contrast_list = ['0001'] - return_list.append(self.get_group_level_analysis_sub_workflow('groupComp')) - - self.model_list = ['loss'] - self.contrast_list = ['0002'] - return_list.append(self.get_group_level_analysis_sub_workflow('equalRange')) - return_list.append(self.get_group_level_analysis_sub_workflow('equalIndifference')) - - return return_list def get_group_level_analysis_sub_workflow(self, method): """ @@ -357,188 +308,6 @@ def get_group_level_analysis_sub_workflow(self, method): Returns: - group_level_analysis: nipype.WorkFlow """ - # Compute the number of participants used to do the analysis - nb_subjects = len(self.subject_list) - - # Infosource - iterate over the list of contrasts - information_source = Node(IdentityInterface( - fields = ['model_type', 'contrast_id']), - name = 'information_source') - information_source.iterables = [ - ('model_type', self.model_list), - ('contrast_id', self.contrast_list) - ] - - # SelectFiles Node - templates = { - # Contrast files for all participants - 'contrasts' : join(self.directories.output_dir, - 'subject_level_analysis_{model_type}', '_subject_id_*', 'con_{contrast_id}.nii' - ) - } - select_files = Node(SelectFiles(templates), name = 'select_files') - select_files.inputs.base_directory = self.directories.dataset_dir - select_files.inputs.force_list = True - - # Datasink - save important files - data_sink = Node(DataSink(), name = 'data_sink') - data_sink.inputs.base_directory = self.directories.output_dir - - # Function Node get_equal_range_subjects - # Get subjects in the equalRange group and in the subject_list - get_equal_range_subjects = Node(Function( - function = list_intersection, - input_names = ['list_1', 'list_2'], - output_names = ['out_list'] - ), - name = 'get_equal_range_subjects' - ) - get_equal_range_subjects.inputs.list_1 = get_group('equalRange') - get_equal_range_subjects.inputs.list_2 = self.subject_list - - # Function Node get_equal_indifference_subjects - # Get subjects in the equalIndifference group and in the subject_list - get_equal_indifference_subjects = Node(Function( - function = list_intersection, - input_names = ['list_1', 'list_2'], - output_names = ['out_list'] - ), - name = 'get_equal_indifference_subjects' - ) - get_equal_indifference_subjects.inputs.list_1 = get_group('equalIndifference') - get_equal_indifference_subjects.inputs.list_2 = self.subject_list - - # Create a function to complete the subject ids out from the get_equal_*_subjects nodes - # If not complete, subject id '001' in search patterns - # would match all contrast files with 'con_0001.nii'. - complete_subject_ids = lambda l : [f'_subject_id_{a}' for a in l] - - # Function Node elements_in_string - # Get contrast files for required subjects - # Note : using a MapNode with elements_in_string requires using clean_list to remove - # None values from the out_list - get_contrasts = MapNode(Function( - function = elements_in_string, - input_names = ['input_str', 'elements'], - output_names = ['out_list'] - ), - name = 'get_contrasts', iterfield = 'input_str' - ) - - # Estimate model - estimate_model = Node(EstimateModel(), name = 'estimate_model') - estimate_model.inputs.estimation_method = {'Classical':1} - - # Estimate contrasts - estimate_contrast = Node(EstimateContrast(), name = 'estimate_contrast') - estimate_contrast.inputs.group_contrast = True - - # Create thresholded maps - threshold = MapNode(Threshold(), name = 'threshold', - iterfield = ['stat_image', 'contrast_index']) - threshold.inputs.contrast_index = 1 - threshold.inputs.use_topo_fdr = True - threshold.inputs.use_fwe_correction = False - threshold.inputs.extent_threshold = 0 - threshold.inputs.height_threshold = 0.001 - threshold.inputs.height_threshold_type = 'p-value' - threshold.synchronize = True - - group_level_analysis = Workflow( - base_dir = self.directories.working_dir, - name = f'group_level_analysis_{method}_nsub_{nb_subjects}') - group_level_analysis.connect([ - (information_source, select_files, [ - ('contrast_id', 'contrast_id'), - ('model_type', 'model_type')]), - (select_files, get_contrasts, [('contrasts', 'input_str')]), - (estimate_model, estimate_contrast, [ - ('spm_mat_file', 'spm_mat_file'), - ('residual_image', 'residual_image'), - ('beta_images', 'beta_images')]), - (estimate_contrast, threshold, [ - ('spm_mat_file', 'spm_mat_file'), - ('spmT_images', 'stat_image')]), - (estimate_model, data_sink, [ - ('mask_image', f'group_level_analysis_{method}_nsub_{nb_subjects}.@mask')]), - (estimate_contrast, data_sink, [ - ('spm_mat_file', f'group_level_analysis_{method}_nsub_{nb_subjects}.@spm_mat'), - ('spmT_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@T'), - ('con_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@con')]), - (threshold, data_sink, [ - ('thresholded_map', f'group_level_analysis_{method}_nsub_{nb_subjects}.@thresh')])]) - - if method in ('equalRange', 'equalIndifference'): - estimate_contrast.inputs.contrasts = [ - ('Group', 'T', ['mean'], [1]), - ('Group', 'T', ['mean'], [-1]) - ] - threshold.inputs.contrast_index = [1, 2] - - # Specify design matrix - one_sample_t_test_design = Node(OneSampleTTestDesign(), - name = 'one_sample_t_test_design') - group_level_analysis.connect([ - (get_contrasts, one_sample_t_test_design, [ - (('out_list', clean_list), 'in_files') - ]), - (one_sample_t_test_design, estimate_model, [('spm_mat_file', 'spm_mat_file')]) - ]) - - if method == 'equalRange': - group_level_analysis.connect([ - (get_equal_range_subjects, get_contrasts, [ - (('out_list', complete_subject_ids), 'elements') - ]) - ]) - - elif method == 'equalIndifference': - group_level_analysis.connect([ - (get_equal_indifference_subjects, get_contrasts, [ - (('out_list', complete_subject_ids), 'elements') - ]) - ]) - - elif method == 'groupComp': - estimate_contrast.inputs.contrasts = [ - ('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [1, -1]) - ] - threshold.inputs.contrast_index = [1] - - # Function Node elements_in_string - # Get contrast files for required subjects - # Note : using a MapNode with elements_in_string requires using clean_list to remove - # None values from the out_list - get_contrasts_2 = MapNode(Function( - function = elements_in_string, - input_names = ['input_str', 'elements'], - output_names = ['out_list'] - ), - name = 'get_contrasts_2', iterfield = 'input_str' - ) - - # Specify design matrix - two_sample_t_test_design = Node(TwoSampleTTestDesign(), - name = 'two_sample_t_test_design') - - group_level_analysis.connect([ - (select_files, get_contrasts_2, [('contrasts', 'input_str')]), - (get_equal_range_subjects, get_contrasts, [ - (('out_list', complete_subject_ids), 'elements') - ]), - (get_equal_indifference_subjects, get_contrasts_2, [ - (('out_list', complete_subject_ids), 'elements') - ]), - (get_contrasts, two_sample_t_test_design, [ - (('out_list', clean_list), 'group1_files') - ]), - (get_contrasts_2, two_sample_t_test_design, [ - (('out_list', clean_list), 'group2_files') - ]), - (two_sample_t_test_design, estimate_model, [('spm_mat_file', 'spm_mat_file')]) - ]) - - return group_level_analysis def get_group_level_outputs(self): """ Return all names for the files the group level analysis is supposed to generate. """ diff --git a/tests/pipelines/test_team_U26C.py b/tests/pipelines/test_team_U26C.py new file mode 100644 index 00000000..b0971e07 --- /dev/null +++ b/tests/pipelines/test_team_U26C.py @@ -0,0 +1,151 @@ +#!/usr/bin/python +# coding: utf-8 + +""" Tests of the 'narps_open.pipelines.team_U26C' module. + +Launch this test with PyTest + +Usage: +====== + pytest -q test_team_U26C.py + pytest -q test_team_U26C.py -k +""" +from os import mkdir +from os.path import join, exists +from shutil import rmtree +from filecmp import cmp + +from pytest import helpers, mark, fixture +from numpy import isclose +from nipype import Workflow +from nipype.interfaces.base import Bunch + +from narps_open.utils.configuration import Configuration +from narps_open.pipelines.team_U26C import PipelineTeamU26C + +TEMPORARY_DIR = join(Configuration()['directories']['test_runs'], 'test_U26C') + +@fixture +def remove_test_dir(): + """ A fixture to remove temporary directory created by tests """ + + rmtree(TEMPORARY_DIR, ignore_errors = True) + mkdir(TEMPORARY_DIR) + yield # test runs here + #rmtree(TEMPORARY_DIR, ignore_errors = True) + +def compare_float_2d_arrays(array_1, array_2): + """ Assert array_1 and array_2 are close enough """ + + assert len(array_1) == len(array_2) + for reference_array, test_array in zip(array_1, array_2): + assert len(reference_array) == len(test_array) + assert isclose(reference_array, test_array).all() + +class TestPipelinesTeamU26C: + """ A class that contains all the unit tests for the PipelineTeamU26C class.""" + + @staticmethod + @mark.unit_test + def test_create(): + """ Test the creation of a PipelineTeamU26C object """ + + pipeline = PipelineTeamU26C() + + # 1 - check the parameters + assert pipeline.fwhm == 5.0 + assert pipeline.team_id == 'U26C' + + # 2 - check workflows + assert pipeline.get_preprocessing() is None + assert pipeline.get_run_level_analysis() is None + assert isinstance(pipeline.get_subject_level_analysis(), Workflow) + group_level = pipeline.get_group_level_analysis() + + """assert len(group_level) == 3 + for sub_workflow in group_level: + assert isinstance(sub_workflow, Workflow)""" + + @staticmethod + @mark.unit_test + def test_outputs(): + """ Test the expected outputs of a PipelineTeamU26C object """ + pipeline = PipelineTeamU26C() + # 1 - 1 subject outputs + """pipeline.subject_list = ['001'] + assert len(pipeline.get_preprocessing_outputs()) == 0 + assert len(pipeline.get_run_level_outputs()) == 0 + assert len(pipeline.get_subject_level_outputs()) == 7 + assert len(pipeline.get_group_level_outputs()) == 63 + assert len(pipeline.get_hypotheses_outputs()) == 18 + + # 2 - 4 subjects outputs + pipeline.subject_list = ['001', '002', '003', '004'] + assert len(pipeline.get_preprocessing_outputs()) == 0 + assert len(pipeline.get_run_level_outputs()) == 0 + assert len(pipeline.get_subject_level_outputs()) == 28 + assert len(pipeline.get_group_level_outputs()) == 63 + assert len(pipeline.get_hypotheses_outputs()) == 18""" + + @staticmethod + @mark.unit_test + def test_subject_information(): + """ Test the get_subject_information method """ + + # Get test files + test_file = join(Configuration()['directories']['test_data'], 'pipelines', 'events.tsv') + info = PipelineTeamU26C.get_subject_information([test_file, test_file]) + + # Compare bunches to expected + bunch = info[0] + assert isinstance(bunch, Bunch) + assert bunch.conditions == ['gamble_run1'] + compare_float_2d_arrays(bunch.onsets, [[4.071, 11.834, 19.535, 27.535, 36.435]]) + compare_float_2d_arrays(bunch.durations, [[4.0, 4.0, 4.0, 4.0, 4.0]]) + assert bunch.amplitudes == None + assert bunch.tmod == None + assert bunch.pmod[0].name == ['gain_run1', 'loss_run1'] + assert bunch.pmod[0].poly == [1, 1] + compare_float_2d_arrays(bunch.pmod[0].param, [[14.0, 34.0, 38.0, 10.0, 16.0], [6.0, 14.0, 19.0, 15.0, 17.0]]) + assert bunch.regressor_names == None + assert bunch.regressors == None + + bunch = info[1] + assert isinstance(bunch, Bunch) + assert bunch.conditions == ['gamble_run2'] + compare_float_2d_arrays(bunch.onsets, [[4.071, 11.834, 19.535, 27.535, 36.435]]) + compare_float_2d_arrays(bunch.durations, [[4.0, 4.0, 4.0, 4.0, 4.0]]) + assert bunch.amplitudes == None + assert bunch.tmod == None + assert bunch.pmod[0].name == ['gain_run2', 'loss_run2'] + assert bunch.pmod[0].poly == [1, 1] + compare_float_2d_arrays(bunch.pmod[0].param, [[14.0, 34.0, 38.0, 10.0, 16.0], [6.0, 14.0, 19.0, 15.0, 17.0]]) + assert bunch.regressor_names == None + assert bunch.regressors == None + + @staticmethod + @mark.unit_test + def test_confounds_file(remove_test_dir): + """ Test the get_confounds_file method """ + + confounds_file = join( + Configuration()['directories']['test_data'], 'pipelines', 'confounds.tsv') + reference_file = join( + Configuration()['directories']['test_data'], 'pipelines', 'team_U26C', 'confounds.tsv') + + # Get new confounds file + PipelineTeamU26C.get_confounds_file(confounds_file, 'sid', 'rid', TEMPORARY_DIR) + + # Check confounds file was created + created_confounds_file = join( + TEMPORARY_DIR, 'confounds_files', 'confounds_file_sub-sid_run-rid.tsv') + assert exists(created_confounds_file) + + # Check contents + assert cmp(reference_file, created_confounds_file) + + @staticmethod + @mark.pipeline_test + def test_execution(): + """ Test the execution of a PipelineTeamU26C and compare results """ + helpers.test_pipeline_evaluation('U26C') diff --git a/tests/test_data/pipelines/team_U26C/confounds.tsv b/tests/test_data/pipelines/team_U26C/confounds.tsv new file mode 100644 index 00000000..05925432 --- /dev/null +++ b/tests/test_data/pipelines/team_U26C/confounds.tsv @@ -0,0 +1,3 @@ +6551.281999999999 6476.4653 0.0 0.0 0.0 0.0 -0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 +6484.7285 6473.4890000000005 -0.00996895 -0.0313444 -3.00931e-06 0.00132687 -0.000384193 -0.00016819 -0.00996895 -0.0313444 -3.00931e-06 0.00132687 -0.000384193 -0.00016819 +6441.5337 6485.7256 -2.56954e-05 -0.00923735 0.0549667 0.000997278 -0.00019745 -0.000398988 0.009943254600000001 0.022107050000000003 0.05496970931 -0.00032959199999999986 0.000186743 -0.00023079800000000002 From 4d0a6ecde1603cc6e06b16446357ee90b471e594 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 23 Jan 2024 17:56:18 +0100 Subject: [PATCH 10/41] First level of U26C --- narps_open/pipelines/__init__.py | 2 +- narps_open/pipelines/team_U26C.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/__init__.py b/narps_open/pipelines/__init__.py index e0dba921..dc8ca6f9 100644 --- a/narps_open/pipelines/__init__.py +++ b/narps_open/pipelines/__init__.py @@ -69,7 +69,7 @@ 'R9K3': None, 'SM54': None, 'T54A': None, - 'U26C': None, + 'U26C': 'PipelineTeamU26C', 'UI76': None, 'UK24': None, 'V55J': None, diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index 58c1d2bf..be002afd 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -167,7 +167,7 @@ def get_subject_level_analysis(self): Returns: - subject_level_analysis : nipype.WorkFlow """ - # Identitiy interface Node - to iterate over subject_id and run + # Identity interface Node - to iterate over subject_id and run infosource = Node(interface=IdentityInterface(fields=['subject_id']), name = 'infosource') infosource.iterables = [('subject_id', self.subject_list)] From 623118da29b852d367e523b382a96f2bab59614e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 23 Jan 2024 18:17:45 +0100 Subject: [PATCH 11/41] First level of U26C --- narps_open/pipelines/team_U26C.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index be002afd..dc33bbb9 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -237,8 +237,8 @@ def get_subject_level_analysis(self): ) subject_level_analysis.connect([ (infosource, selectderivs, [('subject_id', 'subject_id')]), - (infosource, getsubinforuns, [('events', 'event_files')]), (infosource, confounds, [('subject_id', 'subject_id')]), + (selectderivs, getsubinforuns, [('events', 'event_files')]), (selectderivs, gunzip, [('func', 'in_file')]), (selectderivs, confounds, [('confounds', 'filepath')]), (gunzip, smooth, [('out_file', 'in_files')]), From 444cfc3467f67c3ecb318e99f0a78176caf80440 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 24 Jan 2024 09:11:24 +0100 Subject: [PATCH 12/41] Select files node error --- narps_open/pipelines/team_U26C.py | 1 + 1 file changed, 1 insertion(+) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index dc33bbb9..b9481c2f 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -182,6 +182,7 @@ def get_subject_level_analysis(self): '{subject_id}_task-MGT_run-*_events.tsv') } selectderivs = Node(SelectFiles(templates), name = 'selectderivs') + selectderivs.inputs.base_directory = self.directories.dataset_dir selectderivs.inputs.sort_filelist = True # Gunzip - gunzip files because SPM do not use .nii.gz files From 53cfa23cbd81eeb7ee39dc962daa0068b0c44982 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 24 Jan 2024 09:20:02 +0100 Subject: [PATCH 13/41] Select files node error --- narps_open/pipelines/team_U26C.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index b9481c2f..6874882e 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -174,11 +174,11 @@ def get_subject_level_analysis(self): # Select files from derivatives templates = { - 'func': join('derivatives', 'fmriprep', '{subject_id}', 'func', + 'func': join('derivatives', 'fmriprep', 'sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-*_bold_space-MNI152NLin2009cAsym_preproc.nii.gz'), 'confounds' : join('derivatives', 'fmriprep', 'sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-*_bold_confounds.tsv'), - 'events': join('derivatives', 'fmriprep', + 'events': join('sub-{subject_id}', 'func', '{subject_id}_task-MGT_run-*_events.tsv') } selectderivs = Node(SelectFiles(templates), name = 'selectderivs') From c7a1462eba2e9ded90a939922b240e9bfcdee0ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 24 Jan 2024 10:41:33 +0100 Subject: [PATCH 14/41] Select events files --- narps_open/pipelines/team_U26C.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index 6874882e..76645ce2 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -179,7 +179,7 @@ def get_subject_level_analysis(self): 'confounds' : join('derivatives', 'fmriprep', 'sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-*_bold_confounds.tsv'), 'events': join('sub-{subject_id}', 'func', - '{subject_id}_task-MGT_run-*_events.tsv') + 'sub-{subject_id}_task-MGT_run-*_events.tsv') } selectderivs = Node(SelectFiles(templates), name = 'selectderivs') selectderivs.inputs.base_directory = self.directories.dataset_dir From fc01b4045d84e44b72a220f1622d6e1c4610a331 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 24 Jan 2024 11:57:45 +0100 Subject: [PATCH 15/41] Runner configuration --- narps_open/runner.py | 10 +++++++--- narps_open/utils/configuration/default_config.toml | 3 +++ narps_open/utils/configuration/testing_config.toml | 3 +++ 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/narps_open/runner.py b/narps_open/runner.py index 32c80180..ef70badd 100644 --- a/narps_open/runner.py +++ b/narps_open/runner.py @@ -8,7 +8,7 @@ from random import choices from argparse import ArgumentParser -from nipype import Workflow +from nipype import Workflow, config from narps_open.pipelines import Pipeline, implemented_pipelines from narps_open.data.participants import ( @@ -95,6 +95,10 @@ def start(self, first_level_only: bool = False, group_level_only: bool = False) (= preprocessing + run level + subject_level) - group_level_only: bool (False by default), run the group level workflows only """ + # Set global nipype config for pipeline execution + config.update_config(dict(execution = {'stop_on_first_crash': False})) + + # Disclaimer print('Starting pipeline for team: '+ f'{self.team_id}, with {len(self.subjects)} subjects: {self.subjects}') @@ -126,7 +130,7 @@ def start(self, first_level_only: bool = False, group_level_only: bool = False) raise AttributeError('Workflow must be of type nipype.Workflow') if nb_procs > 1: - sub_workflow.run('MultiProc', plugin_args={'n_procs': nb_procs}) + sub_workflow.run('MultiProc', plugin_args = {'n_procs': nb_procs}) else: sub_workflow.run() else: @@ -134,7 +138,7 @@ def start(self, first_level_only: bool = False, group_level_only: bool = False) raise AttributeError('Workflow must be of type nipype.Workflow') if nb_procs > 1: - workflow.run('MultiProc', plugin_args={'n_procs': nb_procs}) + workflow.run('MultiProc', plugin_args = {'n_procs': nb_procs}) else: workflow.run() diff --git a/narps_open/utils/configuration/default_config.toml b/narps_open/utils/configuration/default_config.toml index 24bdd98a..81f312a9 100644 --- a/narps_open/utils/configuration/default_config.toml +++ b/narps_open/utils/configuration/default_config.toml @@ -10,5 +10,8 @@ narps_results = "data/results/" [runner] nb_procs = 8 # Maximum number of threads executed by the runner +[pipelines] +remove_unused_data = true # set to true to activate remove nodes of pipelines + [results] neurovault_naming = true # true if results files are saved using the neurovault naming, false if they use naming of narps diff --git a/narps_open/utils/configuration/testing_config.toml b/narps_open/utils/configuration/testing_config.toml index 40733c5a..4d9cb110 100644 --- a/narps_open/utils/configuration/testing_config.toml +++ b/narps_open/utils/configuration/testing_config.toml @@ -13,6 +13,9 @@ test_runs = "run/" nb_procs = 8 # Maximum number of threads executed by the runner nb_trials = 3 # Maximum number of executions to have the pipeline executed completely +[pipelines] +remove_unused_data = true # set to true to activate remove nodes of pipelines + [results] neurovault_naming = true # true if results files are saved using the neurovault naming, false if they use naming of narps From f93ae586a0337a560b63ad2dbebc9c7218e81d8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 24 Jan 2024 13:36:49 +0100 Subject: [PATCH 16/41] Remove dir func in --- narps_open/core/common.py | 14 ++++++++++++++ tests/core/test_common.py | 30 +++++++++++++++++++++++++++++- 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/narps_open/core/common.py b/narps_open/core/common.py index c40f2907..f0d801ea 100644 --- a/narps_open/core/common.py +++ b/narps_open/core/common.py @@ -20,6 +20,20 @@ def remove_file(_, file_name: str) -> None: except OSError as error: print(error) +def remove_directory(_, directory_name: str) -> None: + """ + Fully remove directory generated by a Node, once it is not needed anymore. + This function is meant to be used in a Nipype Function Node. + + Parameters: + - _: input only used for triggering the Node + - directory_name: str, a single absolute path of the directory to remove + """ + # This import must stay inside the function, as required by Nipype + from shutil import rmtree + + rmtree(directory_name, ignore_errors = True) + def elements_in_string(input_str: str, elements: list) -> str: #| None: """ Return input_str if it contains one element of the elements list. diff --git a/tests/core/test_common.py b/tests/core/test_common.py index 64c385e9..bc5962fd 100644 --- a/tests/core/test_common.py +++ b/tests/core/test_common.py @@ -10,7 +10,7 @@ pytest -q test_common.py pytest -q test_common.py -k """ -from os import mkdir +from os import mkdir, makedirs from os.path import join, exists, abspath from shutil import rmtree from pathlib import Path @@ -59,6 +59,34 @@ def test_remove_file(remove_test_dir): # Check file is removed assert not exists(test_file_path) + + @staticmethod + @mark.unit_test + def test_remove_directory(remove_test_dir): + """ Test the remove_directory function """ + + # Create a single inside dir tree + dir_path = abspath(join(TEMPORARY_DIR, 'dir_1', 'dir_2')) + makedirs(dir_path) + file_path = abspath(join(TEMPORARY_DIR, 'dir_1', 'dir_2', 'file1.txt')) + Path(file_path).touch() + test_dir_path = abspath(join(TEMPORARY_DIR, 'dir_1')) + + # Check file exist + assert exists(file_path) + + # Create a Nipype Node using remove_files + test_remove_dir_node = Node(Function( + function = co.remove_directory, + input_names = ['_', 'directory_name'], + output_names = [] + ), name = 'test_remove_dir_node') + test_remove_dir_node.inputs._ = '' + test_remove_dir_node.inputs.directory_name = test_dir_path + test_remove_dir_node.run() + + # Check file is removed + assert not exists(test_dir_path) @staticmethod @mark.unit_test From f02e6cc2d657c025b543e997788040e04a1ce610 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 24 Jan 2024 14:10:28 +0100 Subject: [PATCH 17/41] Runner always stops on first crash --- narps_open/runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/runner.py b/narps_open/runner.py index ef70badd..fb472eb9 100644 --- a/narps_open/runner.py +++ b/narps_open/runner.py @@ -96,7 +96,7 @@ def start(self, first_level_only: bool = False, group_level_only: bool = False) - group_level_only: bool (False by default), run the group level workflows only """ # Set global nipype config for pipeline execution - config.update_config(dict(execution = {'stop_on_first_crash': False})) + config.update_config(dict(execution = {'stop_on_first_crash': 'True'})) # Disclaimer print('Starting pipeline for team: '+ From 060b64fca7183d4738cc3da461e84a77c62925b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 24 Jan 2024 14:14:20 +0100 Subject: [PATCH 18/41] [TEST][helpers] not failing test if correlation under threshold --- tests/conftest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index 42badb65..14275bec 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -158,4 +158,5 @@ def test_pipeline_evaluation(team_id: str): file.write('success' if passed else 'failure') file.write(f' | {[round(i, 2) for i in results]} |\n') - assert passed + if not passed: + break From 416ec56bf685f29527827266d442ed6115e7d278 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 24 Jan 2024 16:10:13 +0100 Subject: [PATCH 19/41] [DOC] narps_open.core.common --- .github/workflows/test_changes.yml | 2 +- docs/core.md | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test_changes.yml b/.github/workflows/test_changes.yml index e0608011..d411d20b 100644 --- a/.github/workflows/test_changes.yml +++ b/.github/workflows/test_changes.yml @@ -58,5 +58,5 @@ jobs: - name: Execute tests with pytest run: | if [[ "${{ needs.identify-tests.outputs.tests }}" != "" ]]; then - pytest -s -q ${{ needs.identify-tests.outputs.tests }} + pytest -s -q ${{ needs.identify-tests.outputs.tests }} -m "not pipeline_test" fi diff --git a/docs/core.md b/docs/core.md index 2ea8e536..311faea4 100644 --- a/docs/core.md +++ b/docs/core.md @@ -72,6 +72,15 @@ from narps_open.core.common import remove_file remove_file('/path/to/the/image.nii.gz') ``` +* `remove_directory` remove a directory when it is not needed anymore (to save disk space) + +```python +from narps_open.core.common import remove_directory + +# Remove the directory /path/to/ +remove_directory('/path/to/') +``` + * `elements_in_string` : return the first input parameter if it contains one element of second parameter (None otherwise). ```python From 88b4aa2dc70ca65f4e92e7d692377930af8e5a7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 25 Jan 2024 11:43:48 +0100 Subject: [PATCH 20/41] [ENH][TEST] narps_open.core.nodes module --- narps_open/core/nodes.py | 41 +++++++++++++++++++++ tests/core/test_nodes.py | 78 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 119 insertions(+) create mode 100644 narps_open/core/nodes.py create mode 100644 tests/core/test_nodes.py diff --git a/narps_open/core/nodes.py b/narps_open/core/nodes.py new file mode 100644 index 00000000..58438015 --- /dev/null +++ b/narps_open/core/nodes.py @@ -0,0 +1,41 @@ +#!/usr/bin/python +# coding: utf-8 + +""" Generate useful and recurrent nodes to write pipelines """ + +from abc import ABC, abstractmethod + +from nipype import Node +from nipype.interfaces.utility import Function + +from narps_open.core.common import remove_directory, remove_file + +class NodeCreator(ABC): + """ An abstract class to shape what node creators must provide """ + + @abstractmethod + def create_node(self, name: str) -> Node: + """ Return a new Node (the interface of the Node is defined by specialized classes) + Arguments: + name, str : the name of the node + """ + +class RemoveDirectoryNodeCreator(NodeCreator): + """ A node creator that provides an interface allowing to remove a directory """ + + def create_node(self, name: str) -> Node: + return Node(Function( + function = remove_directory, + input_names = ['_', 'directory_name'], + output_names = [] + ), name = name) + +class RemoveFileNodeCreator(NodeCreator): + """ A node creator that provides an interface allowing to remove a file """ + + def create_node(self, name: str) -> Node: + return Node(Function( + function = remove_file, + input_names = ['_', 'file_name'], + output_names = [] + ), name = name) diff --git a/tests/core/test_nodes.py b/tests/core/test_nodes.py new file mode 100644 index 00000000..46c11fff --- /dev/null +++ b/tests/core/test_nodes.py @@ -0,0 +1,78 @@ +#!/usr/bin/python +# coding: utf-8 + +""" Tests of the 'narps_open.core.nodes' module. + +Launch this test with PyTest + +Usage: +====== + pytest -q test_nodes.py + pytest -q test_nodes.py -k +""" + +from pytest import mark, raises + +from nipype import Node +from nipype.interfaces.utility import Select, Function + +import narps_open.core.nodes as nd +from narps_open.core.common import remove_directory, remove_file + +class TestNodeCreator: + """ A class that contains all the unit tests for the NodeCreator class.""" + + @staticmethod + @mark.unit_test + def test_create_node(): + """ Test the create_node method """ + + # It is not possible to create an instance of a NodeCreator + with raises(Exception): + nd.NodeCreator().create_node('node_name') + + # Define a child for NodeCreator + class ErrorNC(nd.NodeCreator): + def random_method(self): + pass + + # Test it cannot be instanciated + with raises(Exception): + ErrorNC().create_node('node_name') + + # Define another child for NodeCreator + class ValidNC(nd.NodeCreator): + def create_node(self, name: str) -> Node: + return Node(Select(), name = name) + + # Test it can be instanciated + test_node = ValidNC().create_node('node_name') + assert isinstance(test_node, Node) + assert isinstance(test_node.interface, Select) + assert test_node.name == 'node_name' + +class TestRemoveDirectoryNodeCreator: + """ A class that contains all the unit tests for the RemoveDirectoryNodeCreator class.""" + + @staticmethod + @mark.unit_test + def test_create_node(): + """ Test the create_node method """ + + test_node = nd.RemoveDirectoryNodeCreator().create_node('node_name') + assert isinstance(test_node, Node) + assert isinstance(test_node.interface, Function) + assert test_node.name == 'node_name' + +class TestRemoveFileNodeCreator: + """ A class that contains all the unit tests for the RemoveFileNodeCreator class.""" + + @staticmethod + @mark.unit_test + def test_create_node(): + """ Test the create_node method """ + + test_node = nd.RemoveFileNodeCreator().create_node('node_name') + assert isinstance(test_node, Node) + assert isinstance(test_node.interface, Function) + assert test_node.name == 'node_name' From 6f2047c9d581ce22dee4d68b7c2917685c50e171 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 25 Jan 2024 13:50:34 +0100 Subject: [PATCH 21/41] [ENH][DOC] node generators in core module --- docs/core.md | 43 ++++++++++++++++++++++++++++++++++++++++ narps_open/core/nodes.py | 9 ++++++--- tests/core/test_nodes.py | 23 +++++---------------- 3 files changed, 54 insertions(+), 21 deletions(-) diff --git a/docs/core.md b/docs/core.md index 311faea4..2097c998 100644 --- a/docs/core.md +++ b/docs/core.md @@ -124,3 +124,46 @@ This module contains a set of functions dedicated to computations on images. # Get dimensions of voxels along x, y, and z in mm (returns e.g.: [1.0, 1.0, 1.0]). get_voxel_dimensions('/path/to/the/image.nii.gz') ``` + +## narps_open.core.nodes + +This module contains a set of node creators inheriting form the `narps_open.core.nodes.NodeCreator` abstract class. +These are responsible for creating nipype `Node` objects (for now, only based on the `Function` interface, with functions defined in the `narps_open.core.common` module) to be used inside pipeline code. This allows to factorize code, hence making code simpler to read inside pipeline definition. + +Here is an example how to use the node creators : + +```python +from narps_open.core.nodes import RemoveDirectoryNodeCreator, RemoveFileNodeCreator + +# Create a Node to remove a directory +remove_smoothed = RemoveDirectoryNodeCreator.create_node('remove_smoothed') +remove_smoothed.inputs.directory_name = 'my_directory' + +# Create a Node to remove a file +remove_gunzip = RemoveFileNodeCreator.create_node('remove_gunzip') +remove_gunzip.inputs.file_name = 'my_file' +``` + +For your information, this is how an equivalent code would look like without node creators. + +```python +from nipype import Node +from nipype.interfaces.utility import Function +from narps_open.core.common import remove_directory, remove_file + +# Create a Node to remove a directory +remove_smoothed = Node(Function( + function = remove_directory, + input_names = ['_', 'directory_name'], + output_names = [] + ), name = 'remove_smoothed') +remove_smoothed.inputs.directory_name = 'my_directory' + +# Create a Node to remove a file +remove_gunzip = Node(Function( + function = remove_file, + input_names = ['_', 'file_name'], + output_names = [] + ), name = 'remove_gunzip') +remove_gunzip.inputs.file_name = 'my_file' +``` diff --git a/narps_open/core/nodes.py b/narps_open/core/nodes.py index 58438015..c28480fd 100644 --- a/narps_open/core/nodes.py +++ b/narps_open/core/nodes.py @@ -13,8 +13,9 @@ class NodeCreator(ABC): """ An abstract class to shape what node creators must provide """ + @staticmethod @abstractmethod - def create_node(self, name: str) -> Node: + def create_node(name: str) -> Node: """ Return a new Node (the interface of the Node is defined by specialized classes) Arguments: name, str : the name of the node @@ -23,7 +24,8 @@ def create_node(self, name: str) -> Node: class RemoveDirectoryNodeCreator(NodeCreator): """ A node creator that provides an interface allowing to remove a directory """ - def create_node(self, name: str) -> Node: + @staticmethod + def create_node(name: str) -> Node: return Node(Function( function = remove_directory, input_names = ['_', 'directory_name'], @@ -33,7 +35,8 @@ def create_node(self, name: str) -> Node: class RemoveFileNodeCreator(NodeCreator): """ A node creator that provides an interface allowing to remove a file """ - def create_node(self, name: str) -> Node: + @staticmethod + def create_node(name: str) -> Node: return Node(Function( function = remove_file, input_names = ['_', 'file_name'], diff --git a/tests/core/test_nodes.py b/tests/core/test_nodes.py index 46c11fff..777095fe 100644 --- a/tests/core/test_nodes.py +++ b/tests/core/test_nodes.py @@ -27,26 +27,13 @@ class TestNodeCreator: def test_create_node(): """ Test the create_node method """ - # It is not possible to create an instance of a NodeCreator - with raises(Exception): - nd.NodeCreator().create_node('node_name') - - # Define a child for NodeCreator - class ErrorNC(nd.NodeCreator): - def random_method(self): - pass - - # Test it cannot be instanciated - with raises(Exception): - ErrorNC().create_node('node_name') - # Define another child for NodeCreator class ValidNC(nd.NodeCreator): - def create_node(self, name: str) -> Node: + def create_node(name: str) -> Node: return Node(Select(), name = name) - # Test it can be instanciated - test_node = ValidNC().create_node('node_name') + # Test it can be instantiated + test_node = ValidNC.create_node('node_name') assert isinstance(test_node, Node) assert isinstance(test_node.interface, Select) assert test_node.name == 'node_name' @@ -59,7 +46,7 @@ class TestRemoveDirectoryNodeCreator: def test_create_node(): """ Test the create_node method """ - test_node = nd.RemoveDirectoryNodeCreator().create_node('node_name') + test_node = nd.RemoveDirectoryNodeCreator.create_node('node_name') assert isinstance(test_node, Node) assert isinstance(test_node.interface, Function) assert test_node.name == 'node_name' @@ -72,7 +59,7 @@ class TestRemoveFileNodeCreator: def test_create_node(): """ Test the create_node method """ - test_node = nd.RemoveFileNodeCreator().create_node('node_name') + test_node = nd.RemoveFileNodeCreator.create_node('node_name') assert isinstance(test_node, Node) assert isinstance(test_node.interface, Function) assert test_node.name == 'node_name' From 2cb35a3a5225b7279d524554d3c70e0f6f4b8f66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 25 Jan 2024 14:00:52 +0100 Subject: [PATCH 22/41] [PEP8][SPELL] node generators in core module --- tests/core/test_nodes.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tests/core/test_nodes.py b/tests/core/test_nodes.py index 777095fe..cc7a49d2 100644 --- a/tests/core/test_nodes.py +++ b/tests/core/test_nodes.py @@ -11,13 +11,12 @@ pytest -q test_nodes.py -k """ -from pytest import mark, raises +from pytest import mark from nipype import Node from nipype.interfaces.utility import Select, Function -import narps_open.core.nodes as nd -from narps_open.core.common import remove_directory, remove_file +from narps_open.core import nodes class TestNodeCreator: """ A class that contains all the unit tests for the NodeCreator class.""" @@ -28,8 +27,12 @@ def test_create_node(): """ Test the create_node method """ # Define another child for NodeCreator - class ValidNC(nd.NodeCreator): + class ValidNC(nodes.NodeCreator): + """ A valid implementation of a NodeCreator """ + + @staticmethod def create_node(name: str) -> Node: + """ Return a Node, as expected """ return Node(Select(), name = name) # Test it can be instantiated @@ -46,7 +49,7 @@ class TestRemoveDirectoryNodeCreator: def test_create_node(): """ Test the create_node method """ - test_node = nd.RemoveDirectoryNodeCreator.create_node('node_name') + test_node = nodes.RemoveDirectoryNodeCreator.create_node('node_name') assert isinstance(test_node, Node) assert isinstance(test_node.interface, Function) assert test_node.name == 'node_name' @@ -59,7 +62,7 @@ class TestRemoveFileNodeCreator: def test_create_node(): """ Test the create_node method """ - test_node = nd.RemoveFileNodeCreator.create_node('node_name') + test_node = nodes.RemoveFileNodeCreator.create_node('node_name') assert isinstance(test_node, Node) assert isinstance(test_node.interface, Function) assert test_node.name == 'node_name' From 0a50928cec5e1fb10cfc3ec5d9baacb4697cfe88 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 25 Jan 2024 14:28:54 +0100 Subject: [PATCH 23/41] Remove nodes [skip ci] --- narps_open/pipelines/team_U26C.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index 76645ce2..7b74fa61 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -20,7 +20,7 @@ from narps_open.pipelines import Pipeline from narps_open.data.task import TaskInformation from narps_open.data.participants import get_group -from narps_open.core.common import remove_file, list_intersection, elements_in_string, clean_list +from narps_open.core.nodes import RemoveDirectoryNodeCreator class PipelineTeamU26C(Pipeline): """ A class that defines the pipeline of team U26C. """ @@ -167,6 +167,9 @@ def get_subject_level_analysis(self): Returns: - subject_level_analysis : nipype.WorkFlow """ + # Define the workflow's working directory + working_dir = join(self.directories.working_dir, 'subject_level_analysis') + # Identity interface Node - to iterate over subject_id and run infosource = Node(interface=IdentityInterface(fields=['subject_id']), name = 'infosource') @@ -188,11 +191,19 @@ def get_subject_level_analysis(self): # Gunzip - gunzip files because SPM do not use .nii.gz files gunzip = MapNode(Gunzip(), name='gunzip', iterfield=['in_file']) + # Remove Node - Remove gunzip files once they are no longer needed + remove_gunzip = RemoveDirectoryNodeCreator.create_node('remove_gunzip') + remove_gunzip.inputs.directory_name = join(working_dir, gunzip.name) + # Smooth warped functionals. smooth = Node(Smooth(), name = 'smooth') smooth.inputs.fwhm = self.fwhm smooth.overwrite = False + # Remove Node - Remove smoothed files once they are no longer needed + remove_smooth = RemoveDirectoryNodeCreator.create_node('remove_smooth') + remove_smooth.inputs.directory_name = join(working_dir, smooth.name) + # Function node get_subject_information - get subject specific condition information getsubinforuns = Node(Function( function = self.get_subject_information, @@ -243,9 +254,11 @@ def get_subject_level_analysis(self): (selectderivs, gunzip, [('func', 'in_file')]), (selectderivs, confounds, [('confounds', 'filepath')]), (gunzip, smooth, [('out_file', 'in_files')]), + (smooth, remove_gunzip, [('smoothed_files', '_')]), (getsubinforuns, modelspec, [('subject_info', 'subject_info')]), (confounds, modelspec, [('confounds_file', 'realignment_parameters')]), (smooth, modelspec, [('smoothed_files', 'functional_runs')]), + (modelspec, remove_smooth, [('session_info', '_')]), (modelspec, level1design, [('session_info', 'session_info')]), (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]), (level1estimate, contrast_estimate,[ From c33a4bf51dca2fe9b709a5e80e191e8d52c6139f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 25 Jan 2024 14:52:26 +0100 Subject: [PATCH 24/41] Remove nodes [skip ci] --- narps_open/pipelines/team_U26C.py | 34 +++++++++++++++++++------------ 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index 7b74fa61..8d7e7c32 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -21,6 +21,7 @@ from narps_open.data.task import TaskInformation from narps_open.data.participants import get_group from narps_open.core.nodes import RemoveDirectoryNodeCreator +from narps_open.utils.configuration import Configuration class PipelineTeamU26C(Pipeline): """ A class that defines the pipeline of team U26C. """ @@ -167,9 +168,6 @@ def get_subject_level_analysis(self): Returns: - subject_level_analysis : nipype.WorkFlow """ - # Define the workflow's working directory - working_dir = join(self.directories.working_dir, 'subject_level_analysis') - # Identity interface Node - to iterate over subject_id and run infosource = Node(interface=IdentityInterface(fields=['subject_id']), name = 'infosource') @@ -191,19 +189,11 @@ def get_subject_level_analysis(self): # Gunzip - gunzip files because SPM do not use .nii.gz files gunzip = MapNode(Gunzip(), name='gunzip', iterfield=['in_file']) - # Remove Node - Remove gunzip files once they are no longer needed - remove_gunzip = RemoveDirectoryNodeCreator.create_node('remove_gunzip') - remove_gunzip.inputs.directory_name = join(working_dir, gunzip.name) - # Smooth warped functionals. smooth = Node(Smooth(), name = 'smooth') smooth.inputs.fwhm = self.fwhm smooth.overwrite = False - # Remove Node - Remove smoothed files once they are no longer needed - remove_smooth = RemoveDirectoryNodeCreator.create_node('remove_smooth') - remove_smooth.inputs.directory_name = join(working_dir, smooth.name) - # Function node get_subject_information - get subject specific condition information getsubinforuns = Node(Function( function = self.get_subject_information, @@ -254,11 +244,9 @@ def get_subject_level_analysis(self): (selectderivs, gunzip, [('func', 'in_file')]), (selectderivs, confounds, [('confounds', 'filepath')]), (gunzip, smooth, [('out_file', 'in_files')]), - (smooth, remove_gunzip, [('smoothed_files', '_')]), (getsubinforuns, modelspec, [('subject_info', 'subject_info')]), (confounds, modelspec, [('confounds_file', 'realignment_parameters')]), (smooth, modelspec, [('smoothed_files', 'functional_runs')]), - (modelspec, remove_smooth, [('session_info', '_')]), (modelspec, level1design, [('session_info', 'session_info')]), (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]), (level1estimate, contrast_estimate,[ @@ -267,6 +255,26 @@ def get_subject_level_analysis(self): ('residual_image', 'residual_image')]) ]) + # Remove large files, if requested + if Configuration()['pipelines']['remove_unused_data']: + + # Workflow's working directory + working_dir = join(subject_level_analysis.base_dir, subject_level_analysis.name) + + # Remove Node - Remove gunzip files once they are no longer needed + remove_gunzip = RemoveDirectoryNodeCreator.create_node('remove_gunzip') + remove_gunzip.inputs.directory_name = join(working_dir, gunzip.name) + + # Remove Node - Remove smoothed files once they are no longer needed + remove_smooth = RemoveDirectoryNodeCreator.create_node('remove_smooth') + remove_smooth.inputs.directory_name = join(working_dir, smooth.name) + + # Add connections + subject_level_analysis.connect([ + (smooth, remove_gunzip, [('smoothed_files', '_')]), + (modelspec, remove_smooth, [('session_info', '_')]) + ]) + return subject_level_analysis def get_subject_level_outputs(self): From d1772220b8d499d3edfd843ec4d0db6fe5f9fb12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 25 Jan 2024 15:41:47 +0100 Subject: [PATCH 25/41] Remove nodes [skip ci] --- narps_open/pipelines/team_U26C.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index 8d7e7c32..eae1cc15 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -263,16 +263,18 @@ def get_subject_level_analysis(self): # Remove Node - Remove gunzip files once they are no longer needed remove_gunzip = RemoveDirectoryNodeCreator.create_node('remove_gunzip') - remove_gunzip.inputs.directory_name = join(working_dir, gunzip.name) + gunzip_dir = lambda s : join(working_dir, f'_subject_id_{s}', gunzip.name) # Remove Node - Remove smoothed files once they are no longer needed remove_smooth = RemoveDirectoryNodeCreator.create_node('remove_smooth') - remove_smooth.inputs.directory_name = join(working_dir, smooth.name) + smooth_dir = lambda s : join(working_dir, f'_subject_id_{s}', smooth.name) # Add connections subject_level_analysis.connect([ (smooth, remove_gunzip, [('smoothed_files', '_')]), - (modelspec, remove_smooth, [('session_info', '_')]) + (infosource, remove_gunzip, [(('subject_id', gunzip_dir), 'directory_name')]), + (modelspec, remove_smooth, [('session_info', '_')]), + (infosource, remove_smooth, [(('subject_id', smooth_dir), 'directory_name')]) ]) return subject_level_analysis From 34131564fa538e13523daaafc7cd83fbadd1ab85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 25 Jan 2024 15:52:50 +0100 Subject: [PATCH 26/41] Remove nodes [skip ci] --- narps_open/pipelines/team_U26C.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index eae1cc15..617d5b8b 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -258,23 +258,18 @@ def get_subject_level_analysis(self): # Remove large files, if requested if Configuration()['pipelines']['remove_unused_data']: - # Workflow's working directory - working_dir = join(subject_level_analysis.base_dir, subject_level_analysis.name) - # Remove Node - Remove gunzip files once they are no longer needed remove_gunzip = RemoveDirectoryNodeCreator.create_node('remove_gunzip') - gunzip_dir = lambda s : join(working_dir, f'_subject_id_{s}', gunzip.name) # Remove Node - Remove smoothed files once they are no longer needed remove_smooth = RemoveDirectoryNodeCreator.create_node('remove_smooth') - smooth_dir = lambda s : join(working_dir, f'_subject_id_{s}', smooth.name) # Add connections subject_level_analysis.connect([ (smooth, remove_gunzip, [('smoothed_files', '_')]), - (infosource, remove_gunzip, [(('subject_id', gunzip_dir), 'directory_name')]), + (gunzip, remove_gunzip, [('out_file', 'directory_name')]), (modelspec, remove_smooth, [('session_info', '_')]), - (infosource, remove_smooth, [(('subject_id', smooth_dir), 'directory_name')]) + (smooth, remove_smooth, [('smoothed_files', 'directory_name')]) ]) return subject_level_analysis From 601597d552e687dd9fd07d264e340cc142c7de1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 25 Jan 2024 16:06:00 +0100 Subject: [PATCH 27/41] Add a remove parent dir node generator --- narps_open/core/common.py | 15 +++++++++++++++ narps_open/core/nodes.py | 17 +++++++++++++++-- tests/core/test_common.py | 27 +++++++++++++++++++++++++++ tests/core/test_nodes.py | 13 +++++++++++++ 4 files changed, 70 insertions(+), 2 deletions(-) diff --git a/narps_open/core/common.py b/narps_open/core/common.py index f0d801ea..2f8a77c8 100644 --- a/narps_open/core/common.py +++ b/narps_open/core/common.py @@ -34,6 +34,21 @@ def remove_directory(_, directory_name: str) -> None: rmtree(directory_name, ignore_errors = True) +def remove_parent_directory(_, file_name: str) -> None: + """ + Fully remove directory generated by a Node, once it is not needed anymore. + This function is meant to be used in a Nipype Function Node. + + Parameters: + - _: input only used for triggering the Node + - file_name: str, a single absolute path of a file : its parent directory is to remove + """ + # This import must stay inside the function, as required by Nipype + from pathlib import Path + from shutil import rmtree + + rmtree(Path(file_name).parent.absolute(), ignore_errors = True) + def elements_in_string(input_str: str, elements: list) -> str: #| None: """ Return input_str if it contains one element of the elements list. diff --git a/narps_open/core/nodes.py b/narps_open/core/nodes.py index c28480fd..8e10ef2b 100644 --- a/narps_open/core/nodes.py +++ b/narps_open/core/nodes.py @@ -8,7 +8,7 @@ from nipype import Node from nipype.interfaces.utility import Function -from narps_open.core.common import remove_directory, remove_file +from narps_open.core.common import remove_directory, remove_parent_directory, remove_file class NodeCreator(ABC): """ An abstract class to shape what node creators must provide """ @@ -21,6 +21,19 @@ def create_node(name: str) -> Node: name, str : the name of the node """ +class RemoveParentDirectoryNodeCreator(NodeCreator): + """ A node creator that provides an interface allowing to remove a directory, + given one of its child's file name. + """ + + @staticmethod + def create_node(name: str) -> Node: + return Node(Function( + function = remove_parent_directory, + input_names = ['_', 'directory_name'], + output_names = [] + ), name = name) + class RemoveDirectoryNodeCreator(NodeCreator): """ A node creator that provides an interface allowing to remove a directory """ @@ -28,7 +41,7 @@ class RemoveDirectoryNodeCreator(NodeCreator): def create_node(name: str) -> Node: return Node(Function( function = remove_directory, - input_names = ['_', 'directory_name'], + input_names = ['_', 'file_name'], output_names = [] ), name = name) diff --git a/tests/core/test_common.py b/tests/core/test_common.py index bc5962fd..3bfb69f8 100644 --- a/tests/core/test_common.py +++ b/tests/core/test_common.py @@ -88,6 +88,33 @@ def test_remove_directory(remove_test_dir): # Check file is removed assert not exists(test_dir_path) + @staticmethod + @mark.unit_test + def test_remove_parent_directory(remove_test_dir): + """ Test the remove_parent_directory function """ + + # Create a single inside dir tree + dir_path = abspath(join(TEMPORARY_DIR, 'dir_1', 'dir_2')) + makedirs(dir_path) + file_path = abspath(join(TEMPORARY_DIR, 'dir_1', 'dir_2', 'file1.txt')) + Path(file_path).touch() + + # Check file exist + assert exists(file_path) + + # Create a Nipype Node using remove_files + test_remove_dir_node = Node(Function( + function = co.remove_parent_directory, + input_names = ['_', 'file_name'], + output_names = [] + ), name = 'test_remove_dir_node') + test_remove_dir_node.inputs._ = '' + test_remove_dir_node.inputs.file_name = file_path + test_remove_dir_node.run() + + # Check file is removed + assert not exists(dir_path) + @staticmethod @mark.unit_test def test_node_elements_in_string(): diff --git a/tests/core/test_nodes.py b/tests/core/test_nodes.py index cc7a49d2..7ffaefbe 100644 --- a/tests/core/test_nodes.py +++ b/tests/core/test_nodes.py @@ -41,6 +41,19 @@ def create_node(name: str) -> Node: assert isinstance(test_node.interface, Select) assert test_node.name == 'node_name' +class TestRemoveParentDirectoryNodeCreator: + """ A class that contains all the unit tests for the RemoveParentDirectoryNodeCreator class.""" + + @staticmethod + @mark.unit_test + def test_create_node(): + """ Test the create_node method """ + + test_node = nodes.RemoveParentDirectoryNodeCreator.create_node('node_name') + assert isinstance(test_node, Node) + assert isinstance(test_node.interface, Function) + assert test_node.name == 'node_name' + class TestRemoveDirectoryNodeCreator: """ A class that contains all the unit tests for the RemoveDirectoryNodeCreator class.""" From b6bc7d5f4691ef1d5d10ce3dcfa1a9e0e44b2e1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 25 Jan 2024 16:13:03 +0100 Subject: [PATCH 28/41] Remove nodes [skip ci] --- narps_open/pipelines/team_U26C.py | 10 +++++----- tests/core/test_common.py | 3 --- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index 617d5b8b..5ce1ace8 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -20,7 +20,7 @@ from narps_open.pipelines import Pipeline from narps_open.data.task import TaskInformation from narps_open.data.participants import get_group -from narps_open.core.nodes import RemoveDirectoryNodeCreator +from narps_open.core.nodes import RemoveParentDirectoryNodeCreator from narps_open.utils.configuration import Configuration class PipelineTeamU26C(Pipeline): @@ -259,17 +259,17 @@ def get_subject_level_analysis(self): if Configuration()['pipelines']['remove_unused_data']: # Remove Node - Remove gunzip files once they are no longer needed - remove_gunzip = RemoveDirectoryNodeCreator.create_node('remove_gunzip') + remove_gunzip = RemoveParentDirectoryNodeCreator.create_node('remove_gunzip') # Remove Node - Remove smoothed files once they are no longer needed - remove_smooth = RemoveDirectoryNodeCreator.create_node('remove_smooth') + remove_smooth = RemoveParentDirectoryNodeCreator.create_node('remove_smooth') # Add connections subject_level_analysis.connect([ (smooth, remove_gunzip, [('smoothed_files', '_')]), - (gunzip, remove_gunzip, [('out_file', 'directory_name')]), + (gunzip, remove_gunzip, [('out_file', 'file_name')]), (modelspec, remove_smooth, [('session_info', '_')]), - (smooth, remove_smooth, [('smoothed_files', 'directory_name')]) + (smooth, remove_smooth, [('smoothed_files', 'file_name')]) ]) return subject_level_analysis diff --git a/tests/core/test_common.py b/tests/core/test_common.py index e9bf6c3d..3bfb69f8 100644 --- a/tests/core/test_common.py +++ b/tests/core/test_common.py @@ -87,8 +87,6 @@ def test_remove_directory(remove_test_dir): # Check file is removed assert not exists(test_dir_path) -<<<<<<< HEAD -======= @staticmethod @mark.unit_test @@ -116,7 +114,6 @@ def test_remove_parent_directory(remove_test_dir): # Check file is removed assert not exists(dir_path) ->>>>>>> runner @staticmethod @mark.unit_test From 8ba6befd364e9eae0e4b5be07b9b94cc3f73a849 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 25 Jan 2024 16:54:43 +0100 Subject: [PATCH 29/41] [REFAC][DOC] Creators at interface level instead of nodes --- docs/core.md | 18 ++++---- narps_open/core/interfaces.py | 54 ++++++++++++++++++++++ narps_open/core/nodes.py | 57 ----------------------- tests/core/test_interfaces.py | 85 +++++++++++++++++++++++++++++++++++ tests/core/test_nodes.py | 81 --------------------------------- 5 files changed, 149 insertions(+), 146 deletions(-) create mode 100644 narps_open/core/interfaces.py delete mode 100644 narps_open/core/nodes.py create mode 100644 tests/core/test_interfaces.py delete mode 100644 tests/core/test_nodes.py diff --git a/docs/core.md b/docs/core.md index 2097c998..ab56fccc 100644 --- a/docs/core.md +++ b/docs/core.md @@ -125,26 +125,28 @@ This module contains a set of functions dedicated to computations on images. get_voxel_dimensions('/path/to/the/image.nii.gz') ``` -## narps_open.core.nodes +## narps_open.core.interfaces -This module contains a set of node creators inheriting form the `narps_open.core.nodes.NodeCreator` abstract class. -These are responsible for creating nipype `Node` objects (for now, only based on the `Function` interface, with functions defined in the `narps_open.core.common` module) to be used inside pipeline code. This allows to factorize code, hence making code simpler to read inside pipeline definition. +This module contains a set of interface creators inheriting form the `narps_open.core.interfaces.InterfaceCreator` abstract class. +These are responsible for creating nipype `Interface` objects (for now, only `Function` interfaces are used, with functions defined in the `narps_open.core.common` module) to be used inside pipeline code. This allows to factorize code, hence making it simpler to read inside pipeline definition. -Here is an example how to use the node creators : +Here is an example how to use the interface creators : ```python -from narps_open.core.nodes import RemoveDirectoryNodeCreator, RemoveFileNodeCreator +from narps_open.core.interfaces import ( + RemoveDirectoryInterfaceCreator, RemoveFileInterfaceCreator + ) # Create a Node to remove a directory -remove_smoothed = RemoveDirectoryNodeCreator.create_node('remove_smoothed') +remove_smoothed = Node(RemoveDirectoryInterfaceCreator.create(), name = 'remove_smoothed') remove_smoothed.inputs.directory_name = 'my_directory' # Create a Node to remove a file -remove_gunzip = RemoveFileNodeCreator.create_node('remove_gunzip') +remove_gunzip = Node(RemoveDirectoryInterfaceCreator.create(), name = 'remove_gunzip') remove_gunzip.inputs.file_name = 'my_file' ``` -For your information, this is how an equivalent code would look like without node creators. +For your information, this is how an equivalent code would look like without interface creators. ```python from nipype import Node diff --git a/narps_open/core/interfaces.py b/narps_open/core/interfaces.py new file mode 100644 index 00000000..de215b52 --- /dev/null +++ b/narps_open/core/interfaces.py @@ -0,0 +1,54 @@ +#!/usr/bin/python +# coding: utf-8 + +""" Generate useful and recurrent interfaces to write pipelines """ + +from abc import ABC, abstractmethod + +from nipype.interfaces.base.core import Interface +from nipype.interfaces.utility import Function + +from narps_open.core.common import remove_directory, remove_parent_directory, remove_file + +class InterfaceCreator(ABC): + """ An abstract class to shape what interface creators must provide """ + + @staticmethod + @abstractmethod + def create_interface() -> Interface: + """ Return a new interface (to be defined by specialized classes) """ + +class RemoveParentDirectoryInterfaceCreator(InterfaceCreator): + """ An interface creator that provides an interface allowing to remove a directory, + given one of its child's file name. + """ + + @staticmethod + def create_interface() -> Function: + return Function( + function = remove_parent_directory, + input_names = ['_', 'file_name'], + output_names = [] + ) + +class RemoveDirectoryInterfaceCreator(InterfaceCreator): + """ An interface creator that provides an interface allowing to remove a directory """ + + @staticmethod + def create_interface() -> Function: + return Function( + function = remove_directory, + input_names = ['_', 'directory_name'], + output_names = [] + ) + +class RemoveFileInterfaceCreator(InterfaceCreator): + """ An interface creator that provides an interface allowing to remove a file """ + + @staticmethod + def create_interface() -> Function: + return Function( + function = remove_file, + input_names = ['_', 'file_name'], + output_names = [] + ) diff --git a/narps_open/core/nodes.py b/narps_open/core/nodes.py deleted file mode 100644 index 8e10ef2b..00000000 --- a/narps_open/core/nodes.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 - -""" Generate useful and recurrent nodes to write pipelines """ - -from abc import ABC, abstractmethod - -from nipype import Node -from nipype.interfaces.utility import Function - -from narps_open.core.common import remove_directory, remove_parent_directory, remove_file - -class NodeCreator(ABC): - """ An abstract class to shape what node creators must provide """ - - @staticmethod - @abstractmethod - def create_node(name: str) -> Node: - """ Return a new Node (the interface of the Node is defined by specialized classes) - Arguments: - name, str : the name of the node - """ - -class RemoveParentDirectoryNodeCreator(NodeCreator): - """ A node creator that provides an interface allowing to remove a directory, - given one of its child's file name. - """ - - @staticmethod - def create_node(name: str) -> Node: - return Node(Function( - function = remove_parent_directory, - input_names = ['_', 'directory_name'], - output_names = [] - ), name = name) - -class RemoveDirectoryNodeCreator(NodeCreator): - """ A node creator that provides an interface allowing to remove a directory """ - - @staticmethod - def create_node(name: str) -> Node: - return Node(Function( - function = remove_directory, - input_names = ['_', 'file_name'], - output_names = [] - ), name = name) - -class RemoveFileNodeCreator(NodeCreator): - """ A node creator that provides an interface allowing to remove a file """ - - @staticmethod - def create_node(name: str) -> Node: - return Node(Function( - function = remove_file, - input_names = ['_', 'file_name'], - output_names = [] - ), name = name) diff --git a/tests/core/test_interfaces.py b/tests/core/test_interfaces.py new file mode 100644 index 00000000..1634fab7 --- /dev/null +++ b/tests/core/test_interfaces.py @@ -0,0 +1,85 @@ +#!/usr/bin/python +# coding: utf-8 + +""" Tests of the 'narps_open.core.interfaces' module. + +Launch this test with PyTest + +Usage: +====== + pytest -q test_interfaces.py + pytest -q test_interfaces.py -k +""" + +from pytest import mark + +from nipype.interfaces.base.core import Interface +from nipype.interfaces.utility import Select, Function + +from narps_open.core import interfaces + +class ValidNC(interfaces.InterfaceCreator): + """ A valid implementation of a InterfaceCreator, for test purposes """ + + @staticmethod + def create_interface() -> Interface: + """ Return a Interface, as expected """ + return Select() + +class TestInterfaceCreator: + """ A class that contains all the unit tests for the InterfaceCreator class.""" + + @staticmethod + @mark.unit_test + def test_create_interface(): + """ Test the create_interface method """ + + test_node = ValidNC.create_interface() + assert isinstance(test_node, Select) + +class TestRemoveParentDirectoryInterfaceCreator: + """ A class that contains all the unit tests for the + RemoveParentDirectoryInterfaceCreator class. + """ + + @staticmethod + @mark.unit_test + def test_create_interface(): + """ Test the create_interface method """ + + test_node = interfaces.RemoveParentDirectoryInterfaceCreator.create_interface() + assert isinstance(test_node, Function) + inputs = str(test_node.inputs) + assert '_ = ' in inputs + assert 'file_name = ' in inputs + assert 'function_str = def remove_parent_directory(_, file_name: str) -> None:' in inputs + +class TestRemoveDirectoryInterfaceCreator: + """ A class that contains all the unit tests for the RemoveDirectoryInterfaceCreator class.""" + + @staticmethod + @mark.unit_test + def test_create_interface(): + """ Test the create_interface method """ + + test_node = interfaces.RemoveDirectoryInterfaceCreator.create_interface() + assert isinstance(test_node, Function) + inputs = str(test_node.inputs) + assert '_ = ' in inputs + assert 'directory_name = ' in inputs + assert 'function_str = def remove_directory(_, directory_name: str) -> None:' in inputs + +class TestRemoveFileInterfaceCreator: + """ A class that contains all the unit tests for the RemoveFileInterfaceCreator class.""" + + @staticmethod + @mark.unit_test + def test_create_interface(): + """ Test the create_interface method """ + + test_node = interfaces.RemoveFileInterfaceCreator.create_interface() + assert isinstance(test_node, Function) + inputs = str(test_node.inputs) + assert '_ = ' in inputs + assert 'file_name = ' in inputs + assert 'function_str = def remove_file(_, file_name: str) -> None:' in inputs diff --git a/tests/core/test_nodes.py b/tests/core/test_nodes.py deleted file mode 100644 index 7ffaefbe..00000000 --- a/tests/core/test_nodes.py +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 - -""" Tests of the 'narps_open.core.nodes' module. - -Launch this test with PyTest - -Usage: -====== - pytest -q test_nodes.py - pytest -q test_nodes.py -k -""" - -from pytest import mark - -from nipype import Node -from nipype.interfaces.utility import Select, Function - -from narps_open.core import nodes - -class TestNodeCreator: - """ A class that contains all the unit tests for the NodeCreator class.""" - - @staticmethod - @mark.unit_test - def test_create_node(): - """ Test the create_node method """ - - # Define another child for NodeCreator - class ValidNC(nodes.NodeCreator): - """ A valid implementation of a NodeCreator """ - - @staticmethod - def create_node(name: str) -> Node: - """ Return a Node, as expected """ - return Node(Select(), name = name) - - # Test it can be instantiated - test_node = ValidNC.create_node('node_name') - assert isinstance(test_node, Node) - assert isinstance(test_node.interface, Select) - assert test_node.name == 'node_name' - -class TestRemoveParentDirectoryNodeCreator: - """ A class that contains all the unit tests for the RemoveParentDirectoryNodeCreator class.""" - - @staticmethod - @mark.unit_test - def test_create_node(): - """ Test the create_node method """ - - test_node = nodes.RemoveParentDirectoryNodeCreator.create_node('node_name') - assert isinstance(test_node, Node) - assert isinstance(test_node.interface, Function) - assert test_node.name == 'node_name' - -class TestRemoveDirectoryNodeCreator: - """ A class that contains all the unit tests for the RemoveDirectoryNodeCreator class.""" - - @staticmethod - @mark.unit_test - def test_create_node(): - """ Test the create_node method """ - - test_node = nodes.RemoveDirectoryNodeCreator.create_node('node_name') - assert isinstance(test_node, Node) - assert isinstance(test_node.interface, Function) - assert test_node.name == 'node_name' - -class TestRemoveFileNodeCreator: - """ A class that contains all the unit tests for the RemoveFileNodeCreator class.""" - - @staticmethod - @mark.unit_test - def test_create_node(): - """ Test the create_node method """ - - test_node = nodes.RemoveFileNodeCreator.create_node('node_name') - assert isinstance(test_node, Node) - assert isinstance(test_node.interface, Function) - assert test_node.name == 'node_name' From 4ffaa5ca9fa06e91f7c41f2e27877e84c921a9f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 26 Jan 2024 11:57:23 +0100 Subject: [PATCH 30/41] Creating an interface factory --- docs/core.md | 13 ++++++----- narps_open/core/interfaces.py | 20 +++++++++++++++++ tests/core/test_interfaces.py | 42 +++++++++++++++++++++++++---------- 3 files changed, 57 insertions(+), 18 deletions(-) diff --git a/docs/core.md b/docs/core.md index ab56fccc..4959bce5 100644 --- a/docs/core.md +++ b/docs/core.md @@ -128,21 +128,22 @@ get_voxel_dimensions('/path/to/the/image.nii.gz') ## narps_open.core.interfaces This module contains a set of interface creators inheriting form the `narps_open.core.interfaces.InterfaceCreator` abstract class. -These are responsible for creating nipype `Interface` objects (for now, only `Function` interfaces are used, with functions defined in the `narps_open.core.common` module) to be used inside pipeline code. This allows to factorize code, hence making it simpler to read inside pipeline definition. +These are responsible for creating nipype `Interface` objects (for now, only `Function` interfaces are used, with functions defined in the `narps_open.core.common` module) to be used inside pipeline code. +The module also provide an `InterfaceFactory` to easily create the available interface, without knowing which creator is responsible for that. + +The overall allows to factorize code, hence making it simpler to read inside pipeline definition. Here is an example how to use the interface creators : ```python -from narps_open.core.interfaces import ( - RemoveDirectoryInterfaceCreator, RemoveFileInterfaceCreator - ) +from narps_open.core.interfaces import InterfaceFactory # Create a Node to remove a directory -remove_smoothed = Node(RemoveDirectoryInterfaceCreator.create(), name = 'remove_smoothed') +remove_smoothed = Node(InterfaceFactory.create('remove_directory'), name = 'remove_smoothed') remove_smoothed.inputs.directory_name = 'my_directory' # Create a Node to remove a file -remove_gunzip = Node(RemoveDirectoryInterfaceCreator.create(), name = 'remove_gunzip') +remove_gunzip = Node(InterfaceFactory.create('remove_file'), name = 'remove_gunzip') remove_gunzip.inputs.file_name = 'my_file' ``` diff --git a/narps_open/core/interfaces.py b/narps_open/core/interfaces.py index de215b52..1d356200 100644 --- a/narps_open/core/interfaces.py +++ b/narps_open/core/interfaces.py @@ -52,3 +52,23 @@ def create_interface() -> Function: input_names = ['_', 'file_name'], output_names = [] ) + +class InterfaceFactory(): + """ A class to generate interfaces from narps_open.core functions """ + + # A list of creators, one for each function + creators = { + 'remove_directory' : RemoveDirectoryInterfaceCreator, + 'remove_parent_directory' : RemoveParentDirectoryInterfaceCreator, + 'remove_file' : RemoveFileInterfaceCreator + } + + @classmethod + def create(cls, creator_name: str): + """ Return a new Function interface + Arguments : + creator_name, str : the key for the creator to be used + """ + # Actually create the interface, using a creator + creator = cls.creators[creator_name] + return creator.create_interface() diff --git a/tests/core/test_interfaces.py b/tests/core/test_interfaces.py index 1634fab7..2dca54bb 100644 --- a/tests/core/test_interfaces.py +++ b/tests/core/test_interfaces.py @@ -11,7 +11,7 @@ pytest -q test_interfaces.py -k """ -from pytest import mark +from pytest import mark, raises from nipype.interfaces.base.core import Interface from nipype.interfaces.utility import Select, Function @@ -34,8 +34,8 @@ class TestInterfaceCreator: def test_create_interface(): """ Test the create_interface method """ - test_node = ValidNC.create_interface() - assert isinstance(test_node, Select) + test_interface = ValidNC.create_interface() + assert isinstance(test_interface, Select) class TestRemoveParentDirectoryInterfaceCreator: """ A class that contains all the unit tests for the @@ -47,9 +47,9 @@ class TestRemoveParentDirectoryInterfaceCreator: def test_create_interface(): """ Test the create_interface method """ - test_node = interfaces.RemoveParentDirectoryInterfaceCreator.create_interface() - assert isinstance(test_node, Function) - inputs = str(test_node.inputs) + test_interface = interfaces.RemoveParentDirectoryInterfaceCreator.create_interface() + assert isinstance(test_interface, Function) + inputs = str(test_interface.inputs) assert '_ = ' in inputs assert 'file_name = ' in inputs assert 'function_str = def remove_parent_directory(_, file_name: str) -> None:' in inputs @@ -62,9 +62,9 @@ class TestRemoveDirectoryInterfaceCreator: def test_create_interface(): """ Test the create_interface method """ - test_node = interfaces.RemoveDirectoryInterfaceCreator.create_interface() - assert isinstance(test_node, Function) - inputs = str(test_node.inputs) + test_interface = interfaces.RemoveDirectoryInterfaceCreator.create_interface() + assert isinstance(test_interface, Function) + inputs = str(test_interface.inputs) assert '_ = ' in inputs assert 'directory_name = ' in inputs assert 'function_str = def remove_directory(_, directory_name: str) -> None:' in inputs @@ -77,9 +77,27 @@ class TestRemoveFileInterfaceCreator: def test_create_interface(): """ Test the create_interface method """ - test_node = interfaces.RemoveFileInterfaceCreator.create_interface() - assert isinstance(test_node, Function) - inputs = str(test_node.inputs) + test_interface = interfaces.RemoveFileInterfaceCreator.create_interface() + assert isinstance(test_interface, Function) + inputs = str(test_interface.inputs) + assert '_ = ' in inputs + assert 'file_name = ' in inputs + assert 'function_str = def remove_file(_, file_name: str) -> None:' in inputs + +class TestInterfaceFactory: + """ A class that contains all the unit tests for the InterfaceFactory class.""" + + @staticmethod + @mark.unit_test + def test_create(): + """ Test the create method """ + + with raises(KeyError): + interfaces.InterfaceFactory.create('fake_function') + + test_interface = interfaces.InterfaceFactory.create('remove_file') + assert isinstance(test_interface, Function) + inputs = str(test_interface.inputs) assert '_ = ' in inputs assert 'file_name = ' in inputs assert 'function_str = def remove_file(_, file_name: str) -> None:' in inputs From 5effd4c1c2ae5d5304a5ff09c27e4a50dd41e61a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 26 Jan 2024 12:03:07 +0100 Subject: [PATCH 31/41] Remove node modules after merging --- narps_open/core/nodes.py | 44 -------------------------- tests/core/test_nodes.py | 68 ---------------------------------------- 2 files changed, 112 deletions(-) delete mode 100644 narps_open/core/nodes.py delete mode 100644 tests/core/test_nodes.py diff --git a/narps_open/core/nodes.py b/narps_open/core/nodes.py deleted file mode 100644 index c28480fd..00000000 --- a/narps_open/core/nodes.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 - -""" Generate useful and recurrent nodes to write pipelines """ - -from abc import ABC, abstractmethod - -from nipype import Node -from nipype.interfaces.utility import Function - -from narps_open.core.common import remove_directory, remove_file - -class NodeCreator(ABC): - """ An abstract class to shape what node creators must provide """ - - @staticmethod - @abstractmethod - def create_node(name: str) -> Node: - """ Return a new Node (the interface of the Node is defined by specialized classes) - Arguments: - name, str : the name of the node - """ - -class RemoveDirectoryNodeCreator(NodeCreator): - """ A node creator that provides an interface allowing to remove a directory """ - - @staticmethod - def create_node(name: str) -> Node: - return Node(Function( - function = remove_directory, - input_names = ['_', 'directory_name'], - output_names = [] - ), name = name) - -class RemoveFileNodeCreator(NodeCreator): - """ A node creator that provides an interface allowing to remove a file """ - - @staticmethod - def create_node(name: str) -> Node: - return Node(Function( - function = remove_file, - input_names = ['_', 'file_name'], - output_names = [] - ), name = name) diff --git a/tests/core/test_nodes.py b/tests/core/test_nodes.py deleted file mode 100644 index cc7a49d2..00000000 --- a/tests/core/test_nodes.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 - -""" Tests of the 'narps_open.core.nodes' module. - -Launch this test with PyTest - -Usage: -====== - pytest -q test_nodes.py - pytest -q test_nodes.py -k -""" - -from pytest import mark - -from nipype import Node -from nipype.interfaces.utility import Select, Function - -from narps_open.core import nodes - -class TestNodeCreator: - """ A class that contains all the unit tests for the NodeCreator class.""" - - @staticmethod - @mark.unit_test - def test_create_node(): - """ Test the create_node method """ - - # Define another child for NodeCreator - class ValidNC(nodes.NodeCreator): - """ A valid implementation of a NodeCreator """ - - @staticmethod - def create_node(name: str) -> Node: - """ Return a Node, as expected """ - return Node(Select(), name = name) - - # Test it can be instantiated - test_node = ValidNC.create_node('node_name') - assert isinstance(test_node, Node) - assert isinstance(test_node.interface, Select) - assert test_node.name == 'node_name' - -class TestRemoveDirectoryNodeCreator: - """ A class that contains all the unit tests for the RemoveDirectoryNodeCreator class.""" - - @staticmethod - @mark.unit_test - def test_create_node(): - """ Test the create_node method """ - - test_node = nodes.RemoveDirectoryNodeCreator.create_node('node_name') - assert isinstance(test_node, Node) - assert isinstance(test_node.interface, Function) - assert test_node.name == 'node_name' - -class TestRemoveFileNodeCreator: - """ A class that contains all the unit tests for the RemoveFileNodeCreator class.""" - - @staticmethod - @mark.unit_test - def test_create_node(): - """ Test the create_node method """ - - test_node = nodes.RemoveFileNodeCreator.create_node('node_name') - assert isinstance(test_node, Node) - assert isinstance(test_node.interface, Function) - assert test_node.name == 'node_name' From a5317c88a888df09c1c8f7960b36137283a9c0e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 26 Jan 2024 13:40:20 +0100 Subject: [PATCH 32/41] Conditional nodes for removal [skip ci] --- narps_open/pipelines/team_U26C.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index 5ce1ace8..62e8c788 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -20,7 +20,7 @@ from narps_open.pipelines import Pipeline from narps_open.data.task import TaskInformation from narps_open.data.participants import get_group -from narps_open.core.nodes import RemoveParentDirectoryNodeCreator +from narps_open.core.interfaces import InterfaceFactory from narps_open.utils.configuration import Configuration class PipelineTeamU26C(Pipeline): @@ -259,10 +259,18 @@ def get_subject_level_analysis(self): if Configuration()['pipelines']['remove_unused_data']: # Remove Node - Remove gunzip files once they are no longer needed - remove_gunzip = RemoveParentDirectoryNodeCreator.create_node('remove_gunzip') + remove_gunzip = MapNode( + InterfaceFactory.create('remove_parent_directory'), + name = 'remove_gunzip', + iterfield = ['file_name'] + ) # Remove Node - Remove smoothed files once they are no longer needed - remove_smooth = RemoveParentDirectoryNodeCreator.create_node('remove_smooth') + remove_smooth = MapNode( + InterfaceFactory.create('remove_parent_directory'), + name = 'remove_smooth', + iterfield = ['file_name'] + ) # Add connections subject_level_analysis.connect([ From a703abd7094a5d0d503e3da49ebf4b2deed7e292 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 26 Jan 2024 14:39:51 +0100 Subject: [PATCH 33/41] Add group level workflows [skip ci] --- narps_open/pipelines/team_U26C.py | 254 +++++++++++++++++++++++++++++- 1 file changed, 246 insertions(+), 8 deletions(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index 62e8c788..1f37d5ff 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -21,6 +21,7 @@ from narps_open.data.task import TaskInformation from narps_open.data.participants import get_group from narps_open.core.interfaces import InterfaceFactory +from narps_open.core.common import list_intersection, elements_in_string, clean_list from narps_open.utils.configuration import Configuration class PipelineTeamU26C(Pipeline): @@ -30,6 +31,7 @@ def __init__(self): super().__init__() self.fwhm = 5.0 self.team_id = 'U26C' + self.contrast_list = ['0001', '0002', '0003'] gamble = [f'gamble_run{r}' for r in range(1, len(self.run_list) + 1)] gain = [f'gamble_run{r}xgain_run{r}^1' for r in range(1, len(self.run_list) + 1)] @@ -75,7 +77,7 @@ def get_subject_information(event_files: list): trial_key = f'gamble_run{run_id + 1}' gain_key = f'gain_run{run_id + 1}' loss_key = f'loss_run{run_id + 1}' - + onsets.update({trial_key: []}) durations.update({trial_key: []}) weights_gain.update({gain_key: []}) @@ -83,7 +85,7 @@ def get_subject_information(event_files: list): with open(event_file, 'rt') as file: next(file) # skip the header - + for line in file: info = line.strip().split() onsets[trial_key].append(float(info[0])) @@ -169,7 +171,8 @@ def get_subject_level_analysis(self): - subject_level_analysis : nipype.WorkFlow """ # Identity interface Node - to iterate over subject_id and run - infosource = Node(interface=IdentityInterface(fields=['subject_id']), + infosource = Node( + IdentityInterface(fields = ['subject_id']), name = 'infosource') infosource.iterables = [('subject_id', self.subject_list)] @@ -181,11 +184,15 @@ def get_subject_level_analysis(self): 'sub-{subject_id}_task-MGT_run-*_bold_confounds.tsv'), 'events': join('sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-*_events.tsv') - } + } selectderivs = Node(SelectFiles(templates), name = 'selectderivs') selectderivs.inputs.base_directory = self.directories.dataset_dir selectderivs.inputs.sort_filelist = True + # DataSink - store the wanted results in the wanted repository + data_sink = Node(DataSink(), name = 'data_sink') + data_sink.inputs.base_directory = self.directories.output_dir + # Gunzip - gunzip files because SPM do not use .nii.gz files gunzip = MapNode(Gunzip(), name='gunzip', iterfield=['in_file']) @@ -252,7 +259,11 @@ def get_subject_level_analysis(self): (level1estimate, contrast_estimate,[ ('spm_mat_file', 'spm_mat_file'), ('beta_images', 'beta_images'), - ('residual_image', 'residual_image')]) + ('residual_image', 'residual_image')]), + (contrast_estimate, data_sink, [ + ('con_images', f'{subject_level_analysis.name}.@con_images'), + ('spmT_images', f'{subject_level_analysis.name}.@spmT_images'), + ('spm_mat_file', f'{subject_level_analysis.name}.@spm_mat_file')]) ]) # Remove large files, if requested @@ -325,17 +336,244 @@ def get_group_level_analysis(self): - a list of nipype.WorkFlow """ - def get_group_level_analysis_sub_workflow(self, method): + return [ + self.get_group_level_analysis_single_group('equalRange'), + self.get_group_level_analysis_single_group('equalIndifference'), + self.get_group_level_analysis_group_comparison() + ] + + def get_group_level_analysis_single_group(self, method): """ - Return a workflow for the group level analysis. + Return a workflow for the group level analysis in the single group case. Parameters: - - method: one of 'equalRange', 'equalIndifference' or 'groupComp' + - method: one of 'equalRange', 'equalIndifference' Returns: - group_level_analysis: nipype.WorkFlow """ + # Compute the number of participants used to do the analysis + nb_subjects = len(self.subject_list) + + # Infosource - a function free node to iterate over the list of subject names + infosource = Node(IdentityInterface(fields=['contrast_id']), + name = 'infosource') + infosource.iterables = [('contrast_id', self.contrast_list)] + + # Select files from subject level analysis + templates = { + 'contrasts': join(self.directories.output_dir, + 'subject_level_analysis', '_subject_id_*', 'con_{contrast_id}.nii'), + 'mask': '/data/pt_nmc002/other/narps/derivatives/fmriprep/gr_mask_tmax.nii' + } + selectderivs = Node(SelectFiles(templates), name = 'selectderivs') + selectderivs.inputs.sort_filelist = True + selectderivs.inputs.base_directory = self.directories.dataset_dir + + # Datasink - save important files + data_sink = Node(DataSink(), name = 'data_sink') + data_sink.inputs.base_directory = self.directories.output_dir + + # Function Node get_group_subjects + # Get subjects in the group and in the subject_list + get_group_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_group_subjects' + ) + get_group_subjects.inputs.list_1 = get_group(method) + get_group_subjects.inputs.list_2 = self.subject_list + + # Create a function to complete the subject ids out from the get_equal_*_subjects nodes + # If not complete, subject id '001' in search patterns + # would match all contrast files with 'con_0001.nii'. + complete_subject_ids = lambda l : [f'_subject_id_{a}' for a in l] + + # Function Node elements_in_string + # Get contrast files for required subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_contrasts = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] + ), + name = 'get_contrasts', iterfield = 'input_str' + ) + + # One Sample T-Test Design - creates one sample T-Test Design + onesamplettestdes = Node(OneSampleTTestDesign(), name = 'onesampttestdes') + + # EstimateModel - estimate the parameters of the model + # Even for second level it should be 'Classical': 1. + level2estimate = Node(EstimateModel(), name = 'level2estimate') + level2estimate.inputs.estimation_method = {'Classical': 1} + + # EstimateContrast - estimates simple group contrast + level2conestimate = Node(EstimateContrast(), name = 'level2conestimate') + level2conestimate.inputs.group_contrast = True + level2conestimate.inputs.contrasts = [['Group', 'T', ['mean'], [1]]] + + # Create the group level workflow + group_level_analysis = Workflow( + base_dir = self.directories.working_dir, + name = f'group_level_analysis_nsub_{nb_subjects}') + group_level_analysis.connect([ + (infosource, selectderivs, [('contrast_id', 'contrast_id')]), + (selectderivs, get_contrasts, [('contrasts', 'input_str')]), + (get_group_subjects, get_contrasts, [ + (('out_list', complete_subject_ids), 'elements') + ]), + (get_contrasts, onesamplettestdes, [ + (('out_list', clean_list), 'in_files') + ]), + (selectderivs, onesamplettestdes, [('mask', 'explicit_mask_file')]), + (onesamplettestdes, level2estimate, [('spm_mat_file', 'spm_mat_file')]), + (level2estimate, level2conestimate, [ + ('spm_mat_file', 'spm_mat_file'), + ('beta_images', 'beta_images'), + ('residual_image', 'residual_image') + ]), + (level2estimate, data_sink, [ + ('mask_image', f'group_level_analysis_{method}_nsub_{nb_subjects}.@mask')]), + (level2conestimate, data_sink, [ + ('spm_mat_file', f'group_level_analysis_{method}_nsub_{nb_subjects}.@spm_mat'), + ('spmT_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@T'), + ('con_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@con')]) + ]) + + return group_level_analysis + + def get_group_level_analysis_group_comparison(self): + """ + Return a workflow for the group level analysis in the group comparison case. + + Returns: + - group_level_analysis: nipype.WorkFlow + """ + # Compute the number of participants used to do the analysis + nb_subjects = len(self.subject_list) + + # Infosource - a function free node to iterate over the list of subject names + infosource = Node(IdentityInterface(fields=['contrast_id']), + name = 'infosource') + infosource.iterables = [('contrast_id', self.contrast_list)] + + # Select files from subject level analysis + templates = { + 'contrasts': join(self.directories.output_dir, + 'subject_level_analysis', '_subject_id_*', 'con_{contrast_id}.nii'), + 'mask': '/data/pt_nmc002/other/narps/derivatives/fmriprep/gr_mask_tmax.nii' + } + selectderivs = Node(SelectFiles(templates), name = 'selectderivs') + selectderivs.inputs.sort_filelist = True + selectderivs.inputs.base_directory = self.directories.dataset_dir + + # Datasink - save important files + data_sink = Node(DataSink(), name = 'data_sink') + data_sink.inputs.base_directory = self.directories.output_dir + + # Function Node get_group_subjects + # Get subjects in the group and in the subject_list + get_equal_indifference_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_group_subjects' + ) + get_equal_indifference_subjects.inputs.list_1 = get_group('equalIndifference') + get_equal_indifference_subjects.inputs.list_2 = self.subject_list + + # Function Node get_group_subjects + # Get subjects in the group and in the subject_list + get_equal_range_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_group_subjects' + ) + get_equal_range_subjects.inputs.list_1 = get_group('equalRange') + get_equal_range_subjects.inputs.list_2 = self.subject_list + + # Create a function to complete the subject ids out from the get_equal_*_subjects nodes + # If not complete, subject id '001' in search patterns + # would match all contrast files with 'con_0001.nii'. + complete_subject_ids = lambda l : [f'_subject_id_{a}' for a in l] + + # Function Node elements_in_string + # Get contrast files for required subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_equal_indifference_contrasts = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] + ), + name = 'get_equal_indifference_contrasts', iterfield = 'input_str' + ) + get_equal_range_contrasts = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] + ), + name = 'get_equal_range_contrasts', iterfield = 'input_str' + ) + + # Two Sample T-Test Design + twosampttest = Node(TwoSampleTTestDesign(), name = 'twosampttest') + + # EstimateModel - estimate the parameters of the model + # Even for second level it should be 'Classical': 1. + level2estimate = Node(EstimateModel(), name = 'level2estimate') + level2estimate.inputs.estimation_method = {'Classical': 1} + + # EstimateContrast - estimates simple group contrast + level2conestimate = Node(EstimateContrast(), name = 'level2conestimate') + level2conestimate.inputs.group_contrast = True + level2conestimate.inputs.contrasts = [ + ['Eq range vs Eq indiff in loss', 'T', ['mean'], [1, -1]] + ] + + # Create the group level workflow + group_level_analysis = Workflow( + base_dir = self.directories.working_dir, + name = f'group_level_analysis_groupComp_nsub_{nb_subjects}') + group_level_analysis.connect([ + (infosource, selectderivs, [('contrast_id', 'contrast_id')]), + (selectderivs, get_equal_range_contrasts, [('contrasts', 'input_str')]), + (selectderivs, get_equal_indifference_contrasts, [('contrasts', 'input_str')]), + (get_equal_range_subjects, get_equal_range_contrasts, [ + (('out_list', complete_subject_ids), 'elements') + ]), + (get_equal_indifference_subjects, get_equal_indifference_contrasts, [ + (('out_list', complete_subject_ids), 'elements') + ]), + (get_equal_range_contrasts, twosampttest, [ + (('out_list', clean_list), 'group1_files') + ]), + (get_equal_indifference_contrasts, twosampttest, [ + (('out_list', clean_list), 'group2_files') + ]), + (selectderivs, twosampttest, [('mask', 'explicit_mask_file')]), + (twosampttest, level2estimate, [('spm_mat_file', 'spm_mat_file')]), + (level2estimate, level2conestimate, [ + ('spm_mat_file', 'spm_mat_file'), + ('beta_images', 'beta_images'), + ('residual_image', 'residual_image') + ]), + (level2estimate, data_sink, [ + ('mask_image', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@mask')]), + (level2conestimate, data_sink, [ + ('spm_mat_file', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@spm_mat'), + ('spmT_images', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@T'), + ('con_images', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@con')]) + ]) + return group_level_analysis def get_group_level_outputs(self): """ Return all names for the files the group level analysis is supposed to generate. """ From 4d3aa8eebdbb0afdcb0f07125bd2af75b35073f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 26 Jan 2024 15:58:51 +0100 Subject: [PATCH 34/41] Group levels + tests [skip ci] --- narps_open/pipelines/team_U26C.py | 156 ++++++++++++------------------ tests/pipelines/test_team_U26C.py | 13 ++- 2 files changed, 68 insertions(+), 101 deletions(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index 1f37d5ff..d89152fc 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -296,30 +296,17 @@ def get_subject_level_analysis(self): def get_subject_level_outputs(self): """ Return the names of the files the subject level analysis is supposed to generate. """ - # Handle gain files templates = [join( - self.directories.output_dir, - 'subject_level_analysis_gain', '_subject_id_{subject_id}', 'con_0001.nii')] - templates += [join( - self.directories.output_dir, - 'subject_level_analysis_gain', '_subject_id_{subject_id}', 'SPM.mat')] - templates += [join( - self.directories.output_dir, - 'subject_level_analysis_gain', '_subject_id_{subject_id}', 'spmT_0001.nii')] - - # Handle loss files - contrast_list = ['0001', '0002'] - templates += [join( self.directories.output_dir, 'subject_level_analysis_loss', '_subject_id_{subject_id}', f'con_{contrast_id}.nii')\ - for contrast_id in contrast_list] + for contrast_id in self.contrast_list] templates += [join( self.directories.output_dir, 'subject_level_analysis_loss', '_subject_id_{subject_id}', 'SPM.mat')] templates += [join( self.directories.output_dir, 'subject_level_analysis_loss', '_subject_id_{subject_id}', f'spmT_{contrast_id}.nii')\ - for contrast_id in contrast_list] + for contrast_id in self.contrast_list] # Format with subject_ids return_list = [] @@ -364,7 +351,7 @@ def get_group_level_analysis_single_group(self, method): templates = { 'contrasts': join(self.directories.output_dir, 'subject_level_analysis', '_subject_id_*', 'con_{contrast_id}.nii'), - 'mask': '/data/pt_nmc002/other/narps/derivatives/fmriprep/gr_mask_tmax.nii' + #'mask': join('derivatives/fmriprep/gr_mask_tmax.nii') } selectderivs = Node(SelectFiles(templates), name = 'selectderivs') selectderivs.inputs.sort_filelist = True @@ -416,6 +403,14 @@ def get_group_level_analysis_single_group(self, method): level2conestimate.inputs.group_contrast = True level2conestimate.inputs.contrasts = [['Group', 'T', ['mean'], [1]]] + # Threshold Node - Create thresholded maps + threshold = Node(Threshold(), name = 'threshold') + threshold.inputs.use_fwe_correction = True + threshold.inputs.height_threshold_type = 'p-value' + threshold.inputs.force_activation = False + threshold.inputs.height_threshold = 0.05 + threshold.inputs.contrast_index = 1 + # Create the group level workflow group_level_analysis = Workflow( base_dir = self.directories.working_dir, @@ -429,19 +424,25 @@ def get_group_level_analysis_single_group(self, method): (get_contrasts, onesamplettestdes, [ (('out_list', clean_list), 'in_files') ]), - (selectderivs, onesamplettestdes, [('mask', 'explicit_mask_file')]), + #(selectderivs, onesamplettestdes, [('mask', 'explicit_mask_file')]), (onesamplettestdes, level2estimate, [('spm_mat_file', 'spm_mat_file')]), (level2estimate, level2conestimate, [ ('spm_mat_file', 'spm_mat_file'), ('beta_images', 'beta_images'), ('residual_image', 'residual_image') ]), + (level2conestimate, threshold, [ + ('spm_mat_file', 'spm_mat_file'), + ('spmT_images', 'stat_image') + ]), (level2estimate, data_sink, [ ('mask_image', f'group_level_analysis_{method}_nsub_{nb_subjects}.@mask')]), (level2conestimate, data_sink, [ ('spm_mat_file', f'group_level_analysis_{method}_nsub_{nb_subjects}.@spm_mat'), ('spmT_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@T'), - ('con_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@con')]) + ('con_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@con')]), + (threshold, data_sink, [ + ('thresholded_map', f'group_level_analysis_{method}_nsub_{nb_subjects}.@thresh')]) ]) return group_level_analysis @@ -465,7 +466,7 @@ def get_group_level_analysis_group_comparison(self): templates = { 'contrasts': join(self.directories.output_dir, 'subject_level_analysis', '_subject_id_*', 'con_{contrast_id}.nii'), - 'mask': '/data/pt_nmc002/other/narps/derivatives/fmriprep/gr_mask_tmax.nii' + #'mask': join('derivatives/fmriprep/gr_mask_tmax.nii') } selectderivs = Node(SelectFiles(templates), name = 'selectderivs') selectderivs.inputs.sort_filelist = True @@ -482,7 +483,7 @@ def get_group_level_analysis_group_comparison(self): input_names = ['list_1', 'list_2'], output_names = ['out_list'] ), - name = 'get_group_subjects' + name = 'get_equal_indifference_subjects' ) get_equal_indifference_subjects.inputs.list_1 = get_group('equalIndifference') get_equal_indifference_subjects.inputs.list_2 = self.subject_list @@ -494,7 +495,7 @@ def get_group_level_analysis_group_comparison(self): input_names = ['list_1', 'list_2'], output_names = ['out_list'] ), - name = 'get_group_subjects' + name = 'get_equal_range_subjects' ) get_equal_range_subjects.inputs.list_1 = get_group('equalRange') get_equal_range_subjects.inputs.list_2 = self.subject_list @@ -538,6 +539,14 @@ def get_group_level_analysis_group_comparison(self): ['Eq range vs Eq indiff in loss', 'T', ['mean'], [1, -1]] ] + # Threshold Node - Create thresholded maps + threshold = Node(Threshold(), name = 'threshold') + threshold.inputs.use_fwe_correction = True + threshold.inputs.height_threshold_type = 'p-value' + threshold.inputs.force_activation = False + threshold.inputs.height_threshold = 0.05 + threshold.inputs.contrast_index = 1 + # Create the group level workflow group_level_analysis = Workflow( base_dir = self.directories.working_dir, @@ -558,142 +567,101 @@ def get_group_level_analysis_group_comparison(self): (get_equal_indifference_contrasts, twosampttest, [ (('out_list', clean_list), 'group2_files') ]), - (selectderivs, twosampttest, [('mask', 'explicit_mask_file')]), + #(selectderivs, twosampttest, [('mask', 'explicit_mask_file')]), (twosampttest, level2estimate, [('spm_mat_file', 'spm_mat_file')]), (level2estimate, level2conestimate, [ ('spm_mat_file', 'spm_mat_file'), ('beta_images', 'beta_images'), ('residual_image', 'residual_image') ]), + (level2conestimate, threshold, [ + ('spm_mat_file', 'spm_mat_file'), + ('spmT_images', 'stat_image') + ]), (level2estimate, data_sink, [ ('mask_image', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@mask')]), (level2conestimate, data_sink, [ ('spm_mat_file', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@spm_mat'), ('spmT_images', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@T'), - ('con_images', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@con')]) + ('con_images', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@con')]), + (threshold, data_sink, [ + ('thresholded_map', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@thresh')]) ]) return group_level_analysis + def get_group_level_outputs(self): """ Return all names for the files the group level analysis is supposed to generate. """ - # Handle equalRange and equalIndifference - - ## Contrast id 0001 - parameters = { - 'method': ['equalRange', 'equalIndifference'], - 'file': [ - 'con_0001.nii', 'con_0002.nii', 'mask.nii', 'SPM.mat', - 'spmT_0001.nii', 'spmT_0002.nii', - join('_threshold0', 'spmT_0001_thr.nii'), join('_threshold1', 'spmT_0002_thr.nii') - ], - 'model_type' : ['gain', 'loss'], - 'nb_subjects' : [str(len(self.subject_list))] - } - - parameter_sets = product(*parameters.values()) - template = join( - self.directories.output_dir, - 'group_level_analysis_{method}_nsub_{nb_subjects}', - '_contrast_id_0001_model_type_{model_type}', - '{file}' - ) - - return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ - for parameter_values in parameter_sets] - - ## Contrast id 0002 parameters = { - 'method': ['equalRange', 'equalIndifference'], - 'file': [ - 'con_0001.nii', 'con_0002.nii', 'mask.nii', 'SPM.mat', - 'spmT_0001.nii', 'spmT_0002.nii', - join('_threshold0', 'spmT_0001_thr.nii'), join('_threshold1', 'spmT_0002_thr.nii') - ], - 'nb_subjects' : [str(len(self.subject_list))] - } - - parameter_sets = product(*parameters.values()) - template = join( - self.directories.output_dir, - 'group_level_analysis_{method}_nsub_{nb_subjects}', - '_contrast_id_0002_model_type_loss', - '{file}' - ) - - return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\ - for parameter_values in parameter_sets] - - # Handle groupComp - parameters = { - 'method': ['groupComp'], + 'contrast_id': self.contrast_list, + 'method': ['equalRange', 'equalIndifference', 'groupComp'], 'file': [ 'con_0001.nii', 'mask.nii', 'SPM.mat', 'spmT_0001.nii', join('_threshold0', 'spmT_0001_thr.nii') ], 'nb_subjects' : [str(len(self.subject_list))] } + parameter_sets = product(*parameters.values()) template = join( self.directories.output_dir, 'group_level_analysis_{method}_nsub_{nb_subjects}', - '_contrast_id_0001_model_type_loss', + '_contrast_id_{contrast_id}', '{file}' ) - return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + return [template.format(**dict(zip(parameters.keys(), parameter_values)))\ for parameter_values in parameter_sets] - return return_list - def get_hypotheses_outputs(self): """ Return all hypotheses output file names. """ nb_sub = len(self.subject_list) files = [ # Hypothesis 1 join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_0001_model_type_gain', '_threshold0', 'spmT_0001_thr.nii'), + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_0001_model_type_gain', 'spmT_0001.nii'), + '_contrast_id_0002', 'spmT_0001.nii'), # Hypothesis 2 join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_0001_model_type_gain', '_threshold0', 'spmT_0001_thr.nii'), + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_0001_model_type_gain', 'spmT_0001.nii'), + '_contrast_id_0002', 'spmT_0001.nii'), # Hypothesis 3 join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_0001_model_type_gain', '_threshold0', 'spmT_0001_thr.nii'), + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_0001_model_type_gain', 'spmT_0001.nii'), + '_contrast_id_0002', 'spmT_0001.nii'), # Hypothesis 4 join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_0001_model_type_gain', '_threshold0', 'spmT_0001_thr.nii'), + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_0001_model_type_gain', 'spmT_0001.nii'), + '_contrast_id_0002', 'spmT_0001.nii'), # Hypothesis 5 join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_0001_model_type_loss', '_threshold1', 'spmT_0002_thr.nii'), + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_0001_model_type_loss', 'spmT_0002.nii'), + '_contrast_id_0003', 'spmT_0001.nii'), # Hypothesis 6 join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_0001_model_type_loss', '_threshold1', 'spmT_0002_thr.nii'), + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_0001_model_type_loss', 'spmT_0002.nii'), + '_contrast_id_0003', 'spmT_0001.nii'), # Hypothesis 7 join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_0001_model_type_loss', '_threshold0', 'spmT_0001_thr.nii'), + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_0001_model_type_loss', 'spmT_0001.nii'), + '_contrast_id_0003', 'spmT_0001.nii'), # Hypothesis 8 join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_0001_model_type_loss', '_threshold0', 'spmT_0001_thr.nii'), + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_0001_model_type_loss', 'spmT_0001.nii'), + '_contrast_id_0003', 'spmT_0001.nii'), # Hypothesis 9 join(f'group_level_analysis_groupComp_nsub_{nb_sub}', - '_contrast_id_0001_model_type_loss', '_threshold0', 'spmT_0001_thr.nii'), + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), join(f'group_level_analysis_groupComp_nsub_{nb_sub}', - '_contrast_id_0001_model_type_loss', 'spmT_0001.nii') + '_contrast_id_0003', 'spmT_0001.nii') ] return [join(self.directories.output_dir, f) for f in files] diff --git a/tests/pipelines/test_team_U26C.py b/tests/pipelines/test_team_U26C.py index b0971e07..fac86525 100644 --- a/tests/pipelines/test_team_U26C.py +++ b/tests/pipelines/test_team_U26C.py @@ -61,10 +61,9 @@ def test_create(): assert pipeline.get_run_level_analysis() is None assert isinstance(pipeline.get_subject_level_analysis(), Workflow) group_level = pipeline.get_group_level_analysis() - - """assert len(group_level) == 3 + assert len(group_level) == 3 for sub_workflow in group_level: - assert isinstance(sub_workflow, Workflow)""" + assert isinstance(sub_workflow, Workflow) @staticmethod @mark.unit_test @@ -72,11 +71,11 @@ def test_outputs(): """ Test the expected outputs of a PipelineTeamU26C object """ pipeline = PipelineTeamU26C() # 1 - 1 subject outputs - """pipeline.subject_list = ['001'] + pipeline.subject_list = ['001'] assert len(pipeline.get_preprocessing_outputs()) == 0 assert len(pipeline.get_run_level_outputs()) == 0 assert len(pipeline.get_subject_level_outputs()) == 7 - assert len(pipeline.get_group_level_outputs()) == 63 + assert len(pipeline.get_group_level_outputs()) == 45 assert len(pipeline.get_hypotheses_outputs()) == 18 # 2 - 4 subjects outputs @@ -84,8 +83,8 @@ def test_outputs(): assert len(pipeline.get_preprocessing_outputs()) == 0 assert len(pipeline.get_run_level_outputs()) == 0 assert len(pipeline.get_subject_level_outputs()) == 28 - assert len(pipeline.get_group_level_outputs()) == 63 - assert len(pipeline.get_hypotheses_outputs()) == 18""" + assert len(pipeline.get_group_level_outputs()) == 45 + assert len(pipeline.get_hypotheses_outputs()) == 18 @staticmethod @mark.unit_test From 7dd85b29adf54a6d8595628d0fb10d1160eea0fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 26 Jan 2024 16:55:29 +0100 Subject: [PATCH 35/41] [BUG] group level analyses names --- narps_open/pipelines/team_U26C.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index d89152fc..05cd30d9 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -414,7 +414,7 @@ def get_group_level_analysis_single_group(self, method): # Create the group level workflow group_level_analysis = Workflow( base_dir = self.directories.working_dir, - name = f'group_level_analysis_nsub_{nb_subjects}') + name = f'group_level_analysis_{method}_nsub_{nb_subjects}') group_level_analysis.connect([ (infosource, selectderivs, [('contrast_id', 'contrast_id')]), (selectderivs, get_contrasts, [('contrasts', 'input_str')]), @@ -436,13 +436,13 @@ def get_group_level_analysis_single_group(self, method): ('spmT_images', 'stat_image') ]), (level2estimate, data_sink, [ - ('mask_image', f'group_level_analysis_{method}_nsub_{nb_subjects}.@mask')]), + ('mask_image', f'{group_level_analysis.name}.@mask')]), (level2conestimate, data_sink, [ - ('spm_mat_file', f'group_level_analysis_{method}_nsub_{nb_subjects}.@spm_mat'), - ('spmT_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@T'), - ('con_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@con')]), + ('spm_mat_file', f'{group_level_analysis.name}.@spm_mat'), + ('spmT_images', f'{group_level_analysis.name}.@T'), + ('con_images', f'{group_level_analysis.name}.@con')]), (threshold, data_sink, [ - ('thresholded_map', f'group_level_analysis_{method}_nsub_{nb_subjects}.@thresh')]) + ('thresholded_map', f'{group_level_analysis.name}.@thresh')]) ]) return group_level_analysis @@ -579,13 +579,13 @@ def get_group_level_analysis_group_comparison(self): ('spmT_images', 'stat_image') ]), (level2estimate, data_sink, [ - ('mask_image', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@mask')]), + ('mask_image', f'{group_level_analysis.name}.@mask')]), (level2conestimate, data_sink, [ - ('spm_mat_file', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@spm_mat'), - ('spmT_images', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@T'), - ('con_images', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@con')]), + ('spm_mat_file', f'{group_level_analysis.name}.@spm_mat'), + ('spmT_images', f'{group_level_analysis.name}.@T'), + ('con_images', f'{group_level_analysis.name}.@con')]), (threshold, data_sink, [ - ('thresholded_map', f'group_level_analysis_groupComp_nsub_{nb_subjects}.@thresh')]) + ('thresholded_map', f'{group_level_analysis.name}.@thresh')]) ]) return group_level_analysis From 6fbf78b0989361898fef9c32034431e73737ec38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 30 Jan 2024 14:29:48 +0100 Subject: [PATCH 36/41] Waiting before removing smoothed files --- narps_open/pipelines/team_U26C.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index 05cd30d9..a29ca032 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -287,7 +287,7 @@ def get_subject_level_analysis(self): subject_level_analysis.connect([ (smooth, remove_gunzip, [('smoothed_files', '_')]), (gunzip, remove_gunzip, [('out_file', 'file_name')]), - (modelspec, remove_smooth, [('session_info', '_')]), + (data_sink, remove_smooth, [('out_file', '_')]), (smooth, remove_smooth, [('smoothed_files', 'file_name')]) ]) From 94b698b1b7fbb1a0d67c0a438f3336dd2362abb0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 30 Jan 2024 16:10:16 +0100 Subject: [PATCH 37/41] Typo in expected outputs for subject_level_analysis --- narps_open/pipelines/team_U26C.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index a29ca032..67b0caf4 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -298,14 +298,14 @@ def get_subject_level_outputs(self): templates = [join( self.directories.output_dir, - 'subject_level_analysis_loss', '_subject_id_{subject_id}', f'con_{contrast_id}.nii')\ + 'subject_level_analysis', '_subject_id_{subject_id}', f'con_{contrast_id}.nii')\ for contrast_id in self.contrast_list] templates += [join( self.directories.output_dir, - 'subject_level_analysis_loss', '_subject_id_{subject_id}', 'SPM.mat')] + 'subject_level_analysis', '_subject_id_{subject_id}', 'SPM.mat')] templates += [join( self.directories.output_dir, - 'subject_level_analysis_loss', '_subject_id_{subject_id}', f'spmT_{contrast_id}.nii')\ + 'subject_level_analysis', '_subject_id_{subject_id}', f'spmT_{contrast_id}.nii')\ for contrast_id in self.contrast_list] # Format with subject_ids From fe46efec67d8de4c9dfd9a073aafe4075a2ea217 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 31 Jan 2024 15:59:32 +0100 Subject: [PATCH 38/41] [BUG] mean not a condition in groupComp analysis --- narps_open/pipelines/team_U26C.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index 67b0caf4..f0ff17aa 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -536,7 +536,7 @@ def get_group_level_analysis_group_comparison(self): level2conestimate = Node(EstimateContrast(), name = 'level2conestimate') level2conestimate.inputs.group_contrast = True level2conestimate.inputs.contrasts = [ - ['Eq range vs Eq indiff in loss', 'T', ['mean'], [1, -1]] + ['Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [1, -1]] ] # Threshold Node - Create thresholded maps From 1525fcfee9eb9fb93bed1fa9c41c053e8e06b10d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 31 Jan 2024 16:02:40 +0100 Subject: [PATCH 39/41] [TEST] test update for pipeline outputs --- tests/pipelines/test_team_U26C.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/tests/pipelines/test_team_U26C.py b/tests/pipelines/test_team_U26C.py index fac86525..93436ac0 100644 --- a/tests/pipelines/test_team_U26C.py +++ b/tests/pipelines/test_team_U26C.py @@ -72,19 +72,11 @@ def test_outputs(): pipeline = PipelineTeamU26C() # 1 - 1 subject outputs pipeline.subject_list = ['001'] - assert len(pipeline.get_preprocessing_outputs()) == 0 - assert len(pipeline.get_run_level_outputs()) == 0 - assert len(pipeline.get_subject_level_outputs()) == 7 - assert len(pipeline.get_group_level_outputs()) == 45 - assert len(pipeline.get_hypotheses_outputs()) == 18 + helpers.test_pipeline_outputs(pipeline, [0, 0, 7, 45, 18]) # 2 - 4 subjects outputs pipeline.subject_list = ['001', '002', '003', '004'] - assert len(pipeline.get_preprocessing_outputs()) == 0 - assert len(pipeline.get_run_level_outputs()) == 0 - assert len(pipeline.get_subject_level_outputs()) == 28 - assert len(pipeline.get_group_level_outputs()) == 45 - assert len(pipeline.get_hypotheses_outputs()) == 18 + helpers.test_pipeline_outputs(pipeline, [0, 0, 28, 45, 18]) @staticmethod @mark.unit_test From 60a23c35b866066be2f1a00b905b64460f794b09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 6 Feb 2024 16:21:05 +0100 Subject: [PATCH 40/41] Issue with group level contrast definition --- narps_open/pipelines/team_U26C.py | 46 +++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 11 deletions(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index f0ff17aa..e2e36897 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -194,7 +194,7 @@ def get_subject_level_analysis(self): data_sink.inputs.base_directory = self.directories.output_dir # Gunzip - gunzip files because SPM do not use .nii.gz files - gunzip = MapNode(Gunzip(), name='gunzip', iterfield=['in_file']) + gunzip = MapNode(Gunzip(), name = 'gunzip', iterfield=['in_file']) # Smooth warped functionals. smooth = Node(Smooth(), name = 'smooth') @@ -401,7 +401,8 @@ def get_group_level_analysis_single_group(self, method): # EstimateContrast - estimates simple group contrast level2conestimate = Node(EstimateContrast(), name = 'level2conestimate') level2conestimate.inputs.group_contrast = True - level2conestimate.inputs.contrasts = [['Group', 'T', ['mean'], [1]]] + level2conestimate.inputs.contrasts = [ + ['Group', 'T', ['mean'], [1]], ['Group', 'T', ['mean'], [-1]]] # Threshold Node - Create thresholded maps threshold = Node(Threshold(), name = 'threshold') @@ -409,7 +410,7 @@ def get_group_level_analysis_single_group(self, method): threshold.inputs.height_threshold_type = 'p-value' threshold.inputs.force_activation = False threshold.inputs.height_threshold = 0.05 - threshold.inputs.contrast_index = 1 + threshold.inputs.contrast_index = [1, 2] # Create the group level workflow group_level_analysis = Workflow( @@ -593,12 +594,14 @@ def get_group_level_analysis_group_comparison(self): def get_group_level_outputs(self): """ Return all names for the files the group level analysis is supposed to generate. """ + # Handle equalRange and equalIndifference parameters = { 'contrast_id': self.contrast_list, - 'method': ['equalRange', 'equalIndifference', 'groupComp'], + 'method': ['equalRange', 'equalIndifference'], 'file': [ - 'con_0001.nii', 'mask.nii', 'SPM.mat', 'spmT_0001.nii', - join('_threshold0', 'spmT_0001_thr.nii') + 'con_0001.nii', 'con_0002.nii', 'mask.nii', 'SPM.mat', + 'spmT_0001.nii', 'spmT_0002.nii', + join('_threshold0', 'spmT_0001_thr.nii'), join('_threshold1', 'spmT_0002_thr.nii') ], 'nb_subjects' : [str(len(self.subject_list))] } @@ -610,10 +613,31 @@ def get_group_level_outputs(self): '_contrast_id_{contrast_id}', '{file}' ) + return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] - return [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + # Handle groupComp + parameters = { + 'contrast_id': self.contrast_list, + 'method': ['groupComp'], + 'file': [ + 'con_0001.nii', 'mask.nii', 'SPM.mat', 'spmT_0001.nii', + join('_threshold0', 'spmT_0001_thr.nii') + ], + 'nb_subjects' : [str(len(self.subject_list))] + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'group_level_analysis_{method}_nsub_{nb_subjects}', + '_contrast_id_{contrast_id}', + '{file}' + ) + return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\ for parameter_values in parameter_sets] + return return_list + def get_hypotheses_outputs(self): """ Return all hypotheses output file names. """ nb_sub = len(self.subject_list) @@ -640,14 +664,14 @@ def get_hypotheses_outputs(self): '_contrast_id_0002', 'spmT_0001.nii'), # Hypothesis 5 join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), + '_contrast_id_0003', '_threshold1', 'spmT_0002_thr.nii'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_0003', 'spmT_0001.nii'), + '_contrast_id_0003', 'spmT_0002.nii'), # Hypothesis 6 join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), + '_contrast_id_0003', '_threshold1', 'spmT_0002_thr.nii'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_0003', 'spmT_0001.nii'), + '_contrast_id_0003', 'spmT_0002.nii'), # Hypothesis 7 join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), From 35ef3ce67e0f4b69861cf18477fae70627203eac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 6 Feb 2024 17:00:09 +0100 Subject: [PATCH 41/41] Issue with group level contrast definition --- narps_open/pipelines/team_U26C.py | 3 ++- tests/pipelines/test_team_U26C.py | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_U26C.py b/narps_open/pipelines/team_U26C.py index e2e36897..4c4e8e73 100755 --- a/narps_open/pipelines/team_U26C.py +++ b/narps_open/pipelines/team_U26C.py @@ -405,7 +405,8 @@ def get_group_level_analysis_single_group(self, method): ['Group', 'T', ['mean'], [1]], ['Group', 'T', ['mean'], [-1]]] # Threshold Node - Create thresholded maps - threshold = Node(Threshold(), name = 'threshold') + threshold = MapNode(Threshold(), name = 'threshold', + iterfield = ['stat_image', 'contrast_index']) threshold.inputs.use_fwe_correction = True threshold.inputs.height_threshold_type = 'p-value' threshold.inputs.force_activation = False diff --git a/tests/pipelines/test_team_U26C.py b/tests/pipelines/test_team_U26C.py index 93436ac0..c670b520 100644 --- a/tests/pipelines/test_team_U26C.py +++ b/tests/pipelines/test_team_U26C.py @@ -72,11 +72,11 @@ def test_outputs(): pipeline = PipelineTeamU26C() # 1 - 1 subject outputs pipeline.subject_list = ['001'] - helpers.test_pipeline_outputs(pipeline, [0, 0, 7, 45, 18]) + helpers.test_pipeline_outputs(pipeline, [0, 0, 7, 8*3*2 + 5*3, 18]) # 2 - 4 subjects outputs pipeline.subject_list = ['001', '002', '003', '004'] - helpers.test_pipeline_outputs(pipeline, [0, 0, 28, 45, 18]) + helpers.test_pipeline_outputs(pipeline, [0, 0, 28, 8*3*2 + 5*3, 18]) @staticmethod @mark.unit_test