From 879271f5254259ab798d59a9ddf9067ba885ae01 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 25 Aug 2023 16:34:39 +0200 Subject: [PATCH 01/19] Dummy change to test 2T6S pipeline on CI with rectified results data --- narps_open/pipelines/team_2T6S.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_2T6S.py b/narps_open/pipelines/team_2T6S.py index 42aa344f..0ec99d75 100755 --- a/narps_open/pipelines/team_2T6S.py +++ b/narps_open/pipelines/team_2T6S.py @@ -1,7 +1,7 @@ #!/usr/bin/python # coding: utf-8 -""" Write the work of NARPS' team 2T6S using Nipype """ +""" Write the work of NARPS team 2T6S using Nipype """ from os.path import join from itertools import product From 812d518edc7bc1a722bc567c98f8bb2286584038 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 29 Aug 2023 09:56:26 +0200 Subject: [PATCH 02/19] [REFAC] 2T6S pipeline --- narps_open/pipelines/team_2T6S.py | 112 +++++++++++++++++------------- 1 file changed, 62 insertions(+), 50 deletions(-) diff --git a/narps_open/pipelines/team_2T6S.py b/narps_open/pipelines/team_2T6S.py index 0ec99d75..0b3746f6 100755 --- a/narps_open/pipelines/team_2T6S.py +++ b/narps_open/pipelines/team_2T6S.py @@ -78,8 +78,10 @@ def get_subject_infos(event_files, runs): duration[val].append(float(info[4])) # durations for trial (rpz by RT) else: # trial with no response : duration of 4 s duration[val].append(float(4)) - weights_gain[val_gain].append(float(info[2])) # weights gain for trial_run1 - weights_loss[val_loss].append(-1.0 * float(info[3])) # weights loss for trial_run1 + # weights gain for trial_run1 + weights_gain[val_gain].append(float(info[2])) + # weights loss for trial_run1 + weights_loss[val_loss].append(-1.0 * float(info[3])) # Bunching is done per run, i.e. trial_run1, trial_run2, etc. 
# But names must not have '_run1' etc because we concatenate runs @@ -144,8 +146,8 @@ def get_parameters_file(filepaths, subject_id, working_dir): from os import mkdir from os.path import join, isdir - import pandas as pd - import numpy as np + from pandas import read_csv, DataFrame + from numpy import array, transpose # Handle the case where filepaths is a single path (str) if not isinstance(filepaths, list): @@ -154,17 +156,17 @@ def get_parameters_file(filepaths, subject_id, working_dir): # Create the parameters files parameters_file = [] for file_id, file in enumerate(filepaths): - data_frame = pd.read_csv(file, sep = '\t', header=0) + data_frame = read_csv(file, sep = '\t', header=0) # Extract parameters we want to use for the model - temp_list = np.array([ + temp_list = array([ data_frame['X'], data_frame['Y'], data_frame['Z'], data_frame['RotX'], data_frame['RotY'], data_frame['RotZ']]) - retained_parameters = pd.DataFrame(np.transpose(temp_list)) + retained_parameters = DataFrame(transpose(temp_list)) # Write parameters to a parameters file # TODO : warning !!! 
filepaths must be ordered (1,2,3,4) for the following code to work - new_path =join(working_dir, 'parameters_file', + new_path = join(working_dir, 'parameters_file', f'parameters_file_sub-{subject_id}_run-{str(file_id + 1).zfill(2)}.tsv') if not isdir(join(working_dir, 'parameters_file')): @@ -187,11 +189,11 @@ def remove_gunzip_files(_, subject_id, working_dir): Parameters: - _: Node input only used for triggering the Node - - subject_id: str, TODO - - working_id: str, TODO + - subject_id: str, subject id from which to remove the unzipped file + - working_dir: str, path to the working directory """ - from shutil import rmtree from os.path import join + from shutil import rmtree try: rmtree(join(working_dir, 'l1_analysis', f'_subject_id_{subject_id}', 'gunzip_func')) @@ -209,11 +211,11 @@ def remove_smoothed_files(_, subject_id, working_dir): Parameters: - _: Node input only used for triggering the Node - - subject_id: str, TODO - - working_id: str, TODO + - subject_id: str, subject id from which to remove the smoothed file + - working_dir: str, path to the working directory """ - from shutil import rmtree from os.path import join + from shutil import rmtree try: rmtree(join(working_dir, 'l1_analysis', f'_subject_id_{subject_id}', 'smooth')) @@ -231,11 +233,7 @@ def get_subject_level_analysis(self): """ # Infosource Node - To iterate on subjects infosource = Node(IdentityInterface( - fields = ['subject_id', 'dataset_dir', 'results_dir', 'working_dir', 'run_list'], - dataset_dir = self.directories.dataset_dir, - results_dir = self.directories.results_dir, - working_dir = self.directories.working_dir, - run_list = self.run_list), + fields = ['subject_id']), name = 'infosource') infosource.iterables = [('subject_id', self.subject_list)] @@ -401,8 +399,10 @@ def get_subset_contrasts(file_list, subject_list, participants_file): Returns : - equal_indifference_id : a list of subject ids in the equalIndifference group - equal_range_id : a list of subject ids in the 
equalRange group - - equal_indifference_files : a subset of file_list corresponding to subjects in the equalIndifference group - - equal_range_files : a subset of file_list corresponding to subjects in the equalRange group + - equal_indifference_files : a subset of file_list corresponding to + subjects in the equalIndifference group + - equal_range_files : a subset of file_list corresponding to + subjects in the equalRange group """ equal_indifference_id = [] equal_range_id = [] @@ -454,8 +454,7 @@ def get_group_level_analysis_sub_workflow(self, method): # Infosource - iterate over the list of contrasts infosource_groupanalysis = Node( IdentityInterface( - fields = ['contrast_id', 'subjects'], - subjects = self.subject_list), + fields = ['contrast_id', 'subjects']), name = 'infosource_groupanalysis') infosource_groupanalysis.iterables = [('contrast_id', self.contrast_list)] @@ -469,7 +468,7 @@ def get_group_level_analysis_sub_workflow(self, method): } selectfiles_groupanalysis = Node(SelectFiles( - templates, base_directory=self.directories.results_dir, force_list= True), + templates, base_directory = self.directories.results_dir, force_list = True), name = 'selectfiles_groupanalysis') # Datasink - save important files @@ -481,14 +480,14 @@ def get_group_level_analysis_sub_workflow(self, method): # Function node get_subset_contrasts - select subset of contrasts sub_contrasts = Node(Function( function = self.get_subset_contrasts, - input_names = ['file_list', 'method', 'subject_list', 'participants_file'], + input_names = ['file_list', 'subject_list', 'participants_file'], output_names = [ 'equalIndifference_id', 'equalRange_id', 'equalIndifference_files', 'equalRange_files']), name = 'sub_contrasts') - sub_contrasts.inputs.method = method + sub_contrasts.inputs.subject_list = self.subject_list # Estimate model estimate_model = Node(EstimateModel( @@ -513,8 +512,6 @@ def get_group_level_analysis_sub_workflow(self, method): l2_analysis.connect([ 
(infosource_groupanalysis, selectfiles_groupanalysis, [ ('contrast_id', 'contrast_id')]), - (infosource_groupanalysis, sub_contrasts, [ - ('subjects', 'subject_list')]), (selectfiles_groupanalysis, sub_contrasts, [ ('contrast', 'file_list'), ('participants', 'participants_file')]), @@ -618,29 +615,44 @@ def get_group_level_outputs(self): return return_list def get_hypotheses_outputs(self): - """ Return all hypotheses output file names. - Note that hypotheses 5 to 8 correspond to the maps given by the team in their results ; - but they are not fully consistent with the hypotheses definitions as expected by NARPS. - """ + """ Return all hypotheses output file names. """ nb_sub = len(self.subject_list) files = [ - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0002', 'spmT_0001.nii'), - join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0002', 'spmT_0001.nii'), - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0002', 'spmT_0001.nii'), - join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0002', 'spmT_0001.nii'), - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0003', '_threshold1', 'spmT_0002_thr.nii'), - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0003', 'spmT_0002.nii'), - join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0003', '_threshold1', 'spmT_0001_thr.nii'), - join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0003', 'spmT_0001.nii'), - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0003', 
'_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0003', 'spmT_0001.nii'), - join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0003', '_threshold0', 'spmT_0002_thr.nii'), - join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0003', 'spmT_0002.nii'), - join(f'l2_analysis_groupComp_nsub_{nb_sub}', '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_groupComp_nsub_{nb_sub}', '_contrast_id_0003', 'spmT_0001.nii') + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold1', 'spmT_0002_thr.nii'), + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0002.nii'), + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold1', 'spmT_0002_thr.nii'), + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0002.nii'), + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii'), + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + 
'_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii'), + join(f'l2_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii') ] return [join(self.directories.output_dir, f) for f in files] From eeee10e0485f65f158c40dfedad83785fc7d9025 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 29 Aug 2023 10:05:43 +0200 Subject: [PATCH 03/19] [BUG] cleaning connections --- narps_open/pipelines/team_2T6S.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_2T6S.py b/narps_open/pipelines/team_2T6S.py index 0b3746f6..134dc7f3 100755 --- a/narps_open/pipelines/team_2T6S.py +++ b/narps_open/pipelines/team_2T6S.py @@ -273,6 +273,7 @@ def get_subject_level_analysis(self): input_names = ['event_files', 'runs'], output_names = ['subject_info']), name = 'subject_infos') + subject_infos.inputs.runs = self.run_list # SpecifyModel - generates SPM-specific Model specify_model = Node(SpecifySPMModel( @@ -330,7 +331,6 @@ def get_subject_level_analysis(self): l1_analysis = Workflow(base_dir = self.directories.working_dir, name = 'l1_analysis') l1_analysis.connect([ (infosource, selectfiles, [('subject_id', 'subject_id')]), - (infosource, subject_infos, [('run_list', 'runs')]), (infosource, remove_gunzip_files, [('subject_id', 'subject_id')]), (infosource, remove_smoothed_files, [('subject_id', 'subject_id')]), (subject_infos, specify_model, [('subject_info', 'subject_info')]), From 1b0170e5f76d8a1687af3029b76a0dc1a96582e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 29 Aug 2023 10:11:12 +0200 Subject: [PATCH 04/19] [BUG] cleaning connections --- narps_open/pipelines/team_2T6S.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_2T6S.py 
b/narps_open/pipelines/team_2T6S.py index 134dc7f3..2984b5a9 100755 --- a/narps_open/pipelines/team_2T6S.py +++ b/narps_open/pipelines/team_2T6S.py @@ -333,13 +333,11 @@ def get_subject_level_analysis(self): (infosource, selectfiles, [('subject_id', 'subject_id')]), (infosource, remove_gunzip_files, [('subject_id', 'subject_id')]), (infosource, remove_smoothed_files, [('subject_id', 'subject_id')]), + (infosource, parameters, [('subject_id', 'subject_id')]), (subject_infos, specify_model, [('subject_info', 'subject_info')]), (contrasts, contrast_estimate, [('contrasts', 'contrasts')]), (selectfiles, parameters, [('param', 'filepaths')]), (selectfiles, subject_infos, [('event', 'event_files')]), - (infosource, parameters, [ - ('subject_id', 'subject_id'), - ('working_dir', 'working_dir')]), (selectfiles, gunzip_func, [('func', 'in_file')]), (gunzip_func, smooth, [('out_file', 'in_files')]), (smooth, specify_model, [('smoothed_files', 'functional_runs')]), From 360dcd09d625995b79fc22499e652a22dc26a399 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 30 Aug 2023 11:27:30 +0200 Subject: [PATCH 05/19] [TEST] init a test for conftest.py --- tests/test_conftest.py | 139 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 139 insertions(+) create mode 100644 tests/test_conftest.py diff --git a/tests/test_conftest.py b/tests/test_conftest.py new file mode 100644 index 00000000..5b1a1f4f --- /dev/null +++ b/tests/test_conftest.py @@ -0,0 +1,139 @@ +#!/usr/bin/python +# coding: utf-8 + +""" Tests of the 'conftest.py' module. 
+ +Launch this test with PyTest + +Usage: +====== + pytest -q test_conftest.py + pytest -q test_conftest.py -k +""" + +from os import remove +from os.path import join, isfile, abspath +from pathlib import Path + +from datetime import datetime + +from pytest import raises, mark + +from nipype import Node, Workflow +from nipype.interfaces.utility import Function + +from narps_open.utils.configuration import Configuration +from narps_open.runner import PipelineRunner +from narps_open.pipelines import Pipeline +from narps_open.pipelines.team_2T6S import PipelineTeam2T6S + +class MockupPipeline(Pipeline): + """ A simple Pipeline class for test purposes """ + + def __init__(self): + super().__init__() + self.test_file = abspath( + join(Configuration()['directories']['test_runs'], 'test_conftest.txt')) + if isfile(self.test_file): + remove(self.test_file) + + def __del__(self): + if isfile(self.test_file): + remove(self.test_file) + + # @staticmethod + def write_to_file(_, text_to_write: str, file_path: str): + """ Method used inside a nipype Node, to write a line in a test file """ + with open(file_path, 'a', encoding = 'utf-8') as file: + file.write(text_to_write) + + def create_workflow(self, workflow_name: str): + """ Return a nipype workflow with two nodes writing in a file """ + node_1 = Node(Function( + input_names = ['_', 'text_to_write', 'file_path'], + output_names = ['_'], + function = self.write_to_file), + name = 'node_1' + ) + # this input is set to now(), so that it changes at every run, thus preventing + # nipype's cache to work + node_1.inputs._ = datetime.now() + node_1.inputs.text_to_write = 'MockupPipeline : '+workflow_name+' node_1\n' + node_1.inputs.file_path = self.test_file + + node_2 = Node(Function( + input_names = ['_', 'text_to_write', 'file_path'], + output_names = [], + function = self.write_to_file), + name = 'node_2' + ) + node_2.inputs.text_to_write = 'MockupPipeline : '+workflow_name+' node_2\n' + node_2.inputs.file_path = self.test_file 
+ + workflow = Workflow( + base_dir = Configuration()['directories']['test_runs'], + name = workflow_name + ) + workflow.add_nodes([node_1, node_2]) + workflow.connect(node_1, '_', node_2, '_') + + return workflow + + def get_preprocessing(self): + """ Return a fake preprocessing workflow """ + return self.create_workflow('TestPipelineRunner_preprocessing_workflow') + + def get_run_level_analysis(self): + """ Return a fake run level workflow """ + return self.create_workflow('TestPipelineRunner_run_level_workflow') + + def get_subject_level_analysis(self): + """ Return a fake subject level workflow """ + return self.create_workflow('TestPipelineRunner_subject_level_workflow') + + def get_group_level_analysis(self): + """ Return a fake group level workflow """ + return self.create_workflow('TestPipelineRunner_group_level_workflow') + + def get_preprocessing_outputs(self): + """ Return a list of templates of the output files generated by the preprocessing """ + return [join(Configuration()['directories']['test_runs'], 'preprocessing_output.md')] + + def get_run_level_outputs(self): + """ Return a list of templates of the output files generated by the run level analysis. + Templates are expressed relatively to the self.directories.output_dir. + """ + return [join(Configuration()['directories']['test_runs'], 'run_output.md')] + + def get_subject_level_outputs(self): + """ Return a list of templates of the output files generated by the subject level analysis. + Templates are expressed relatively to the self.directories.output_dir. 
+ """ + templates = [ + join(Configuration()['directories']['test_runs'], 'subject_{subject_id}_output_1.md'), + join(Configuration()['directories']['test_runs'], 'subject_{subject_id}_output_2.md') + ] + return_list = [] + for subject_id in self.subject_list: + return_list += [t.format(subject_id = subject_id) for t in templates] + + return return_list + + def get_group_level_outputs(self): + """ Return a list of templates of the output files generated by the group level analysis. + Templates are expressed relatively to the self.directories.output_dir. + """ + templates = [ + join(Configuration()['directories']['test_runs'], 'group_{nb_subjects}_output_a.md'), + join(Configuration()['directories']['test_runs'], 'group_{nb_subjects}_output_b.md') + ] + return [t.format(nb_subjects = len(self.subject_list)) for t in templates] + + def get_hypotheses_outputs(self): + """ Return the names of the files used by the team to answer the hypotheses of NARPS. + """ + template = join(Configuration()['directories']['test_runs'], 'hypothesis_{id}.md') + return [template.format(id = i) for i in range(1,18)] + +class TestPipelineRunner: + """ A class that contains all the unit tests for the PipelineRunner class.""" From babdf1b8a6181812ded3e3f037ce1e39324e0d2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 31 Aug 2023 14:22:43 +0200 Subject: [PATCH 06/19] [ENH] modifying actions workflows --- .github/workflows/codespell.yml | 4 ++-- .github/workflows/pipeline_tests.yml | 11 ++++++++++- .github/workflows/test_changes.yml | 2 +- .github/workflows/unit_tests.yml | 16 ++++------------ 4 files changed, 17 insertions(+), 16 deletions(-) diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml index 3ebbf550..0e528aa4 100644 --- a/.github/workflows/codespell.yml +++ b/.github/workflows/codespell.yml @@ -1,5 +1,5 @@ ---- -name: Codespell +## Disclaimer - This GitHub Actions workflow performs a check for spelling errors inside code comments 
and documentation +name: codespell on: push: diff --git a/.github/workflows/pipeline_tests.yml b/.github/workflows/pipeline_tests.yml index f6cef4b4..f90d1cfd 100644 --- a/.github/workflows/pipeline_tests.yml +++ b/.github/workflows/pipeline_tests.yml @@ -12,7 +12,6 @@ on: # Jobs that define the workflow jobs: - # A job to list the tests to be run identify-tests: runs-on: ubuntu-latest @@ -73,7 +72,17 @@ jobs: pytest -s -q -m "pipeline_test" ${{ needs.identify-tests.outputs.tests }} fi + - name: Archive pipeline execution failures + if: ${{ failure() }} # Run only if previous job fails + uses: actions/upload-artifact@v3 + with: + name: pipeline_tests-reports + path: | + crash-*.pklz + retention-days: 15 + - name: Report results on GitHub + if: ${{ always() }} run: | # Start report echo "# Correlation values" >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/test_changes.yml b/.github/workflows/test_changes.yml index c33e7cb0..b7f5d941 100644 --- a/.github/workflows/test_changes.yml +++ b/.github/workflows/test_changes.yml @@ -54,7 +54,7 @@ jobs: python -m pip install --upgrade pip pip install .[tests] - - name: Collect tests with pytest + - name: Execute tests with pytest run: | if [[ "${{ needs.identify-tests.outputs.tests }}" != "" ]]; then pytest -s -q ${{ needs.identify-tests.outputs.tests }} diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 4859e55d..20f20ea3 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -27,24 +27,15 @@ jobs: pytest: # Define the runner for this job - runs-on: ubuntu-latest + runs-on: self-hosted # Steps that define the job steps: - name: Checkout repository uses: actions/checkout@v3 - - name: Set up Python 3.9 - uses: actions/setup-python@v3 - with: - python-version: 3.9 - - - uses: actions/cache@v3 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }} - restore-keys: | - ${{ runner.os }}-pip- + - name: Load configuration for 
self-hosted runner + run: cp /home/neuro/local_testing_config.toml narps_open/utils/configuration/testing_config.toml - name: Install dependencies run: | @@ -58,6 +49,7 @@ jobs: coverage xml - name: Archive pytest results + if: ${{ failure() }} # Run only if previous job fails uses: actions/upload-artifact@v3 with: name: unit_tests-reports From 6986e90351bca4071ea61387bee1f52f1125aceb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 31 Aug 2023 14:35:10 +0200 Subject: [PATCH 07/19] [BUG] inside unit_tests workflow --- .github/workflows/unit_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 20f20ea3..d0097882 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -34,7 +34,7 @@ jobs: - name: Checkout repository uses: actions/checkout@v3 - - name: Load configuration for self-hosted runner + - name: Load configuration for self-hosted runner run: cp /home/neuro/local_testing_config.toml narps_open/utils/configuration/testing_config.toml - name: Install dependencies From 0f8365a1e846b3b3a671fc866b50a7c6066f5e71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 1 Sep 2023 18:03:41 +0200 Subject: [PATCH 08/19] [TEST] testing the conftest module --- narps_open/data/results/__init__.py | 6 +- tests/conftest.py | 13 +- tests/data/test_results.py | 4 +- tests/test_conftest.py | 254 +++++++++++++++++++++------- 4 files changed, 205 insertions(+), 72 deletions(-) diff --git a/narps_open/data/results/__init__.py b/narps_open/data/results/__init__.py index a1108034..c36e6106 100644 --- a/narps_open/data/results/__init__.py +++ b/narps_open/data/results/__init__.py @@ -56,7 +56,7 @@ def get_uid(self): def get_file_urls(self): """ Return a dict containing the download url for each file of the collection. 
- * dict key is the file base name (with no extension) + * dict key is the file base name (with extension) * dict value is the download url for the file on Neurovault """ @@ -69,7 +69,7 @@ def get_file_urls(self): file_urls = {} for result in json['results']: # Get data for a file in the collection - file_urls[result['name']] = result['file'] + file_urls[result['name']+'.nii.gz'] = result['file'] return file_urls @@ -84,7 +84,7 @@ def download(self): for file_name, file_url in self.files.items(): urlretrieve( file_url, - join(self.directory, file_name+'.nii.gz'), + join(self.directory, file_name), show_download_progress ) diff --git a/tests/conftest.py b/tests/conftest.py index d740eeac..5ca91a07 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -58,7 +58,9 @@ def test_pipeline_execution( # Get missing subjects missing_subjects = set() for file in runner.get_missing_first_level_outputs(): - missing_subjects.add(get_subject_id(file)) + subject_id = get_subject_id(file) + if subject_id is not None: + missing_subjects.add(subject_id) # Leave if no missing subjects if not missing_subjects: @@ -66,7 +68,10 @@ def test_pipeline_execution( # Start pipeline runner.subjects = missing_subjects - runner.start(True, False) + try: # This avoids errors in the workflow to make the test fail + runner.start(True, False) + except(RuntimeError) as err: + print('RuntimeError: ', err) # Check missing files for the last time missing_files = runner.get_missing_first_level_outputs() @@ -80,7 +85,6 @@ def test_pipeline_execution( # Indices and keys to the unthresholded maps indices = list(range(1, 18, 2)) - keys = [f'hypo{i}_unthresh.nii.gz' for i in range(1, 10)] # Retrieve the paths to the reproduced files reproduced_files = runner.pipeline.get_hypotheses_outputs() @@ -88,7 +92,8 @@ def test_pipeline_execution( # Retrieve the paths to the results files collection = ResultsCollection(team_id) - results_files = [join(collection.directory, collection.files[k]) for k in keys] + 
results_files = [join(collection.directory, f) for f in collection.files.keys()] + results_files = [results_files[i] for i in indices] # Compute the correlation coefficients return [ diff --git a/tests/data/test_results.py b/tests/data/test_results.py index b32abeed..46d2d1f5 100644 --- a/tests/data/test_results.py +++ b/tests/data/test_results.py @@ -48,7 +48,7 @@ def test_create(): assert collection.uid == '4881' assert 'results/orig/4881_2T6S' in collection.directory test_str = 'http://neurovault.org/media/images/4881/hypo1_thresholded_revised.nii.gz' - assert collection.files['hypo1_thresh'] == test_str + assert collection.files['hypo1_thresh.nii.gz'] == test_str collection = ResultsCollection('43FJ') assert collection.team_id == '43FJ' @@ -56,7 +56,7 @@ def test_create(): assert 'results/orig/4824_43FJ' in collection.directory test_str = 'http://neurovault.org/media/images/4824/' test_str += 'Zstat_Thresholded_Negative_Effect_of_Loss_Equal_Indifference.nii.gz' - assert collection.files['hypo5_thresh'] == test_str + assert collection.files['hypo5_thresh.nii.gz'] == test_str @staticmethod @mark.unit_test diff --git a/tests/test_conftest.py b/tests/test_conftest.py index 5b1a1f4f..4dfce4d2 100644 --- a/tests/test_conftest.py +++ b/tests/test_conftest.py @@ -11,13 +11,13 @@ pytest -q test_conftest.py -k """ -from os import remove -from os.path import join, isfile, abspath -from pathlib import Path +from os import makedirs +from os.path import join, abspath, isdir, isfile +from shutil import rmtree from datetime import datetime -from pytest import raises, mark +from pytest import raises, mark, helpers, fixture from nipype import Node, Workflow from nipype.interfaces.utility import Function @@ -25,115 +25,243 @@ from narps_open.utils.configuration import Configuration from narps_open.runner import PipelineRunner from narps_open.pipelines import Pipeline -from narps_open.pipelines.team_2T6S import PipelineTeam2T6S +from narps_open.data.results import 
ResultsCollection + +TEST_DIR = abspath(join(Configuration()['directories']['test_runs'], 'test_conftest')) + +@fixture +def set_test_directory(scope = 'function'): + rmtree(TEST_DIR, ignore_errors = True) + makedirs(TEST_DIR, exist_ok = True) + + yield + + # Comment this line for debugging + #rmtree(TEST_DIR, ignore_errors = True) class MockupPipeline(Pipeline): """ A simple Pipeline class for test purposes """ def __init__(self): super().__init__() - self.test_file = abspath( - join(Configuration()['directories']['test_runs'], 'test_conftest.txt')) - if isfile(self.test_file): - remove(self.test_file) - - def __del__(self): - if isfile(self.test_file): - remove(self.test_file) - - # @staticmethod - def write_to_file(_, text_to_write: str, file_path: str): - """ Method used inside a nipype Node, to write a line in a test file """ - with open(file_path, 'a', encoding = 'utf-8') as file: - file.write(text_to_write) - - def create_workflow(self, workflow_name: str): - """ Return a nipype workflow with two nodes writing in a file """ - node_1 = Node(Function( - input_names = ['_', 'text_to_write', 'file_path'], - output_names = ['_'], - function = self.write_to_file), - name = 'node_1' + self.test_file = join(TEST_DIR, 'test_conftest.txt') + + # Init the test_file : write a number of execution set to zero + with open(self.test_file, 'w', encoding = 'utf-8') as file: + file.write(str(0)) + + def update_execution_count(_, file_path: str): + """ Method used inside a nipype Node, to update the execution count inside the file. 
+ Arguments: + - file_path:str, path to the execution count file + + Return: the updated number of executions + """ + + # Read file counter + execution_counter = 0 + with open(file_path, 'r', encoding = 'utf-8') as file: + execution_counter = int(file.readline()) + + execution_counter += 1 + + # Write execution count back + with open(file_path, 'w', encoding = 'utf-8') as file: + file.write(str(execution_counter)) + + return execution_counter + + def decide_exception(execution_counter: int): + """ Method used inside a nipype Node, to simulate an exception during one run """ + if execution_counter == 1: + raise AttributeError + + def write_files(_, file_list: list, execution_counter: int): + """ Method used inside a nipype Node, to create a set of files """ + from pathlib import Path + + if execution_counter != 0: + for file_path in file_list: + Path(file_path).touch() + + def create_workflow(self, workflow_name: str, file_list: list): + """ Return a nipype workflow with two nodes. + First node updates the number of executions of the workflow? + Second node produces an exception, every two executions of the workflow. + Third node writes the output files, except once every three executions + of the workflow. 
+ + Arguments: + - workflow_name: str, the name of the workflow to create + - file_list: list, list of the files that the workflow is supposed to generate + """ + node_count = Node(Function( + input_names = ['_', 'file_path'], + output_names = ['execution_counter'], + function = self.update_execution_count), + name = 'node_count' ) # this input is set to now(), so that it changes at every run, thus preventing # nipype's cache to work - node_1.inputs._ = datetime.now() - node_1.inputs.text_to_write = 'MockupPipeline : '+workflow_name+' node_1\n' - node_1.inputs.file_path = self.test_file + node_count.inputs._ = datetime.now() + node_count.inputs.file_path = self.test_file + + node_decide = Node(Function( + input_names = ['execution_counter'], + output_names = ['_'], + function = self.decide_exception), + name = 'node_decide' + ) - node_2 = Node(Function( - input_names = ['_', 'text_to_write', 'file_path'], + node_files = Node(Function( + input_names = ['_', 'file_list', 'execution_counter'], output_names = [], - function = self.write_to_file), - name = 'node_2' + function = self.write_files), + name = 'node_files' ) - node_2.inputs.text_to_write = 'MockupPipeline : '+workflow_name+' node_2\n' - node_2.inputs.file_path = self.test_file + node_files.inputs.file_list = file_list workflow = Workflow( - base_dir = Configuration()['directories']['test_runs'], + base_dir = TEST_DIR, name = workflow_name ) - workflow.add_nodes([node_1, node_2]) - workflow.connect(node_1, '_', node_2, '_') + workflow.add_nodes([node_count, node_decide, node_files]) + workflow.connect(node_count, 'execution_counter', node_decide, 'execution_counter') + workflow.connect(node_count, 'execution_counter', node_files, 'execution_counter') + workflow.connect(node_decide, '_', node_files, '_') return workflow def get_preprocessing(self): """ Return a fake preprocessing workflow """ - return self.create_workflow('TestPipelineRunner_preprocessing_workflow') + return self.create_workflow( + 
'TestConftest_preprocessing_workflow', + self.get_preprocessing_outputs() + ) def get_run_level_analysis(self): """ Return a fake run level workflow """ - return self.create_workflow('TestPipelineRunner_run_level_workflow') + return self.create_workflow( + 'TestConftest_run_level_workflow', + self.get_run_level_outputs() + ) def get_subject_level_analysis(self): """ Return a fake subject level workflow """ - return self.create_workflow('TestPipelineRunner_subject_level_workflow') + return self.create_workflow( + 'TestConftest_subject_level_workflow', + self.get_subject_level_outputs() + ) def get_group_level_analysis(self): """ Return a fake subject level workflow """ - return self.create_workflow('TestPipelineRunner_group_level_workflow') + return self.create_workflow( + 'TestConftest_group_level_workflow', + self.get_group_level_outputs() + ) def get_preprocessing_outputs(self): """ Return a list of templates of the output files generated by the preprocessing """ - return [join(Configuration()['directories']['test_runs'], 'preprocessing_output.md')] + template = join(TEST_DIR, 'subject_id_{subject_id}_output_preprocessing_1.md') + return [template.format(subject_id = s) for s in self.subject_list] def get_run_level_outputs(self): """ Return a list of templates of the output files generated by the run level analysis. Templates are expressed relatively to the self.directories.output_dir. """ - return [join(Configuration()['directories']['test_runs'], 'run_output.md')] + template = join(TEST_DIR, 'subject_id_{subject_id}_output_run_1.md') + return [template.format(subject_id = s) for s in self.subject_list] def get_subject_level_outputs(self): """ Return a list of templates of the output files generated by the subject level analysis. Templates are expressed relatively to the self.directories.output_dir. 
""" - templates = [ - join(Configuration()['directories']['test_runs'], 'subject_{subject_id}_output_1.md'), - join(Configuration()['directories']['test_runs'], 'subject_{subject_id}_output_2.md') - ] - return_list = [] - for subject_id in self.subject_list: - return_list += [t.format(subject_id = subject_id) for t in templates] - - return return_list + template = join(TEST_DIR, 'subject_id_{subject_id}_output_analysis_1.md') + return [template.format(subject_id = s) for s in self.subject_list] def get_group_level_outputs(self): """ Return a list of templates of the output files generated by the group level analysis. Templates are expressed relatively to the self.directories.output_dir. """ templates = [ - join(Configuration()['directories']['test_runs'], 'group_{nb_subjects}_output_a.md'), - join(Configuration()['directories']['test_runs'], 'group_{nb_subjects}_output_b.md') + join(TEST_DIR, 'group_{nb_subjects}_output_a.md'), + join(TEST_DIR, 'group_{nb_subjects}_output_b.md') ] - return [t.format(nb_subjects = len(self.subject_list)) for t in templates] + return_list = [t.format(nb_subjects = len(self.subject_list)) for t in templates] + + template = join(TEST_DIR, 'hypothesis_{id}.md') + return_list += [template.format(id = i) for i in range(1,19)] + + return return_list def get_hypotheses_outputs(self): - """ Return the names of the files used by the team to answer the hypotheses of NARPS. - """ - template = join(Configuration()['directories']['test_runs'], 'hypothesis_{id}.md') - return [template.format(id = i) for i in range(1,18)] + """ Return the names of the files used by the team to answer the hypotheses of NARPS. 
""" + template = join(TEST_DIR, 'hypothesis_{id}.md') + return [template.format(id = i) for i in range(1,19)] + +class TestConftest: + """ A class that contains all the unit tests for the conftest module.""" + + @staticmethod + @mark.unit_test + def test_test_correlation_results(mocker): + """ Test the test_correlation_result helper """ + + mocker.patch( + 'conftest.Configuration', + return_value = { + 'testing': { + 'pipelines': { + 'correlation_thresholds' : [0.30, 0.70, 0.80, 0.85, 0.93] + } + } + } + ) + + assert helpers.test_correlation_results( + [0.35, 0.35, 0.36, 0.37, 0.38, 0.39, 0.99, 0.82, 0.40], 20) + assert helpers.test_correlation_results( + [0.75, 0.75, 0.76, 0.77, 0.88, 0.79, 0.99, 0.82, 0.80], 40) + assert helpers.test_correlation_results( + [0.85, 0.85, 0.86, 0.87, 0.98, 0.99, 0.99, 0.82, 0.81], 60) + assert helpers.test_correlation_results( + [0.86, 0.86, 0.86, 0.87, 0.88, 0.89, 0.99, 0.92, 0.95], 80) + assert helpers.test_correlation_results( + [0.95, 0.95, 0.96, 0.97, 0.98, 0.99, 0.99, 0.95, 1.0], 108) + assert not helpers.test_correlation_results( + [0.3, 0.29, 0.3, 0.3, 0.3, 0.39, 0.99, 0.82, 0.40], 20) + assert not helpers.test_correlation_results( + [0.60, 0.7, 0.7, 0.7, 0.8, 0.79, 0.99, 0.82, 0.80], 40) + assert not helpers.test_correlation_results( + [0.8, 0.79, 0.8, 0.8, 0.9, 0.99, 0.99, 0.82, 0.81], 60) + assert not helpers.test_correlation_results( + [0.8, 0.8, 0.8, 0.8, 0.88, 0.89, 0.99, 0.92, 0.95], 80) + assert not helpers.test_correlation_results( + [0.99, 0.99, 0.99, 1., 1., 1., -1., 0.95, 1.0], 108) + + @staticmethod + @mark.unit_test + def test_test_pipeline_execution(mocker, set_test_directory): + """ Test the test_pipeline_execution helper """ + + # Create mocks + mocker.patch('conftest.get_correlation_coefficient', return_value = 1.0) + fake_runner = PipelineRunner('2T6S') + fake_runner._pipeline = MockupPipeline() + mocker.patch('conftest.PipelineRunner', return_value = fake_runner) + fake_collection = 
ResultsCollection('2T6S') + mocker.patch('conftest.ResultsCollection', return_value = fake_collection) + + # Run pipeline + helpers.test_pipeline_execution('test_conftest', 20) + + # Check outputs + assert isdir(join(TEST_DIR, 'TestConftest_preprocessing_workflow')) + assert isdir(join(TEST_DIR, 'TestConftest_run_level_workflow')) + assert isdir(join(TEST_DIR, 'TestConftest_subject_level_workflow')) + assert isdir(join(TEST_DIR, 'TestConftest_group_level_workflow')) -class TestPipelineRunner: - """ A class that contains all the unit tests for the PipelineRunner class.""" + @staticmethod + @mark.unit_test + def test_test_pipeline_evaluation(): + """ Test the test_pipeline_evaluation helper """ From a653f8b4c2a10ef291917985394d60846c8fbc19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 4 Sep 2023 09:30:11 +0200 Subject: [PATCH 09/19] Issue with parameters dir creation --- narps_open/pipelines/team_2T6S.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_2T6S.py b/narps_open/pipelines/team_2T6S.py index 2984b5a9..df4ec849 100755 --- a/narps_open/pipelines/team_2T6S.py +++ b/narps_open/pipelines/team_2T6S.py @@ -169,8 +169,7 @@ def get_parameters_file(filepaths, subject_id, working_dir): new_path = join(working_dir, 'parameters_file', f'parameters_file_sub-{subject_id}_run-{str(file_id + 1).zfill(2)}.tsv') - if not isdir(join(working_dir, 'parameters_file')): - mkdir(join(working_dir, 'parameters_file')) + makedirs(join(working_dir, 'parameters_file'), exist_ok = True) with open(new_path, 'w') as writer: writer.write(retained_parameters.to_csv( @@ -616,38 +615,47 @@ def get_hypotheses_outputs(self): """ Return all hypotheses output file names. 
""" nb_sub = len(self.subject_list) files = [ + # Hypothesis 1 join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0002', 'spmT_0001.nii'), + # Hypothesis 2 join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0002', 'spmT_0001.nii'), + # Hypothesis 3 join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0002', 'spmT_0001.nii'), + # Hypothesis 4 join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0002', 'spmT_0001.nii'), + # Hypothesis 5 join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0003', '_threshold1', 'spmT_0002_thr.nii'), join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0003', 'spmT_0002.nii'), + # Hypothesis 6 join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0003', '_threshold1', 'spmT_0002_thr.nii'), join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0003', 'spmT_0002.nii'), + # Hypothesis 7 join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0003', 'spmT_0001.nii'), + # Hypothesis 8 join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), join(f'l2_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0003', 'spmT_0001.nii'), + # Hypothesis 9 join(f'l2_analysis_groupComp_nsub_{nb_sub}', '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), join(f'l2_analysis_groupComp_nsub_{nb_sub}', From 1642bd8cd93cff293e10474c2e7b030aa68e49f1 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 5 Sep 2023 14:34:54 +0200 Subject: [PATCH 10/19] [CI] correlation results displayed in the wiki --- .github/workflows/pipeline_tests.yml | 33 ++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/.github/workflows/pipeline_tests.yml b/.github/workflows/pipeline_tests.yml index 66e91a91..bb898ca4 100644 --- a/.github/workflows/pipeline_tests.yml +++ b/.github/workflows/pipeline_tests.yml @@ -99,3 +99,36 @@ jobs: do cat test_pipeline-$team.txt >> $GITHUB_STEP_SUMMARY done + + - name: Checkout wiki + uses: actions/checkout@v3 + if: ${{ github.ref }} == 'refs/heads/master' + with: + repository: ${{github.repository}}.wiki + path: wiki + + - name: Report results on the project's wiki + if: ${{ github.ref }} == 'refs/heads/master' + run: | + # Loop through test report files & write them to the corresponding wiki page + for team in ${{ needs.identify-tests.outputs.teams }} + do + echo "# Correlation values" > wiki/pipeline_$team.md + echo "Unthresholded maps, reproduced vs. results" >> wiki/pipeline_$team.md + echo "Correlation values are sorted from hypotheses 1 to 9" >> wiki/pipeline_$team.md + echo "These values were calculated by GitHub Actions run [# $GITHUB_RUN_ID]($GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID)" >> wiki/pipeline_$team.md + echo "| Team | Number of subjects | Test status | Correlation values |" >> wiki/pipeline_$team.md + echo "| -------- | ------- | ------- | ------- |" >> wiki/pipeline_$team.md + done + + # Push changes + cd wiki + git config user.name github-actions + git config user.email github-actions@github.com + + # Test if there are changes in the repository before commiting + if [[ $(git diff --name-only) ]]; then + git add . 
+ git commit -m "Pipeline status updated" + git push + fi From e56bb630aec584e2dfbbf6e081f7273a5e922f0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 5 Sep 2023 15:27:42 +0200 Subject: [PATCH 11/19] Bug with makedirs import --- narps_open/pipelines/team_2T6S.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_2T6S.py b/narps_open/pipelines/team_2T6S.py index df4ec849..ea4f38c7 100755 --- a/narps_open/pipelines/team_2T6S.py +++ b/narps_open/pipelines/team_2T6S.py @@ -143,7 +143,7 @@ def get_parameters_file(filepaths, subject_id, working_dir): Return : - parameters_file : paths to new files containing only desired parameters. """ - from os import mkdir + from os import makedirs from os.path import join, isdir from pandas import read_csv, DataFrame From 68866c0503d2688aaabf3fc7c6dd1fa6c7d6257a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 7 Sep 2023 11:54:28 +0200 Subject: [PATCH 12/19] [DOC] update documentation about workflows --- .github/workflows/pipeline_tests.yml | 3 ++- docs/ci-cd.md | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pipeline_tests.yml b/.github/workflows/pipeline_tests.yml index bb898ca4..d9966bf7 100644 --- a/.github/workflows/pipeline_tests.yml +++ b/.github/workflows/pipeline_tests.yml @@ -113,12 +113,13 @@ jobs: # Loop through test report files & write them to the corresponding wiki page for team in ${{ needs.identify-tests.outputs.teams }} do - echo "# Correlation values" > wiki/pipeline_$team.md + echo "# Correlation values for pipeline $team" > wiki/pipeline_$team.md echo "Unthresholded maps, reproduced vs. 
results" >> wiki/pipeline_$team.md echo "Correlation values are sorted from hypotheses 1 to 9" >> wiki/pipeline_$team.md echo "These values were calculated by GitHub Actions run [# $GITHUB_RUN_ID]($GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID)" >> wiki/pipeline_$team.md echo "| Team | Number of subjects | Test status | Correlation values |" >> wiki/pipeline_$team.md echo "| -------- | ------- | ------- | ------- |" >> wiki/pipeline_$team.md + cat test_pipeline-$team.txt >> wiki/pipeline_$team.md done # Push changes diff --git a/docs/ci-cd.md b/docs/ci-cd.md index c292eed1..662649cb 100644 --- a/docs/ci-cd.md +++ b/docs/ci-cd.md @@ -36,7 +36,8 @@ For now, the following workflows are set up: | ----------- | ----------- | ----------- | ----------- | ----------- | | [code_quality](/.github/workflows/code_quality.yml) | A static analysis of the python code (see the [testing](/docs/testing.md) topic of the documentation for more information). | For every push or pull_request if there are changes on `.py` files. | On GitHub servers. | Outputs (logs of pylint) are stored as [downloadable artifacts](https://docs.github.com/en/actions/managing-workflow-runs/downloading-workflow-artifacts) during 15 days after the push. | | [codespell](/.github/workflows/codespell.yml) | A static analysis of the text files for commonly made typos using [codespell](codespell-project/codespell: check code for common misspellings). | For every push or pull_request to the `maint` branch. | On GitHub servers. | Outputs (logs of codespell) are stored as [downloadable artifacts](https://docs.github.com/en/actions/managing-workflow-runs/downloading-workflow-artifacts) during 15 days after the push. | -| [pipeline_tests](/.github/workflows/pipelines.yml) | Runs all the tests for changed pipelines. | For every push or pull_request, if a pipeline file changed. | On Empenn runners. | Outputs (logs of pytest) are stored as downloadable artifacts during 15 days after the push. 
| +| [pipeline_status](/.github/workflows/pipeline_status.yml) | Generates a page with pipelines statuses. | For every push on the `main` branch, if a pipeline file changed. Or if issues changed. | On GitHub servers. | Outputs (status table) is pushed to the dedicated [page](https://github.com/Inria-Empenn/narps_open_pipelines/wiki/pipeline_status) on the project's wiki. | +| [pipeline_tests](/.github/workflows/pipeline_tests.yml) | Runs all the tests for changed pipelines. | For every push or pull_request, if a pipeline file changed. | On Empenn runners. | Outputs (logs of pytest) are stored as downloadable artifacts during 15 days after the push. Results of pipeline are pushed to a dedicated page on the project's wiki. | | [test_changes](/.github/workflows/test_changes.yml) | It runs all the changed tests for the project. | For every push or pull_request, if a test file changed. | On Empenn runners. | Outputs (logs of pytest) are stored as downloadable artifacts during 15 days after the push. | | [unit_testing](/.github/workflows/unit_testing.yml) | It runs all the unit tests for the project (see the [testing](/docs/testing.md) topic of the documentation for more information). | For every push or pull_request, if a file changed inside `narps_open/`, or a file related to test execution. | On GitHub servers. | Outputs (logs of pytest) are stored as downloadable artifacts during 15 days after the push. 
| From e8b154efa6252bd0d899eadc1881203114e52d3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 7 Sep 2023 13:40:55 +0200 Subject: [PATCH 13/19] Report results for pull_requests as well --- .github/workflows/pipeline_tests.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pipeline_tests.yml b/.github/workflows/pipeline_tests.yml index d9966bf7..d14ff632 100644 --- a/.github/workflows/pipeline_tests.yml +++ b/.github/workflows/pipeline_tests.yml @@ -82,7 +82,8 @@ jobs: crash-*.pklz retention-days: 15 - - name: Report results on GitHub + - name: Report results on step summary + id: summary if: ${{ always() }} run: | # Start report @@ -102,13 +103,13 @@ jobs: - name: Checkout wiki uses: actions/checkout@v3 - if: ${{ github.ref }} == 'refs/heads/master' + if: ${{ steps.summary.conclusion == 'success' }} # Run only if previous job succeed with: repository: ${{github.repository}}.wiki path: wiki - name: Report results on the project's wiki - if: ${{ github.ref }} == 'refs/heads/master' + if: ${{ steps.summary.conclusion == 'success' }} run: | # Loop through test report files & write them to the corresponding wiki page for team in ${{ needs.identify-tests.outputs.teams }} From 50e28285c6df69c89059fa6bb2fe3a381b967610 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 8 Sep 2023 14:50:10 +0200 Subject: [PATCH 14/19] Change job trigger condition --- .github/workflows/pipeline_tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pipeline_tests.yml b/.github/workflows/pipeline_tests.yml index d14ff632..08389565 100644 --- a/.github/workflows/pipeline_tests.yml +++ b/.github/workflows/pipeline_tests.yml @@ -103,13 +103,13 @@ jobs: - name: Checkout wiki uses: actions/checkout@v3 - if: ${{ steps.summary.conclusion == 'success' }} # Run only if previous job succeed + needs: summary with: repository: ${{github.repository}}.wiki path: wiki - 
name: Report results on the project's wiki - if: ${{ steps.summary.conclusion == 'success' }} + needs: summary run: | # Loop through test report files & write them to the corresponding wiki page for team in ${{ needs.identify-tests.outputs.teams }} From 7368586a25be9ab8a0b60143917357559a0afbfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 8 Sep 2023 15:02:26 +0200 Subject: [PATCH 15/19] Dummy change to test 2T6S pipeline --- narps_open/pipelines/team_2T6S.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_2T6S.py b/narps_open/pipelines/team_2T6S.py index ea4f38c7..c68d7acc 100755 --- a/narps_open/pipelines/team_2T6S.py +++ b/narps_open/pipelines/team_2T6S.py @@ -1,7 +1,7 @@ #!/usr/bin/python # coding: utf-8 -""" Write the work of NARPS team 2T6S using Nipype """ +""" Write the work of NARPS' team 2T6S using Nipype """ from os.path import join from itertools import product From 399aa35e68d3a58677655b9181f20e7b1760c33c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 8 Sep 2023 15:16:34 +0200 Subject: [PATCH 16/19] Change job trigger condition --- .github/workflows/pipeline_tests.yml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pipeline_tests.yml b/.github/workflows/pipeline_tests.yml index 08389565..7db3dd8c 100644 --- a/.github/workflows/pipeline_tests.yml +++ b/.github/workflows/pipeline_tests.yml @@ -83,7 +83,6 @@ jobs: retention-days: 15 - name: Report results on step summary - id: summary if: ${{ always() }} run: | # Start report @@ -102,14 +101,14 @@ jobs: done - name: Checkout wiki + if: ${{ always() }} uses: actions/checkout@v3 - needs: summary with: repository: ${{github.repository}}.wiki path: wiki - name: Report results on the project's wiki - needs: summary + if: ${{ always() }} run: | # Loop through test report files & write them to the corresponding wiki page for team in ${{ needs.identify-tests.outputs.teams }} 
From 048368021b4ce33220596e3d930adea87c8b5dc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 14 Sep 2023 17:43:59 +0200 Subject: [PATCH 17/19] [CI] create an export job for the pipeline_tests workflow --- .github/workflows/pipeline_tests.yml | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/.github/workflows/pipeline_tests.yml b/.github/workflows/pipeline_tests.yml index 7db3dd8c..499ce94c 100644 --- a/.github/workflows/pipeline_tests.yml +++ b/.github/workflows/pipeline_tests.yml @@ -73,6 +73,15 @@ jobs: pytest -s -q -m "pipeline_test" ${{ needs.identify-tests.outputs.tests }} fi + - name: Export test results + if: ${{ always() }} + run: | + for team in ${{ needs.identify-tests.outputs.teams }} + do + report_value=$(> $GITHUB_ENV + done + - name: Archive pipeline execution failures if: ${{ failure() }} # Run only if previous job fails uses: actions/upload-artifact@v3 @@ -82,8 +91,13 @@ jobs: crash-*.pklz retention-days: 15 + # A job to report the test results + tests-report: + needs: [identify-tests, pytest] + runs-on: ubuntu-latest + if: ${{ always() }} + steps: - name: Report results on step summary - if: ${{ always() }} run: | # Start report echo "# Correlation values" >> $GITHUB_STEP_SUMMARY @@ -97,18 +111,17 @@ jobs: # Loop through test report files for team in ${{ needs.identify-tests.outputs.teams }} do - cat test_pipeline-$team.txt >> $GITHUB_STEP_SUMMARY + # Retrieve the report from GITHUB_ENV, as set up by the pytest job + echo $(report_team-$team) >> $GITHUB_STEP_SUMMARY done - name: Checkout wiki - if: ${{ always() }} uses: actions/checkout@v3 with: repository: ${{github.repository}}.wiki path: wiki - name: Report results on the project's wiki - if: ${{ always() }} run: | # Loop through test report files & write them to the corresponding wiki page for team in ${{ needs.identify-tests.outputs.teams }} @@ -119,7 +132,9 @@ jobs: echo "These values were calculated by GitHub Actions run 
[# $GITHUB_RUN_ID]($GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID)" >> wiki/pipeline_$team.md echo "| Team | Number of subjects | Test status | Correlation values |" >> wiki/pipeline_$team.md echo "| -------- | ------- | ------- | ------- |" >> wiki/pipeline_$team.md - cat test_pipeline-$team.txt >> wiki/pipeline_$team.md + + # Retrieve the report from GITHUB_ENV, as set up by the pytest job + echo $(report_team-$team) >> wiki/pipeline_$team.md done # Push changes From 434695ae34b11d90a75c6ccca5305ff6bc753822 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 29 Sep 2023 13:50:23 +0200 Subject: [PATCH 18/19] Dummy change to trigger pipeline test --- narps_open/pipelines/team_2T6S.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_2T6S.py b/narps_open/pipelines/team_2T6S.py index 6c42201a..7293947b 100755 --- a/narps_open/pipelines/team_2T6S.py +++ b/narps_open/pipelines/team_2T6S.py @@ -1,7 +1,7 @@ #!/usr/bin/python # coding: utf-8 -""" Write the work of NARPS team 2T6S using Nipype """ +""" Write the work of NARPS' team 2T6S using Nipype """ from os.path import join from itertools import product From 08d81d7a098eff4b72cd936903df38998a703c0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 3 Oct 2023 11:07:37 +0200 Subject: [PATCH 19/19] Using cache in test report workflow --- .github/workflows/pipeline_tests.yml | 31 ++++++++++++++++------------ 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/.github/workflows/pipeline_tests.yml b/.github/workflows/pipeline_tests.yml index 0fa0f2f4..2673c3bb 100644 --- a/.github/workflows/pipeline_tests.yml +++ b/.github/workflows/pipeline_tests.yml @@ -73,14 +73,12 @@ jobs: pytest -s -q -m "pipeline_test" ${{ needs.identify-tests.outputs.tests }} fi - - name: Export test results + - name: Cache test results if: ${{ always() }} - run: | - for team in ${{ needs.identify-tests.outputs.teams }} - do - 
report_value=$(> $GITHUB_ENV - done + uses: actions/cache@v3 + with: + key: cache-test-results-${{github.run_id}} + path: test_pipeline-*.txt - name: Archive pipeline execution failures if: ${{ failure() }} # Run only if previous job fails @@ -97,6 +95,13 @@ jobs: runs-on: ubuntu-latest if: ${{ always() }} steps: + + - name: Restore cache with test results + uses: actions/cache@v3 + with: + key: cache-test-results-${{github.run_id}} + path: test_pipeline-*.txt + - name: Report results on step summary run: | # Start report @@ -112,7 +117,7 @@ jobs: for team in ${{ needs.identify-tests.outputs.teams }} do # Retrieve the report from GITHUB_ENV, as set up by the pytest job - echo $(report_team-$team) >> $GITHUB_STEP_SUMMARY + cat test_pipeline-$team.txt >> $GITHUB_STEP_SUMMARY done - name: Checkout wiki @@ -126,15 +131,15 @@ jobs: # Loop through test report files & write them to the corresponding wiki page for team in ${{ needs.identify-tests.outputs.teams }} do - echo "# Correlation values for pipeline $team" > wiki/pipeline_$team.md - echo "Unthresholded maps, reproduced vs. results" >> wiki/pipeline_$team.md - echo "Correlation values are sorted from hypotheses 1 to 9" >> wiki/pipeline_$team.md - echo "These values were calculated by GitHub Actions run [# $GITHUB_RUN_ID]($GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID)" >> wiki/pipeline_$team.md + echo "# Correlation values for pipeline $team \n" > wiki/pipeline_$team.md + echo "Unthresholded maps, reproduced vs. 
results \n" >> wiki/pipeline_$team.md + echo "Correlation values are sorted from hypotheses 1 to 9 \n" >> wiki/pipeline_$team.md + echo "These values were calculated by GitHub Actions run [# $GITHUB_RUN_ID]($GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID) \n" >> wiki/pipeline_$team.md echo "| Team | Number of subjects | Test status | Correlation values |" >> wiki/pipeline_$team.md echo "| -------- | ------- | ------- | ------- |" >> wiki/pipeline_$team.md # Retrieve the report from GITHUB_ENV, as set up by the pytest job - echo $(report_team-$team) >> wiki/pipeline_$team.md + cat test_pipeline-$team.txt >> wiki/pipeline_$team.md done # Push changes