From d7ba0e7c1269d90886c1a76056946343e06d3979 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 26 Feb 2024 15:30:04 +0100 Subject: [PATCH 1/7] [R9K3][TEST] code and tests for the pipeline --- narps_open/pipelines/__init__.py | 2 +- narps_open/pipelines/team_R9K3.py | 628 +++++++++++++++ narps_open/pipelines/team_R9K3_wip.py | 715 ------------------ tests/conftest.py | 12 +- tests/pipelines/test_team_R9K3.py | 123 +++ tests/test_conftest.py | 58 +- .../pipelines/team_R9K3/confounds.tsv | 3 + 7 files changed, 788 insertions(+), 753 deletions(-) create mode 100644 narps_open/pipelines/team_R9K3.py delete mode 100755 narps_open/pipelines/team_R9K3_wip.py create mode 100644 tests/pipelines/test_team_R9K3.py create mode 100644 tests/test_data/pipelines/team_R9K3/confounds.tsv diff --git a/narps_open/pipelines/__init__.py b/narps_open/pipelines/__init__.py index e9bf1f0b..66cab6f5 100644 --- a/narps_open/pipelines/__init__.py +++ b/narps_open/pipelines/__init__.py @@ -66,7 +66,7 @@ 'R42Q': None, 'R5K7': None, 'R7D1': None, - 'R9K3': None, + 'R9K3': 'PipelineTeam08MQ', 'SM54': None, 'T54A': 'PipelineTeamT54A', 'U26C': 'PipelineTeamU26C', diff --git a/narps_open/pipelines/team_R9K3.py b/narps_open/pipelines/team_R9K3.py new file mode 100644 index 00000000..3aba32b7 --- /dev/null +++ b/narps_open/pipelines/team_R9K3.py @@ -0,0 +1,628 @@ +#!/usr/bin/python +# coding: utf-8 + +""" Write the work of NARPS' team R9K3 using Nipype """ + +from os.path import join +from itertools import product + +from nipype import Workflow, Node, MapNode +from nipype.interfaces.utility import IdentityInterface, Function +from nipype.interfaces.utility.base import Merge +from nipype.interfaces.io import SelectFiles, DataSink +from nipype.interfaces.spm import ( + Smooth, Level1Design, OneSampleTTestDesign, TwoSampleTTestDesign, + EstimateModel, EstimateContrast, Threshold + ) +from nipype.algorithms.modelgen import SpecifySPMModel +from nipype.algorithms.misc import Gunzip + +from narps_open.pipelines import Pipeline +from narps_open.data.task import TaskInformation +from narps_open.data.participants import get_group +from narps_open.core.interfaces import InterfaceFactory +from narps_open.core.common import ( + list_intersection, elements_in_string, clean_list + ) +from narps_open.utils.configuration import Configuration + +class PipelineTeamR9K3(Pipeline): + """ A class that defines the pipeline of team R9K3. """ + + def __init__(self): + super().__init__() + self.fwhm = 6.0 + self.team_id = 'R9K3' + self.contrast_list = ['0001', '0002'] + conditions = ['trialxgain^1', 'trialxloss^1'] + self.subject_level_contrasts = [ + ['effect_of_gain', 'T', conditions, [0, 1, 0]], + ['effect_of_loss', 'T', conditions, [0, 0, 1]] + ] + + def get_preprocessing(self): + """ + Create the preprocessing workflow. 
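+        Note: the only preprocessing steps implemented below are unzipping the
+        input images (for SPM) and spatial smoothing (fwhm = 6.0 mm, set in __init__).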
+
+        Returns:
+            - preprocessing : nipype.WorkFlow
+        """
+        # Initialize preprocessing workflow to connect nodes along the way
+        preprocessing = Workflow(
+            base_dir = self.directories.working_dir,
+            name = 'preprocessing')
+
+        # IDENTITY INTERFACE - To iterate on subjects
+        information_source = Node(IdentityInterface(
+            fields = ['subject_id']),
+            name = 'information_source')
+        information_source.iterables = [('subject_id', self.subject_list)]
+
+        # SELECT FILES - to select necessary files
+        templates = {
+            'func' : join('sub-{subject_id}', 'func',
+                'sub-{subject_id}_task-MGT_run-*_bold.nii.gz'),
+        }
+        select_files = Node(SelectFiles(templates), name = 'select_files')
+        select_files.inputs.base_directory = self.directories.dataset_dir
+        preprocessing.connect(information_source, 'subject_id', select_files, 'subject_id')
+
+        # GUNZIP - gunzip files because SPM does not use .nii.gz files
+        gunzip_func = MapNode(Gunzip(), name = 'gunzip_func', iterfield = ['in_file'])
+        preprocessing.connect(select_files, 'func', gunzip_func, 'in_file')
+
+        # SMOOTH - Spatial smoothing of fMRI data.
+        smoothing = MapNode(Smooth(), name = 'smoothing', iterfield = 'in_files')
+        smoothing.inputs.fwhm = [self.fwhm] * 3
+        preprocessing.connect(gunzip_func, 'out_file', smoothing, 'in_files')
+
+        # DATA SINK - store the wanted results in the wanted repository
+        data_sink = Node(DataSink(), name = 'data_sink')
+        data_sink.inputs.base_directory = self.directories.output_dir
+        preprocessing.connect(smoothing, 'smoothed_files', data_sink, 'preprocessing.@smoothed')
+
+        # Remove large files, if requested
+        if Configuration()['pipelines']['remove_unused_data']:
+
+            # MERGE - Merge all temporary outputs once they are no longer needed
+            merge_temp_files = Node(Merge(2), name = 'merge_temp_files')
+            preprocessing.connect(gunzip_func, 'out_file', merge_temp_files, 'in1')
+            preprocessing.connect(smoothing, 'smoothed_files', merge_temp_files, 'in2')
+
+            # FUNCTION - Remove temporary files once they are no longer needed
+            remove_gunziped = MapNode(
+                InterfaceFactory.create('remove_parent_directory'),
+                name = 'remove_gunziped',
+                iterfield = 'file_name'
+                )
+            preprocessing.connect(merge_temp_files, 'out', remove_gunziped, 'file_name')
+            preprocessing.connect(data_sink, 'out_file', remove_gunziped, '_')
+
+        return preprocessing
+
+    def get_preprocessing_outputs(self):
+        """ Return the names of the files the preprocessing is supposed to generate. """
+
+        output_dir = join(self.directories.output_dir, 'preprocessing', '_subject_id_{subject_id}')
+
+        # Smoothing outputs # TODO
+        templates = [join(output_dir, f'_smoothing{index}',
+            'srsub-{subject_id}'+f'_task-MGT_run-{run_id}_bold.nii')\
+            for index, run_id in zip(range(len(self.run_list)), self.run_list)]
+
+        # Format with subject_ids
+        return_list = []
+        for template in templates:
+            return_list += [template.format(subject_id = s) for s in self.subject_list]
+
+        return return_list
+
+    def get_run_level_analysis(self):
+        """ No run level analysis has been done by team R9K3 """
+        return None
+
+    def get_subject_information(event_file):
+        """
+        Create a Bunch for SpecifySPMModel, from data extracted from an event_file.
+
+        Parameters :
+        - event_file: str, event file (one per run) for the subject
+
+        Returns :
+        - subject_information: Bunch, relevant event information for subject level analysis.
+        """
+        from nipype.interfaces.base import Bunch
+        from numpy import max as npmax
+
+        trial_onsets = []
+        trial_durations = []
+        weights_gain = []
+        weights_loss = []
+
+        with open(event_file, "rt") as file:
+            next(file) # skip the header
+
+            for line in file:
+                info = line.strip().split()
+
+                trial_onsets.append(float(info[0]))
+                trial_durations.append(0.0)
+                weights_gain.append(float(info[2]))
+                weights_loss.append(float(info[3]))
+
+        # Scale weights so that the maximum value of each modulator becomes 1
+        weights_gain = list(weights_gain / npmax(weights_gain))
+        weights_loss = list(weights_loss / npmax(weights_loss))
+
+        return Bunch(
+            conditions = ['trial'],
+            onsets = [trial_onsets],
+            durations = [trial_durations],
+            amplitudes = None,
+            tmod = None,
+            pmod = [
+                Bunch(
+                    name = ['gain', 'loss'],
+                    poly = [1, 1],
+                    param = [weights_gain, weights_loss],
+                ),
+                None,
+            ],
+            regressor_names = None,
+            regressors = None
+        )
+
+    def get_confounds_file(confounds_file: str, subject_id: str, run_id: str) -> str:
+        """
+        Create a tsv file with only desired confounds per subject per run.
+
+        Parameters :
+        - confounds_file: str, path to the file containing confounds from fmriprep
+        - subject_id : related subject id
+        - run_id : related run id
+
+        Return :
+        - out_file : path to new file containing only desired confounds
+        """
+        from os.path import abspath
+
+        from pandas import read_csv, DataFrame
+        from numpy import array, transpose
+
+        # Get the dataframe containing the 6 head motion parameter regressors
+        data_frame = read_csv(confounds_file, sep = '\t', header=0)
+
+        # Extract parameters we want to use for the model
+        retained_parameters = DataFrame(transpose(array([
+            data_frame['X'], data_frame['Y'], data_frame['Z'],
+            data_frame['RotX'], data_frame['RotY'], data_frame['RotZ']])))
+
+        # Write confounds to a file
+        out_file = abspath(f'confounds_file_sub-{subject_id}_run-{run_id}.tsv')
+        with open(out_file, 'w', encoding = 'utf-8') as writer:
+            writer.write(retained_parameters.to_csv(
+                sep = '\t', index = False, header = False, na_rep = '0.0'))
+
+        return out_file
+
+    def get_subject_level_analysis(self):
+        """
+        Create the subject level analysis workflow.
+ + Returns: + - subject_level : nipype.WorkFlow + WARNING: the name attribute of the workflow is 'subject_level_analysis' + """ + # Create subject level analysis workflow + subject_level = Workflow( + base_dir = self.directories.working_dir, + name = 'subject_level_analysis') + + # IDENTITY INTERFACE - To iterate on subjects + information_source = Node(IdentityInterface( + fields = ['subject_id']), + name = 'information_source') + information_source.iterables = [('subject_id', self.subject_list)] + + # SELECT FILES - to select necessary files + templates = { + 'confounds' : join('derivatives', 'fmriprep', 'sub-{subject_id}', 'func', + 'sub-{subject_id}_task-MGT_run-*_bold_confounds.tsv'), + 'func' : join(self.directories.output_dir, 'preprocessing', '_subject_id_{subject_id}', + '_smoothing*', 'srsub-{subject_id}_task-MGT_run-*_bold.nii'), # TODO + 'event' : join('sub-{subject_id}', 'func', + 'sub-{subject_id}_task-MGT_run-*_events.tsv'), + } + select_files = Node(SelectFiles(templates), name = 'select_files') + select_files.inputs.base_directory = self.directories.dataset_dir + subject_level.connect(information_source, 'subject_id', select_files, 'subject_id') + + # FUNCTION get_subject_information - generate files with event data + subject_information = MapNode(Function( + function = self.get_subject_information, + input_names = ['event_file'], + output_names = ['subject_info']), + iterfield = 'event_file', + name = 'subject_information') + subject_level.connect(select_files, 'event', subject_information, 'event_file') + + # FUNCTION node get_confounds_file - generate files with confounds data + confounds = MapNode( + Function( + function = self.get_confounds_file, + input_names = ['confounds_file', 'subject_id', 'run_id'], + output_names = ['confounds_file'] + ), + name = 'confounds', + iterfield = ['confounds_file', 'run_id']) + confounds.inputs.run_id = self.run_list + subject_level.connect(information_source, 'subject_id', confounds, 'subject_id') + subject_level.connect(select_files, 'confounds', confounds, 'confounds_file') + + # SPECIFY MODEL - generates SPM-specific Model + specify_model = Node(SpecifySPMModel(), name = 'specify_model') + specify_model.inputs.input_units = 'secs' + specify_model.inputs.output_units = 'secs' + specify_model.inputs.time_repetition = TaskInformation()['RepetitionTime'] + specify_model.inputs.high_pass_filter_cutoff = 128 + subject_level.connect(select_files, 'func', specify_model, 'functional_runs') + subject_level.connect(confounds, 'confounds_file', specify_model, 'realignment_parameters') + subject_level.connect(subject_information, 'subject_info', specify_model, 'subject_info') + + # LEVEL 1 DESIGN - Generates an SPM design matrix + model_design = Node(Level1Design(), name = 'model_design') + model_design.inputs.bases = {'hrf': {'derivs': [0, 0]}} + model_design.inputs.timing_units = 'secs' + model_design.inputs.interscan_interval = TaskInformation()['RepetitionTime'] + subject_level.connect(specify_model, 'session_info', model_design, 'session_info') + + # ESTIMATE MODEL - estimate the parameters of the model + model_estimate = Node(EstimateModel(), name = 'model_estimate') + model_estimate.inputs.estimation_method = {'Classical': 1} + subject_level.connect(model_design, 'spm_mat_file', model_estimate, 'spm_mat_file') + + # ESTIMATE CONTRAST - estimates contrasts + contrast_estimate = Node(EstimateContrast(), name = 'contrast_estimate') + contrast_estimate.inputs.contrasts = self.subject_level_contrasts + subject_level.connect([ + 
(model_estimate, contrast_estimate, [
+                ('spm_mat_file', 'spm_mat_file'),
+                ('beta_images', 'beta_images'),
+                ('residual_image', 'residual_image')
+                ])
+            ])
+
+        # DATA SINK - store the wanted results in the wanted repository
+        data_sink = Node(DataSink(), name = 'data_sink')
+        data_sink.inputs.base_directory = self.directories.output_dir
+        subject_level.connect([
+            (contrast_estimate, data_sink, [
+                ('con_images', 'subject_level_analysis.@con_images'),
+                ('spmT_images', 'subject_level_analysis.@spmT_images'),
+                ('spm_mat_file', 'subject_level_analysis.@spm_mat_file')
+                ])
+            ])
+
+        return subject_level
+
+    def get_subject_level_outputs(self):
+        """ Return the names of the files the subject level analysis is supposed to generate. """
+
+        # Contrast maps
+        templates = [join(
+            self.directories.output_dir,
+            'subject_level_analysis', '_subject_id_{subject_id}', f'con_{contrast_id}.nii')\
+            for contrast_id in self.contrast_list]
+
+        # SPM.mat file
+        templates += [join(
+            self.directories.output_dir,
+            'subject_level_analysis', '_subject_id_{subject_id}', 'SPM.mat')]
+
+        # spmT maps
+        templates += [join(
+            self.directories.output_dir,
+            'subject_level_analysis', '_subject_id_{subject_id}', f'spmT_{contrast_id}.nii')\
+            for contrast_id in self.contrast_list]
+
+        # Format with subject_ids
+        return_list = []
+        for template in templates:
+            return_list += [template.format(subject_id = s) for s in self.subject_list]
+
+        return return_list
+
+    def get_group_level_analysis(self):
+        """
+        Return all workflows for the group level analysis.
+
+        Returns:
+            - a list of nipype.WorkFlow
+        """
+
+        methods = ['equalRange', 'equalIndifference', 'groupComp']
+        return [self.get_group_level_analysis_sub_workflow(method) for method in methods]
+
+    def get_group_level_analysis_sub_workflow(self, method):
+        """
+        Return a workflow for the group level analysis.
+ + Parameters: + - method: one of 'equalRange', 'equalIndifference' or 'groupComp' + + Returns: + - group_level_analysis: nipype.WorkFlow + """ + # Compute the number of participants used to do the analysis + nb_subjects = len(self.subject_list) + + # Infosource - a function free node to iterate over the list of subject names + information_source = Node( + IdentityInterface( + fields=['contrast_id']), + name='information_source') + information_source.iterables = [('contrast_id', self.contrast_list)] + + # SelectFiles + templates = { + # Contrasts for all participants + 'contrasts' : join(self.directories.output_dir, + 'subject_level_analysis', '_subject_id_*', 'con_{contrast_id}.nii') + } + + select_files = Node(SelectFiles(templates), name = 'select_files') + select_files.inputs.base_directory = self.directories.results_dir + select_files.inputs.force_lists = True + + # Datasink - save important files + data_sink = Node(DataSink(), name = 'data_sink') + data_sink.inputs.base_directory = self.directories.output_dir + + # Function Node get_equal_range_subjects + # Get subjects in the equalRange group and in the subject_list + get_equal_range_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_equal_range_subjects' + ) + get_equal_range_subjects.inputs.list_1 = get_group('equalRange') + get_equal_range_subjects.inputs.list_2 = self.subject_list + + # Function Node get_equal_indifference_subjects + # Get subjects in the equalIndifference group and in the subject_list + get_equal_indifference_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_equal_indifference_subjects' + ) + get_equal_indifference_subjects.inputs.list_1 = get_group('equalIndifference') + get_equal_indifference_subjects.inputs.list_2 = self.subject_list + + # Create a function to complete the subject ids out from the get_equal_*_subjects nodes + # If not complete, subject id '001' in search patterns + # would match all contrast files with 'con_0001.nii'. 
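+        # e.g. (illustrative ids): complete_subject_ids(['001', '020'])
+        # returns ['_subject_id_001', '_subject_id_020']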
+ complete_subject_ids = lambda l : [f'_subject_id_{a}' for a in l] + + # Function Node elements_in_string + # Get contrast files for required subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_contrasts = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] + ), + name = 'get_contrasts', iterfield = 'input_str' + ) + + # Estimate model + estimate_model = Node(EstimateModel(), name = 'estimate_model') + estimate_model.inputs.estimation_method = {'Classical':1} + + # Estimate contrasts + estimate_contrast = Node(EstimateContrast(), name = 'estimate_contrast') + estimate_contrast.inputs.group_contrast = True + + ## Create thresholded maps + threshold = MapNode(Threshold(), + name = 'threshold', + iterfield = ['stat_image', 'contrast_index']) + threshold.inputs.use_fwe_correction = False + threshold.inputs.height_threshold = 0.001 + threshold.inputs.extent_threshold = 5 + threshold.synchronize = True + + group_level_analysis = Workflow( + base_dir = self.directories.working_dir, + name = f'group_level_analysis_{method}_nsub_{nb_subjects}') + group_level_analysis.connect([ + (information_source, select_files, [('contrast_id', 'contrast_id')]), + (select_files, get_contrasts, [('contrasts', 'input_str')]), + (estimate_model, estimate_contrast, [ + ('spm_mat_file', 'spm_mat_file'), + ('residual_image', 'residual_image'), + ('beta_images', 'beta_images')]), + (estimate_contrast, threshold, [ + ('spm_mat_file', 'spm_mat_file'), + ('spmT_images', 'stat_image')]), + (estimate_model, data_sink, [ + ('mask_image', f'group_level_analysis_{method}_nsub_{nb_subjects}.@mask')]), + (estimate_contrast, data_sink, [ + ('spm_mat_file', f'group_level_analysis_{method}_nsub_{nb_subjects}.@spm_mat'), + ('spmT_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@T'), + ('con_images', f'group_level_analysis_{method}_nsub_{nb_subjects}.@con')]), + (threshold, data_sink, [ + ('thresholded_map', f'group_level_analysis_{method}_nsub_{nb_subjects}.@thresh')])]) + + if method in ('equalRange', 'equalIndifference'): + estimate_contrast.inputs.contrasts = [ + ('Group', 'T', ['mean'], [1]), + ('Group', 'T', ['mean'], [-1]) + ] + + threshold.inputs.contrast_index = [1, 2] + + # Specify design matrix + one_sample_t_test_design = Node(OneSampleTTestDesign(), + name = 'one_sample_t_test_design') + group_level_analysis.connect([ + (get_contrasts, one_sample_t_test_design, [ + (('out_list', clean_list), 'in_files') + ]), + (one_sample_t_test_design, estimate_model, [('spm_mat_file', 'spm_mat_file')]) + ]) + + if method == 'equalRange': + group_level_analysis.connect([ + (get_equal_range_subjects, get_contrasts, [ + (('out_list', complete_subject_ids), 'elements') + ]) + ]) + + elif method == 'equalIndifference': + group_level_analysis.connect([ + (get_equal_indifference_subjects, get_contrasts, [ + (('out_list', complete_subject_ids), 'elements') + ]) + ]) + + elif method == 'groupComp': + estimate_contrast.inputs.contrasts = [ + ('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [1, -1]) + ] + + threshold.inputs.contrast_index = [1] + + # Function Node elements_in_string + # Get contrast files for required subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_contrasts_2 = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + 
output_names = ['out_list'] + ), + name = 'get_contrasts_2', iterfield = 'input_str' + ) + + # Specify design matrix + two_sample_t_test_design = Node(TwoSampleTTestDesign(), + name = 'two_sample_t_test_design') + two_sample_t_test_design.inputs.unequal_variance = True + + group_level_analysis.connect([ + (select_files, get_contrasts_2, [('contrasts', 'input_str')]), + (get_equal_range_subjects, get_contrasts, [ + (('out_list', complete_subject_ids), 'elements') + ]), + (get_equal_indifference_subjects, get_contrasts_2, [ + (('out_list', complete_subject_ids), 'elements') + ]), + (get_contrasts, two_sample_t_test_design, [ + (('out_list', clean_list), 'group1_files') + ]), + (get_contrasts_2, two_sample_t_test_design, [ + (('out_list', clean_list), 'group2_files') + ]), + (two_sample_t_test_design, estimate_model, [('spm_mat_file', 'spm_mat_file')]) + ]) + + return group_level_analysis + + def get_group_level_outputs(self): + """ Return all names for the files the group level analysis is supposed to generate. """ + + # Handle equalRange and equalIndifference + parameters = { + 'contrast_id': self.contrast_list, + 'method': ['equalRange', 'equalIndifference'], + 'file': [ + 'con_0001.nii', 'con_0002.nii', 'mask.nii', 'SPM.mat', + 'spmT_0001.nii', 'spmT_0002.nii', + join('_threshold0', 'spmT_0001_thr.nii'), join('_threshold1', 'spmT_0002_thr.nii') + ], + 'nb_subjects' : [str(len(self.subject_list))] + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'group_level_analysis_{method}_nsub_{nb_subjects}', + '_contrast_id_{contrast_id}', + '{file}' + ) + + return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] + + # Handle groupComp + parameters = { + 'contrast_id': self.contrast_list, + 'method': ['groupComp'], + 'file': [ + 'con_0001.nii', 'mask.nii', 'SPM.mat', 'spmT_0001.nii', + join('_threshold0', 'spmT_0001_thr.nii') + ], + 'nb_subjects' : [str(len(self.subject_list))] + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'group_level_analysis_{method}_nsub_{nb_subjects}', + '_contrast_id_{contrast_id}', + '{file}' + ) + + return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] + + return return_list + + def get_hypotheses_outputs(self): + """ Return all hypotheses output file names. + Note that hypotheses 5 to 8 correspond to the maps given by the team in their results ; + but they are not fully consistent with the hypotheses definitions as expected by NARPS. 
+ """ + nb_sub = len(self.subject_list) + files = [ + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold1', 'spmT_0002_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0002.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold1', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold0', 'spmT_0002_thr.nii'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0002.nii'), + join(f'group_level_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), + join(f'group_level_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii') + ] + return [join(self.directories.output_dir, f) for f in files] diff --git a/narps_open/pipelines/team_R9K3_wip.py b/narps_open/pipelines/team_R9K3_wip.py deleted file mode 100755 index 65613e70..00000000 --- a/narps_open/pipelines/team_R9K3_wip.py +++ /dev/null @@ -1,715 +0,0 @@ -# pylint: skip-file -# THIS IS A TEMPLATE THAT CAN BE USE TO REPRODUCE A NEW PIPELINE - -import os -import shutil -from os.path import join as opj -from typing import List - -from nipype import Node, Workflow -from nipype.algorithms.misc import Gunzip -from nipype.algorithms.modelgen import ( # Functions used during L1 analysis - SpecifyModel, - SpecifySPMModel, -) -from nipype.interfaces.base import Bunch -from nipype.interfaces.io import DataSink, SelectFiles -from nipype.interfaces.spm import Smooth -from nipype.interfaces.utility import Function, IdentityInterface - -from .utils import fmriprep_data_template, raw_data_template - - -def get_preprocessing( - exp_dir: str, - result_dir: str, - working_dir: str, - output_dir: str, - subject_list: List[str], - run_list: List[str], - fwhm: float, -): - """ - Returns the preprocessing workflow. 
- - Parameters: - - exp_dir: directory where raw data are stored - - result_dir: directory where results will be stored - - working_dir: name of the sub-directory for intermediate results - - output_dir: name of the sub-directory for final results - - subject_list: list of subject for which you want to do the preprocessing - - run_list: list of runs for which you want to do the preprocessing - - fwhm: fwhm for smoothing step - - Returns: - - preprocessing: Nipype WorkFlow - """ - - infosource_preproc = Node( - IdentityInterface(fields=["subject_id", "run_id"]), name="infosource_preproc" - ) - - # Iterates over subject and runs - infosource_preproc.iterables = [ - ("subject_id", subject_list), - ("run_id", run_list), - ] - - # SelectFiles node - to select necessary files - selectfiles_preproc = Node( - SelectFiles(fmriprep_data_template(), base_directory=exp_dir), - name="selectfiles_preproc", - ) - - # DataSink Node - store the wanted results in the wanted repository - datasink_preproc = Node( - DataSink(base_directory=result_dir, container=working_dir), - name="datasink_preproc", - ) - - gunzip_func = Node(Gunzip(), name="gunzip_func") - - smooth = Node(Smooth(fwhm=fwhm, implicit_masking=False), name="smooth") - - preprocessing = Workflow( - base_dir=opj(result_dir, working_dir), name="preprocessing" - ) - - preprocessing.connect( - [ - ( - infosource_preproc, - selectfiles_preproc, - [("subject_id", "subject_id"), ("run_id", "run_id")], - ), - ( - selectfiles_preproc, - gunzip_func, - [("func_preproc", "in_file")], - ), - ( - gunzip_func, - smooth, - [("out_file", "in_files")], - ), - ( - smooth, - datasink_preproc, - [("smoothed_files", "preprocess.@sym_link")], - ), - ] - ) - - return preprocessing - -""" -In first level analyses, three regressors of interest were included in the model: - -1) task-related activity, -2) task-related activity modulated by the amount of gain, and -3) task-related activity modulated by the amount of loss. - -The regressor for the basic task-related activity was defined -as a vector of ones at every trial onset and zeros at all other time points -(event duration = 0 s). - -Within each functional run, each parametric modulator -(the raw gain or loss values) was scaled such that its maximum value became 1. -All three regressors were convolved with the canonical hemodynamic response function. -The two modulated regressors were orthogonalized against the original task regressor. -Temporal/dispersion derivatives of the regressors were not included. - -In both first and second level analyses, model parameters were estimated -using the classical (Restricted Maximum Likelihood) method as implemented in SPM12. - -For second level analyses, we used a mixed-effects approach with weighted least squares. - -In the second level analysis performed to test hypothesis 9 -(two-sample t-test comparing the equal indifference group and the equal range group), -the two groups were assumed to be independent and have unequal variance. -""" - - -# FIXME: THIS FUNCTION IS USED IN THE FIRST LEVEL ANALYSIS PIPELINES OF SPM -# THIS IS AN EXAMPLE THAT IS ADAPTED TO A SPECIFIC PIPELINE -# MODIFY ACCORDING TO THE PIPELINE YOU WANT TO REPRODUCE -def get_subject_infos_spm(event_files: List[str], runs: List[str]): - """ - Create Bunchs for specifySPMModel. - - Parameters : - - event_files: list of events files (one per run) for the subject - - runs: list of runs to use - - Returns : - - subject_info : list of Bunch for 1st level analysis. 
- """ - - cond_names = ["trial", "accepting", "rejecting"] - onset = {} - duration = {} - weights_gain = {} - weights_loss = {} - onset_button = {} - duration_button = {} - - # Loop over number of runs. - for r in range(len(runs)): - onset |= {f"{s}_run{str(r + 1)}": [] for s in cond_names} - duration |= {f"{s}_run{str(r + 1)}": [] for s in cond_names} - weights_gain[f"gain_run{str(r + 1)}"] = [] - weights_loss[f"loss_run{str(r + 1)}"] = [] - - for r, run in enumerate(runs): - - f_events = event_files[r] - - with open(f_events, "rt") as f: - next(f) # skip the header - - for line in f: - info = line.strip().split() - - for cond in cond_names: - val = f"{cond}_run{str(r + 1)}" - val_gain = f"gain_run{str(r + 1)}" - val_loss = f"loss_run{str(r + 1)}" - if cond == "trial": - onset[val].append(float(info[0])) # onsets for trial_run1 - duration[val].append(float(4)) - weights_gain[val_gain].append( - float(info[2]) - ) # weights gain for trial_run1 - weights_loss[val_loss].append( - float(info[3]) - ) # weights loss for trial_run1 - elif cond == "accepting" and "accept" in info[5]: - onset[val].append(float(info[0]) + float(info[4])) - duration[val].append(float(0)) - elif cond == "rejecting" and "reject" in info[5]: - onset[val].append(float(info[0]) + float(info[4])) - duration[val].append(float(0)) - - # Bunching is done per run, i.e. trial_run1, trial_run2, etc. - # But names must not have '_run1' etc because we concatenate runs - subject_info = [] - for r in range(len(runs)): - - cond = [f"{s}_run{str(r + 1)}" for s in cond_names] - gain = f"gain_run{str(r + 1)}" - loss = f"loss_run{str(r + 1)}" - - subject_info.insert( - r, - Bunch( - conditions=cond_names, - onsets=[onset[c] for c in cond], - durations=[duration[c] for c in cond], - amplitudes=None, - tmod=None, - pmod=[ - Bunch( - name=["gain", "loss"], - poly=[1, 1], - param=[weights_gain[gain], weights_loss[loss]], - ), - None, - ], - regressor_names=None, - regressors=None, - ), - ) - - return subject_info - - -# FIXME: THIS FUNCTION CREATES THE CONTRASTS THAT WILL BE ANALYZED IN THE FIRST LEVEL ANALYSIS -# IT IS ADAPTED FOR A SPECIFIC PIPELINE AND SHOULD BE MODIFIED DEPENDING ON THE PIPELINE -# YOU ARE TRYING TO REPRODUCE -def get_contrasts(subject_id: str): - """ - Create the list of tuples that represents contrasts. - Each contrast is in the form : - (Name,Stat,[list of condition names],[weights on those conditions]) - - Parameters: - - subject_id: ID of the subject - - Returns: - - contrasts: list of tuples, list of contrasts to analyze - """ - # list of condition names - conditions = ["trial", "trialxgain^1", "trialxloss^1"] - - # create contrasts - trial = ("trial", "T", conditions, [1, 0, 0]) - - effect_gain = ("effect_of_gain", "T", conditions, [0, 1, 0]) - - effect_loss = ("effect_of_loss", "T", conditions, [0, 0, 1]) - - # contrast list - contrasts = [effect_gain, effect_loss] - - return contrasts - - -# FUNCTION TO CREATE THE WORKFLOW OF A L1 ANALYSIS (SUBJECT LEVEL) -def get_l1_analysis( - exp_dir: str, - result_dir: str, - working_dir: str, - output_dir: str, - subject_list: List[str], - run_list: List[str], - TR: float, -): - """ - Returns the first level analysis workflow. 
- - Parameters: - - exp_dir: directory where raw data are stored - - result_dir: directory where results will be stored - - working_dir: name of the sub-directory for intermediate results - - output_dir: name of the sub-directory for final results - - subject_list: list of subject for which you want to do the analysis - - run_list: list of runs for which you want to do the analysis - - TR: time repetition used during acquisition - - Returns: - - l1_analysis : Nipype WorkFlow - """ - # THE FOLLOWING PART STAYS THE SAME FOR ALL PIPELINES - # Infosource Node - To iterate on subjects - infosource = Node( - IdentityInterface( - fields=["subject_id", "exp_dir", "result_dir", "working_dir", "run_list"], - exp_dir=exp_dir, - result_dir=result_dir, - working_dir=working_dir, - run_list=run_list, - ), - name="infosource", - ) - - # ITERATES OVER SUBJECT LIST - infosource.iterables = [("subject_id", subject_list)] - - # Templates to select files node - - # FIXME: CHANGE THE NAME OF THE FILE - # DEPENDING ON THE FILENAMES OF RESULTS OF PREPROCESSING - func_file = opj( - result_dir, - output_dir, - "preprocess", - "_run_id_*_subject_id_{subject_id}", - "complete_filename_{subject_id}_complete_filename.nii", - ) - - event_files = opj( - exp_dir, - "sub-{subject_id}", - "func", - "sub-{subject_id}_task-MGT_run-*_events.tsv", - ) - - - - template = {"func": func_file, "event": raw_data_template()["event_file"]} - - # SelectFiles node - to select necessary files - selectfiles = Node( - SelectFiles(template, base_directory=exp_dir), name="selectfiles" - ) - - # DataSink Node - store the wanted results in the wanted repository - datasink = Node( - DataSink(base_directory=result_dir, container=output_dir), name="datasink" - ) - - # Get Subject Info - get subject specific condition information - subject_infos = Node( - Function( - input_names=["event_files", "runs"], - output_names=["subject_info"], - function=get_subject_infos_spm, - ), - name="subject_infos", - ) - - subject_infos.inputs.runs = run_list - # THIS IS THE NODE EXECUTING THE get_contrasts FUNCTION - # Node contrasts to get contrasts - contrasts = Node( - Function( - function=get_contrasts, - input_names=["subject_id"], - output_names=["contrasts"], - ), - name="contrasts", - ) - - # Create l1 analysis workflow and connect its nodes - l1_analysis = Workflow(base_dir=opj(result_dir, working_dir), name="l1_analysis") - - l1_analysis.connect( - [ - (infosource, selectfiles, [("subject_id", "subject_id")]), - (infosource, contrasts, [("subject_id", "subject_id")]), - (selectfiles, subject_infos, [("event", "event_files")]), - # FIXME: Complete with name of node to link with and the name of the input - ( - selectfiles, - node_variable[("func", "node_input_name")], - ), - # Input and output names can be found on NiPype documentation - (node_variable, datasink, [("node_output_name", "preprocess.@sym_link")]), - ] - ) - - return l1_analysis - - -# THIS FUNCTION RETURNS THE LIST OF IDS AND FILES OF EACH GROUP OF PARTICIPANTS -# TO DO SEPARATE GROUP LEVEL ANALYSIS AND BETWEEN GROUP ANALYSIS -# THIS FUNCTIONS IS ADAPTED FOR AN SPM PIPELINE. 
-def get_subset_contrasts_spm( - file_list, subject_list: List[str], participants_file: str -): - """ - Parameters : - - file_list : original file list selected by selectfiles node - - subject_list : list of subject IDs that are in the wanted group for the analysis - - participants_file: file containing participants characteristics - - This function return the file list containing only the files belonging - to the subject in the wanted group. - """ - equalIndifference_id = [] - equalRange_id = [] - equalIndifference_files = [] - equalRange_files = [] - - with open( - participants_file, "rt" - ) as f: # Reading file containing participants IDs and groups - next(f) # skip the header - - for line in f: - info = line.strip().split() - - if info[0][-3:] in subject_list: - if info[1] == "equalIndifference": # Checking for each participant if its ID was selected - # and separate people depending on their group - equalIndifference_id.append(info[0][-3:]) - elif info[1] == "equalRange": - equalRange_id.append(info[0][-3:]) - - # Checking for each selected file if the corresponding participant was selected - # and add the file to the list corresponding to its group - for file in file_list: - sub_id = file.split("/") - if sub_id[-2][-3:] in equalIndifference_id: - equalIndifference_files.append(file) - elif sub_id[-2][-3:] in equalRange_id: - equalRange_files.append(file) - - return ( - equalIndifference_id, - equalRange_id, - equalIndifference_files, - equalRange_files, - ) - - -# FUNCTION TO CREATE THE WORKFLOW OF A L2 ANALYSIS (GROUP LEVEL) -def get_l2_analysis( - exp_dir: str, - result_dir: str, - working_dir: str, - output_dir: str, - subject_list: List[str], - contrast_list: List[str], - n_sub: int, - method: str, -): - """ - Returns the 2nd level of analysis workflow. 
- - Parameters: - - exp_dir: directory where raw data are stored - - result_dir: directory where results will be stored - - working_dir: name of the sub-directory for intermediate results - - output_dir: name of the sub-directory for final results - - subject_list: list of subject for which you want to do the preprocessing - - contrast_list: list of contrasts to analyze - - n_sub: number of subjects used to do the analysis - - method: one of "equalRange", "equalIndifference" or "groupComp" - - Returns: - - l2_analysis: Nipype WorkFlow - """ - # THE FOLLOWING PART STAYS THE SAME FOR ALL PREPROCESSING PIPELINES - # Infosource - a function free node to iterate over the list of subject names - infosource_groupanalysis = Node( - IdentityInterface(fields=["contrast_id", "subjects"], subjects=subject_list), - name="infosource_groupanalysis", - ) - - infosource_groupanalysis.iterables = [("contrast_id", contrast_list)] - - # SelectFiles - contrast_file = opj( - result_dir, - output_dir, - "l1_analysis", - "_subject_id_*", - "complete_filename_{contrast_id}_complete_filename.nii", - ) - # FIXME: CHANGE THE NAME OF THE FILE DEPENDING ON - # THE FILENAMES OF THE RESULTS OF PREPROCESSING - # (DIFFERENT FOR AN FSL PIPELINE) - - participants_file = opj(exp_dir, "participants.tsv") - - templates = {"contrast": contrast_file, "participants": participants_file} - - selectfiles_groupanalysis = Node( - SelectFiles(templates, base_directory=result_dir, force_list=True), - name="selectfiles_groupanalysis", - ) - - # Datasink node : to save important files - datasink_groupanalysis = Node( - DataSink(base_directory=result_dir, container=output_dir), - name="datasink_groupanalysis", - ) - - # IF THIS IS AN SPM PIPELINE: - # Node to select subset of contrasts - sub_contrasts = Node( - Function( - input_names=["file_list", "method", "subject_list", "participants_file"], - output_names=[ - "equalIndifference_id", - "equalRange_id", - "equalIndifference_files", - "equalRange_files", - ], - function=get_subset_contrasts_spm, - ), - name="sub_contrasts", - ) - - sub_contrasts.inputs.method = method - - # IF THIS IS AN FSL PIPELINE: - subgroups_contrasts = Node( - Function( - input_names=["copes", "varcopes", "subject_ids", "participants_file"], - output_names=[ - "copes_equalIndifference", - "copes_equalRange", - "varcopes_equalIndifference", - "varcopes_equalRange", - "equalIndifference_id", - "equalRange_id", - "copes_global", - "varcopes_global", - ], - function=get_subgroups_contrasts, - ), - name="subgroups_contrasts", - ) - - regs = Node( - Function( - input_names=[ - "equalRange_id", - "equalIndifference_id", - "method", - "subject_list", - ], - output_names=["regressors"], - function=get_regs, - ), - name="regs", - ) - regs.inputs.method = method - regs.inputs.subject_list = subject_list - - # FIXME: THE FOLLOWING PART HAS TO BE MODIFIED WITH NODES OF THE PIPELINE - node_variable = Node( - node_function, name="node_name" - ) # Replace with the name of the node_variable, - # the node_function to use in the NiPype interface, - # and the name of the node (recommended to be the same as node_variable) - - # FIXME: ADD OTHER NODES WITH THE DIFFERENT STEPS OF THE PIPELINE - - l2_analysis = Workflow( - base_dir=opj(result_dir, working_dir), name=f"l2_analysis_{method}_nsub_{n_sub}" - ) - # FOR AN SPM PIPELINE - l2_analysis.connect( - [ - ( - infosource_groupanalysis, - selectfiles_groupanalysis, - [("contrast_id", "contrast_id")], - ), - (infosource_groupanalysis, sub_contrasts, [("subjects", "subject_list")]), - ( - 
selectfiles_groupanalysis, - sub_contrasts, - [("contrast", "file_list"), ("participants", "participants_file")], - ), # Complete with other links between nodes - ] - ) - - # FOR AN FSL PIPELINE - l2_analysis.connect( - [ - ( - infosource_groupanalysis, - selectfiles_groupanalysis, - [("contrast_id", "contrast_id")], - ), - ( - infosource_groupanalysis, - subgroups_contrasts, - [("subject_list", "subject_ids")], - ), - ( - selectfiles_groupanalysis, - subgroups_contrasts, - [ - ("cope", "copes"), - ("varcope", "varcopes"), - ("participants", "participants_file"), - ], - ), - ( - selectfiles_groupanalysis, - node_variable[("func", "node_input_name")], - ), # Complete with name of node to link with and the name of the input - # Input and output names can be found on NiPype documentation - ( - node_variable, - datasink_groupanalysis, - [("node_output_name", "preprocess.@sym_link")], - ), - ] - ) # Complete with other links between nodes - - if method == "equalRange" or method == "equalIndifference": - contrasts = [("Group", "T", ["mean"], [1]), ("Group", "T", ["mean"], [-1])] - - elif method == "groupComp": - contrasts = [ - ("Eq range vs Eq indiff in loss", "T", ["Group_{1}", "Group_{2}"], [1, -1]) - ] - - # FIXME: ADD OTHER NODES WITH THE DIFFERENT STEPS OF THE PIPELINE - - return l2_analysis - - -# THIS FUNCTION IS USED TO REORGANIZE FINAL RESULTS OF THE PIPELINE -def reorganize_results(result_dir: str, output_dir: str, n_sub: int, team_ID: str): - """ - Reorganize the results to analyze them. - - Parameters: - - result_dir: directory where results will be stored - - output_dir: name of the sub-directory for final results - - n_sub: number of subject used for the analysis - - team_ID: ID of the team to reorganize results - - """ - - h1 = opj( - result_dir, - output_dir, - f"l2_analysis_equalIndifference_nsub_{n_sub}", - "_contrast_id_01", - ) - h2 = opj( - result_dir, - output_dir, - f"l2_analysis_equalRange_nsub_{n_sub}", - "_contrast_id_01", - ) - h3 = opj( - result_dir, - output_dir, - f"l2_analysis_equalIndifference_nsub_{n_sub}", - "_contrast_id_01", - ) - h4 = opj( - result_dir, - output_dir, - f"l2_analysis_equalRange_nsub_{n_sub}", - "_contrast_id_01", - ) - h5 = opj( - result_dir, - output_dir, - f"l2_analysis_equalIndifference_nsub_{n_sub}", - "_contrast_id_02", - ) - h6 = opj( - result_dir, - output_dir, - f"l2_analysis_equalRange_nsub_{n_sub}", - "_contrast_id_02", - ) - h7 = opj( - result_dir, - output_dir, - f"l2_analysis_equalIndifference_nsub_{n_sub}", - "_contrast_id_02", - ) - h8 = opj( - result_dir, - output_dir, - f"l2_analysis_equalRange_nsub_{n_sub}", - "_contrast_id_02", - ) - h9 = opj( - result_dir, output_dir, f"l2_analysis_groupComp_nsub_{n_sub}", "_contrast_id_02" - ) - - h = [h1, h2, h3, h4, h5, h6, h7, h8, h9] - - repro_unthresh = [ - opj(filename, "_change_filename_.nii") for i, filename in enumerate(h) - ] # Change filename with the filename of the final results - - repro_thresh = [ - opj(filename, "_change_filename_.nii") for i, filename in enumerate(h) - ] - - if not os.path.isdir(opj(result_dir, "NARPS-reproduction")): - os.mkdir(opj(result_dir, "NARPS-reproduction")) - - for i, filename in enumerate(repro_unthresh): - f_in = filename - f_out = opj( - result_dir, - "NARPS-reproduction", - f"team_{team_ID}_nsub_{n_sub}_hypo{i+1}_unthresholded.nii", - ) - shutil.copyfile(f_in, f_out) - - for i, filename in enumerate(repro_thresh): - f_in = filename - f_out = opj( - result_dir, - "NARPS-reproduction", - 
f"team_{team_ID}_nsub_{n_sub}_hypo{i+1}_thresholded.nii", - ) - shutil.copyfile(f_in, f_out) - - print(f"Results files of team {team_ID} reorganized.") diff --git a/tests/conftest.py b/tests/conftest.py index 73dd095e..2223518e 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -6,12 +6,13 @@ pytest on (a) test file(s) in the same directory. """ -from os import remove +from os import remove, mkdir from os.path import join, isfile +from tempfile import mkdtemp from shutil import rmtree from numpy import isclose -from pytest import helpers +from pytest import helpers, fixture from pathvalidate import is_valid_filepath from narps_open.pipelines import Pipeline @@ -24,6 +25,13 @@ # Init configuration, to ensure it is in testing mode Configuration(config_type='testing') +@fixture +def temporary_data_dir(): + """ A fixture to create and remove a temporary directory for the tests """ + data_dir = mkdtemp() + yield data_dir + rmtree(data_dir, ignore_errors = True) + @helpers.register def compare_float_2d_arrays(array_1, array_2): """ Assert array_1 and array_2 are close enough """ diff --git a/tests/pipelines/test_team_R9K3.py b/tests/pipelines/test_team_R9K3.py new file mode 100644 index 00000000..2a435faf --- /dev/null +++ b/tests/pipelines/test_team_R9K3.py @@ -0,0 +1,123 @@ +#!/usr/bin/python +# coding: utf-8 + +""" Tests of the 'narps_open.pipelines.team_R9K3' module. + +Launch this test with PyTest + +Usage: +====== + pytest -q test_team_R9K3.py + pytest -q test_team_R9K3.py -k +""" +from os.path import join, exists, abspath +from filecmp import cmp + +from pytest import helpers, mark +from nipype import Workflow, Node +from nipype.interfaces.utility import Function +from nipype.interfaces.base import Bunch + +from narps_open.utils.configuration import Configuration +from narps_open.pipelines.team_R9K3 import PipelineTeamR9K3 + +class TestPipelinesTeamR9K3: + """ A class that contains all the unit tests for the PipelineTeamR9K3 class.""" + + @staticmethod + @mark.unit_test + def test_create(): + """ Test the creation of a PipelineTeamR9K3 object """ + + pipeline = PipelineTeamR9K3() + + # 1 - check the parameters + assert pipeline.fwhm == 6.0 + assert pipeline.team_id == 'R9K3' + + # 2 - check workflows + assert isinstance(pipeline.get_preprocessing(), Workflow) + assert pipeline.get_run_level_analysis() is None + assert isinstance(pipeline.get_subject_level_analysis(), Workflow) + group_level = pipeline.get_group_level_analysis() + assert len(group_level) == 3 + for sub_workflow in group_level: + assert isinstance(sub_workflow, Workflow) + + @staticmethod + @mark.unit_test + def test_outputs(): + """ Test the expected outputs of a PipelineTeamR9K3 object """ + pipeline = PipelineTeamR9K3() + # 1 - 1 subject outputs + pipeline.subject_list = ['001'] + helpers.test_pipeline_outputs(pipeline, [4, 0, 5, 42, 18]) + + # 2 - 4 subjects outputs + pipeline.subject_list = ['001', '002', '003', '004'] + helpers.test_pipeline_outputs(pipeline, [16, 0, 20, 42, 18]) + + @staticmethod + @mark.unit_test + def test_confounds_file(temporary_data_dir): + """ Test the get_confounds_file method """ + + # Test files + in_confounds_file = abspath(join(Configuration()['directories']['test_data'], + 'pipelines', 'confounds.tsv')) + + # Reference file + ref_file = abspath(join(Configuration()['directories']['test_data'], + 'pipelines', 'team_R9K3', 'confounds.tsv')) + + # Create average values file + confounds_node = Node(Function( + input_names = ['confounds_file', 'subject_id', 'run_id'], + output_names = 
['out_file'], + function = PipelineTeamR9K3.get_confounds_file), + name = 'confounds_node') + confounds_node.base_dir = temporary_data_dir + confounds_node.inputs.confounds_file = in_confounds_file + confounds_node.inputs.subject_id = 'sid' + confounds_node.inputs.run_id = 'rid' + confounds_node.run() + + # Check file was created + created_confounds_file = abspath(join( + temporary_data_dir, confounds_node.name, 'confounds_file_sub-sid_run-rid.tsv')) + assert exists(created_confounds_file) + + # Check contents + assert cmp(ref_file, created_confounds_file) + + @staticmethod + @mark.unit_test + def test_subject_information(): + """ Test the get_subject_information method """ + + # Test with 'gain' + test_event_file = join( + Configuration()['directories']['test_data'], 'pipelines', 'events.tsv') + information = PipelineTeamR9K3.get_subject_information(test_event_file) + + assert isinstance(information, Bunch) + assert information.conditions == ['trial'] + + helpers.compare_float_2d_arrays([[0.0, 0.0, 0.0, 0.0, 0.0]], information.durations) + helpers.compare_float_2d_arrays( + [[4.071, 11.834, 19.535, 27.535, 36.435]], + information.onsets) + paramateric_modulation = information.pmod[0] + assert isinstance(paramateric_modulation, Bunch) + assert paramateric_modulation.name == ['gain', 'loss'] + assert paramateric_modulation.poly == [1, 1] + helpers.compare_float_2d_arrays( + [[0.368421053, 0.894736842, 1.0, 0.263157895, 0.421052632], + [0.315789474, 0.736842105, 1.0, 0.789473684, 0.894736842]], + paramateric_modulation.param) + + @staticmethod + @mark.pipeline_test + def test_execution(): + """ Test the execution of a PipelineTeamR9K3 and compare results """ + helpers.test_pipeline_evaluation('R9K3') diff --git a/tests/test_conftest.py b/tests/test_conftest.py index 4ea92fb5..398a9a70 100644 --- a/tests/test_conftest.py +++ b/tests/test_conftest.py @@ -11,13 +11,12 @@ pytest -q test_conftest.py -k """ -from os import makedirs, remove -from os.path import join, abspath, isdir, isfile -from shutil import rmtree +from os import remove +from os.path import join, isdir, isfile from datetime import datetime -from pytest import mark, helpers, fixture, raises +from pytest import mark, helpers, raises from nipype import Node, Workflow from nipype.interfaces.utility import Function @@ -26,24 +25,13 @@ from narps_open.runner import PipelineRunner from narps_open.pipelines import Pipeline -TEST_DIR = abspath(join(Configuration()['directories']['test_runs'], 'test_conftest')) - -@fixture -def set_test_directory(scope = 'function'): - """ A fixture to remove temporary directory created by tests """ - - rmtree(TEST_DIR, ignore_errors = True) - makedirs(TEST_DIR, exist_ok = True) - yield - # Comment this line for debugging - rmtree(TEST_DIR, ignore_errors = True) - class MockupPipeline(Pipeline): """ A simple Pipeline class for test purposes """ - def __init__(self): + def __init__(self, base_dir: str): super().__init__() - self.test_file = join(TEST_DIR, 'test_conftest.txt') + self.base_dir = base_dir + self.test_file = join(base_dir, 'test_conftest.txt') # Init the test_file : write a number of execution set to zero with open(self.test_file, 'w', encoding = 'utf-8') as file: @@ -126,7 +114,7 @@ def create_workflow(self, workflow_name: str, file_list: list): node_files.inputs.file_list = file_list workflow = Workflow( - base_dir = TEST_DIR, + base_dir = self.base_dir, name = workflow_name ) workflow.add_nodes([node_count, node_decide, node_files]) @@ -166,21 +154,21 @@ def get_group_level_analysis(self): 
def get_preprocessing_outputs(self): """ Return a list of templates of the output files generated by the preprocessing """ - template = join(TEST_DIR, 'subject_id_{subject_id}_output_preprocessing_1.md') + template = join(self.base_dir, 'subject_id_{subject_id}_output_preprocessing_1.md') return [template.format(subject_id = s) for s in self.subject_list] def get_run_level_outputs(self): """ Return a list of templates of the output files generated by the run level analysis. Templates are expressed relatively to the self.directories.output_dir. """ - template = join(TEST_DIR, 'subject_id_{subject_id}_output_run_1.md') + template = join(self.base_dir, 'subject_id_{subject_id}_output_run_1.md') return [template.format(subject_id = s) for s in self.subject_list] def get_subject_level_outputs(self): """ Return a list of templates of the output files generated by the subject level analysis. Templates are expressed relatively to the self.directories.output_dir. """ - template = join(TEST_DIR, 'subject_id_{subject_id}_output_analysis_1.md') + template = join(self.base_dir, 'subject_id_{subject_id}_output_analysis_1.md') return [template.format(subject_id = s) for s in self.subject_list] def get_group_level_outputs(self): @@ -188,19 +176,19 @@ def get_group_level_outputs(self): Templates are expressed relatively to the self.directories.output_dir. """ templates = [ - join(TEST_DIR, 'group_{nb_subjects}_output_a.md'), - join(TEST_DIR, 'group_{nb_subjects}_output_b.md') + join(self.base_dir, 'group_{nb_subjects}_output_a.md'), + join(self.base_dir, 'group_{nb_subjects}_output_b.md') ] return_list = [t.format(nb_subjects = len(self.subject_list)) for t in templates] - template = join(TEST_DIR, 'hypothesis_{id}.md') + template = join(self.base_dir, 'hypothesis_{id}.md') return_list += [template.format(id = i) for i in range(1,19)] return return_list def get_hypotheses_outputs(self): """ Return the names of the files used by the team to answer the hypotheses of NARPS. 
""" - template = join(TEST_DIR, 'hypothesis_{id}.md') + template = join(self.base_dir, 'hypothesis_{id}.md') return [template.format(id = i) for i in range(1,19)] class MockupResultsCollection(): @@ -263,11 +251,11 @@ def test_compare_float_2d_arrays(): @staticmethod @mark.unit_test - def test_test_outputs(set_test_directory): + def test_test_outputs(temporary_data_dir): """ Test the test_pipeline_outputs helper """ # Test pipeline - pipeline = MockupPipeline() + pipeline = MockupPipeline(temporary_data_dir) pipeline.subject_list = ['001', '002'] # Wrong length for nb_of_outputs @@ -343,7 +331,7 @@ def test_test_correlation_results(mocker): @staticmethod @mark.unit_test - def test_test_pipeline_execution(mocker, set_test_directory): + def test_test_pipeline_execution(mocker, temporary_data_dir): """ Test the test_pipeline_execution helper """ # Set subgroups of subjects @@ -352,7 +340,7 @@ def test_test_pipeline_execution(mocker, set_test_directory): # Create mocks mocker.patch('conftest.get_correlation_coefficient', return_value = 1.0) fake_runner = PipelineRunner('2T6S') - fake_runner._pipeline = MockupPipeline() + fake_runner._pipeline = MockupPipeline(temporary_data_dir) mocker.patch('conftest.PipelineRunner', return_value = fake_runner) mocker.patch('conftest.ResultsCollection', return_value = MockupResultsCollection('2T6S')) @@ -360,13 +348,13 @@ def test_test_pipeline_execution(mocker, set_test_directory): helpers.test_pipeline_execution('test_conftest', 7) # Check outputs - assert isdir(join(TEST_DIR, 'TestConftest_preprocessing_workflow')) - assert isdir(join(TEST_DIR, 'TestConftest_run_level_workflow')) - assert isdir(join(TEST_DIR, 'TestConftest_subject_level_workflow')) - assert isdir(join(TEST_DIR, 'TestConftest_group_level_workflow')) + assert isdir(join(temporary_data_dir, 'TestConftest_preprocessing_workflow')) + assert isdir(join(temporary_data_dir, 'TestConftest_run_level_workflow')) + assert isdir(join(temporary_data_dir, 'TestConftest_subject_level_workflow')) + assert isdir(join(temporary_data_dir, 'TestConftest_group_level_workflow')) # Check executions - with open(join(TEST_DIR, 'test_conftest.txt'), 'r', encoding = 'utf-8') as file: + with open(join(temporary_data_dir, 'test_conftest.txt'), 'r', encoding = 'utf-8') as file: assert file.readline() == '0\n' # First exec of preprocessing creates an exception (execution counter == 1) assert file.readline() == 'TestConftest_preprocessing_workflow 4 1\n' diff --git a/tests/test_data/pipelines/team_R9K3/confounds.tsv b/tests/test_data/pipelines/team_R9K3/confounds.tsv new file mode 100644 index 00000000..cf63c178 --- /dev/null +++ b/tests/test_data/pipelines/team_R9K3/confounds.tsv @@ -0,0 +1,3 @@ +0.0 0.0 0.0 0.0 -0.0 0.0 +-0.00996895 -0.0313444 -3.00931e-06 0.00132687 -0.000384193 -0.00016819 +-2.56954e-05 -0.00923735 0.0549667 0.000997278 -0.00019745 -0.000398988 From 5ffd5d9083f1702e71b915f3b291395f6af3e382 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 26 Feb 2024 15:39:43 +0100 Subject: [PATCH 2/7] [BUG] smoothing outputs --- narps_open/pipelines/__init__.py | 2 +- narps_open/pipelines/team_R9K3.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/narps_open/pipelines/__init__.py b/narps_open/pipelines/__init__.py index 66cab6f5..d93845c3 100644 --- a/narps_open/pipelines/__init__.py +++ b/narps_open/pipelines/__init__.py @@ -66,7 +66,7 @@ 'R42Q': None, 'R5K7': None, 'R7D1': None, - 'R9K3': 'PipelineTeam08MQ', + 'R9K3': 'PipelineTeamR9K3', 'SM54': None, 'T54A': 
'PipelineTeamT54A', 'U26C': 'PipelineTeamU26C', diff --git a/narps_open/pipelines/team_R9K3.py b/narps_open/pipelines/team_R9K3.py index 3aba32b7..3de87d8d 100644 --- a/narps_open/pipelines/team_R9K3.py +++ b/narps_open/pipelines/team_R9K3.py @@ -105,9 +105,9 @@ def get_preprocessing_outputs(self): output_dir = join(self.directories.output_dir, 'preprocessing', '_subject_id_{subject_id}') - # Smoothing outputs # TODO + # Smoothing outputs templates = [join(output_dir, f'_smoothing{index}', - 'srsub-{subject_id}'+f'_task-MGT_run-{run_id}_bold.nii')\ + 'ssub-{subject_id}'+f'_task-MGT_run-{run_id}_bold.nii')\ for index, run_id in zip(range(len(self.run_list)), self.run_list)] # Format with subject_ids @@ -229,7 +229,7 @@ def get_subject_level_analysis(self): 'confounds' : join('derivatives', 'fmriprep', 'sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-*_bold_confounds.tsv'), 'func' : join(self.directories.output_dir, 'preprocessing', '_subject_id_{subject_id}', - '_smoothing*', 'srsub-{subject_id}_task-MGT_run-*_bold.nii'), # TODO + '_smoothing*', 'ssub-{subject_id}_task-MGT_run-*_bold.nii'), 'event' : join('sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-*_events.tsv'), } From 06212fe05701b2ef3f160dd37875e258e90127cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 26 Feb 2024 15:51:01 +0100 Subject: [PATCH 3/7] [BUG] contrast definitions --- narps_open/pipelines/team_R9K3.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_R9K3.py b/narps_open/pipelines/team_R9K3.py index 3de87d8d..61d15304 100644 --- a/narps_open/pipelines/team_R9K3.py +++ b/narps_open/pipelines/team_R9K3.py @@ -36,8 +36,8 @@ def __init__(self): self.contrast_list = ['0001', '0002'] conditions = ['trialxgain^1', 'trialxloss^1'] self.subject_level_contrasts = [ - ['effect_of_gain', 'T', conditions, [0, 1, 0]], - ['effect_of_loss', 'T', conditions, [0, 0, 1]] + ['effect_of_gain', 'T', conditions, [1, 0]], + ['effect_of_loss', 'T', conditions, [0, 1]] ] def get_preprocessing(self): @@ -139,7 +139,7 @@ def get_subject_information(event_file): weights_gain = [] weights_loss = [] - with open(event_file, "rt") as file: + with open(event_file, 'rt') as file: next(file) # skip the header for line in file: From 0bdc4ad2c488bcd495b7324e6a26fc8242ae11e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 26 Feb 2024 16:04:47 +0100 Subject: [PATCH 4/7] [BUG] input func from derivs --- narps_open/pipelines/team_R9K3.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_R9K3.py b/narps_open/pipelines/team_R9K3.py index 61d15304..f843e8f0 100644 --- a/narps_open/pipelines/team_R9K3.py +++ b/narps_open/pipelines/team_R9K3.py @@ -60,8 +60,8 @@ def get_preprocessing(self): # SELECT FILES - to select necessary files templates = { - 'func' : join('sub-{subject_id}', 'func', - 'sub-{subject_id}_task-MGT_run-*_bold.nii.gz'), + 'func' : join('derivatives', 'fmriprep', 'sub-{subject_id}', 'func', + 'sub-{subject_id}_task-MGT_run-*_bold_space-MNI152NLin2009cAsym_preproc.nii.gz') } select_files = Node(SelectFiles(templates), name = 'select_files') select_files.inputs.base_directory = self.directories.dataset_dir From 78dbe4467563ae6b4cf5c1ff513a20477c0c12b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 5 Mar 2024 09:20:01 +0100 Subject: [PATCH 5/7] Select file template naming error --- narps_open/pipelines/team_R9K3.py | 5 +++-- 1 file changed, 3 insertions(+), 2 

From 78dbe4467563ae6b4cf5c1ff513a20477c0c12b4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Tue, 5 Mar 2024 09:20:01 +0100
Subject: [PATCH 5/7] Select file template naming error

---
 narps_open/pipelines/team_R9K3.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/narps_open/pipelines/team_R9K3.py b/narps_open/pipelines/team_R9K3.py
index f843e8f0..2bf3330b 100644
--- a/narps_open/pipelines/team_R9K3.py
+++ b/narps_open/pipelines/team_R9K3.py
@@ -229,9 +229,10 @@ def get_subject_level_analysis(self):
             'confounds' : join('derivatives', 'fmriprep', 'sub-{subject_id}', 'func',
                 'sub-{subject_id}_task-MGT_run-*_bold_confounds.tsv'),
             'func' : join(self.directories.output_dir, 'preprocessing', '_subject_id_{subject_id}',
-                '_smoothing*', 'ssub-{subject_id}_task-MGT_run-*_bold.nii'),
+                '_smoothing*',
+                'ssub-{subject_id}_task-MGT_run-*_bold_space-MNI152NLin2009cAsym_preproc.nii'),
             'event' : join('sub-{subject_id}', 'func',
-                'sub-{subject_id}_task-MGT_run-*_events.tsv'),
+                'sub-{subject_id}_task-MGT_run-*_events.tsv')
         }
         select_files = Node(SelectFiles(templates), name = 'select_files')
         select_files.inputs.base_directory = self.directories.dataset_dir
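Review note on PATCH 5/7: the subject-level 'func' template has to match what preprocessing now writes. After PATCH 4/7 the smoothing input is the fmriprep preproc image; Gunzip drops the .gz extension and SPM's Smooth prepends its default 's' prefix, so the '_space-MNI152NLin2009cAsym_preproc' suffix survives into the smoothed file name. A standalone sanity check of that naming, assuming the default prefix ('sub-001' is only an illustrative subject id):

    from os.path import basename

    # Gunzip: strip '.gz'; SPM Smooth: prepend 's' to the base name.
    preproc = 'sub-001_task-MGT_run-01_bold_space-MNI152NLin2009cAsym_preproc.nii.gz'
    smoothed = 's' + basename(preproc)[:-len('.gz')]
    assert smoothed == 'ssub-001_task-MGT_run-01_bold_space-MNI152NLin2009cAsym_preproc.nii'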

From 6b322bd259576b241cd7518084f0f047af33d120 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Fri, 15 Mar 2024 11:22:05 +0100
Subject: [PATCH 6/7] Correcting hypothesis files

---
 narps_open/pipelines/team_R9K3.py | 36 +++++++++++++++----------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/narps_open/pipelines/team_R9K3.py b/narps_open/pipelines/team_R9K3.py
index 2bf3330b..02523faa 100644
--- a/narps_open/pipelines/team_R9K3.py
+++ b/narps_open/pipelines/team_R9K3.py
@@ -590,40 +590,40 @@ def get_hypotheses_outputs(self):
         nb_sub = len(self.subject_list)
         files = [
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
-                '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'),
+                '_contrast_id_0001', '_threshold0', 'spmT_0001_thr.nii'),
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
-                '_contrast_id_0002', 'spmT_0001.nii'),
+                '_contrast_id_0001', 'spmT_0001.nii'),
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'),
+                '_contrast_id_0001', '_threshold0', 'spmT_0001_thr.nii'),
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_0002', 'spmT_0001.nii'),
+                '_contrast_id_0001', 'spmT_0001.nii'),
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
-                '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'),
+                '_contrast_id_0001', '_threshold0', 'spmT_0001_thr.nii'),
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
-                '_contrast_id_0002', 'spmT_0001.nii'),
+                '_contrast_id_0001', 'spmT_0001.nii'),
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'),
+                '_contrast_id_0001', '_threshold0', 'spmT_0001_thr.nii'),
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_0002', 'spmT_0001.nii'),
+                '_contrast_id_0001', 'spmT_0001.nii'),
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
-                '_contrast_id_0003', '_threshold1', 'spmT_0002_thr.nii'),
+                '_contrast_id_0002', '_threshold1', 'spmT_0002_thr.nii'),
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
-                '_contrast_id_0003', 'spmT_0002.nii'),
+                '_contrast_id_0002', 'spmT_0002.nii'),
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_0003', '_threshold1', 'spmT_0001_thr.nii'),
+                '_contrast_id_0002', '_threshold1', 'spmT_0001_thr.nii'),
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_0003', 'spmT_0001.nii'),
+                '_contrast_id_0002', 'spmT_0001.nii'),
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
-                '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'),
+                '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'),
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
-                '_contrast_id_0003', 'spmT_0001.nii'),
+                '_contrast_id_0002', 'spmT_0001.nii'),
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_0003', '_threshold0', 'spmT_0002_thr.nii'),
+                '_contrast_id_0002', '_threshold0', 'spmT_0002_thr.nii'),
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_0003', 'spmT_0002.nii'),
+                '_contrast_id_0002', 'spmT_0002.nii'),
             join(f'group_level_analysis_groupComp_nsub_{nb_sub}',
-                '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'),
+                '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'),
             join(f'group_level_analysis_groupComp_nsub_{nb_sub}',
-                '_contrast_id_0003', 'spmT_0001.nii')
+                '_contrast_id_0002', 'spmT_0001.nii')
         ]
         return [join(self.directories.output_dir, f) for f in files]
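Review note on PATCH 6/7: SPM numbers contrast outputs (the spmT_*.nii files and the matching '_contrast_id_*' directories) by position in the subject-level contrast list. With only the two contrasts defined in __init__, the valid ids are '0001' and '0002'; the '0002'/'0003' ids presumably dated from an earlier three-column design. A standalone sketch of the numbering:

    # Contrast ids follow list order: 1-based, zero-padded to four digits.
    contrast_names = ['effect_of_gain', 'effect_of_loss']
    contrast_ids = [f'{index:04d}' for index in range(1, len(contrast_names) + 1)]
    assert contrast_ids == ['0001', '0002']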

From b9222b484529d59b98e61c9d6b181b3ea0b3fd0b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Fri, 15 Mar 2024 11:28:24 +0100
Subject: [PATCH 7/7] Hypotheses files names

---
 narps_open/pipelines/team_R9K3.py | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/narps_open/pipelines/team_R9K3.py b/narps_open/pipelines/team_R9K3.py
index 02523faa..2946094b 100644
--- a/narps_open/pipelines/team_R9K3.py
+++ b/narps_open/pipelines/team_R9K3.py
@@ -589,38 +589,47 @@ def get_hypotheses_outputs(self):
         """
         nb_sub = len(self.subject_list)
         files = [
+            # Hypothesis 1
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
                 '_contrast_id_0001', '_threshold0', 'spmT_0001_thr.nii'),
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
                 '_contrast_id_0001', 'spmT_0001.nii'),
+            # Hypothesis 2
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
                 '_contrast_id_0001', '_threshold0', 'spmT_0001_thr.nii'),
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
                 '_contrast_id_0001', 'spmT_0001.nii'),
+            # Hypothesis 3
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
                 '_contrast_id_0001', '_threshold0', 'spmT_0001_thr.nii'),
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
                 '_contrast_id_0001', 'spmT_0001.nii'),
+            # Hypothesis 4
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
                 '_contrast_id_0001', '_threshold0', 'spmT_0001_thr.nii'),
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
                 '_contrast_id_0001', 'spmT_0001.nii'),
+            # Hypothesis 5
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
                 '_contrast_id_0002', '_threshold1', 'spmT_0002_thr.nii'),
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
                 '_contrast_id_0002', 'spmT_0002.nii'),
+            # Hypothesis 6
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_0002', '_threshold1', 'spmT_0001_thr.nii'),
+                '_contrast_id_0002', '_threshold1', 'spmT_0002_thr.nii'),
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_0002', 'spmT_0001.nii'),
+                '_contrast_id_0002', 'spmT_0002.nii'),
+            # Hypothesis 7
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
                 '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'),
             join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
                 '_contrast_id_0002', 'spmT_0001.nii'),
+            # Hypothesis 8
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_0002', '_threshold0', 'spmT_0002_thr.nii'),
+                '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'),
             join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_0002', 'spmT_0002.nii'),
+                '_contrast_id_0002', 'spmT_0001.nii'),
+            # Hypothesis 9
             join(f'group_level_analysis_groupComp_nsub_{nb_sub}',
                 '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'),
             join(f'group_level_analysis_groupComp_nsub_{nb_sub}',
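Review note on PATCH 7/7: each NARPS hypothesis is answered by one thresholded and one unthresholded spmT map taken from the matching group-level directory, which the new '# Hypothesis N' comments make explicit. A hypothetical helper illustrating that pairing (hypothesis_files, its parameters, and nb_sub = 108 are illustrative, not pipeline code):

    from os.path import join

    def hypothesis_files(group, contrast_id, stat_id, threshold_id, nb_sub):
        """ Return the (thresholded, unthresholded) maps for one hypothesis. """
        base = join(f'group_level_analysis_{group}_nsub_{nb_sub}',
            f'_contrast_id_{contrast_id}')
        return (
            join(base, f'_threshold{threshold_id}', f'spmT_{stat_id}_thr.nii'),
            join(base, f'spmT_{stat_id}.nii'))

    # Hypothesis 5: the loss effect in the equal indifference group.
    print(hypothesis_files('equalIndifference', '0002', '0002', 1, 108))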