From 7dcd88f177f5d85fdd3cc6f24136430887008988 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 12 Apr 2024 16:35:59 +0200 Subject: [PATCH 01/17] First version of 4SZ2 reproduction [skip ci] --- narps_open/pipelines/team_4SZ2.py | 684 ++++++++++++++++++++++++++++++ tests/pipelines/test_team_4SZ2.py | 93 ++++ 2 files changed, 777 insertions(+) create mode 100644 narps_open/pipelines/team_4SZ2.py create mode 100644 tests/pipelines/test_team_4SZ2.py diff --git a/narps_open/pipelines/team_4SZ2.py b/narps_open/pipelines/team_4SZ2.py new file mode 100644 index 00000000..23e1d998 --- /dev/null +++ b/narps_open/pipelines/team_4SZ2.py @@ -0,0 +1,684 @@ +#!/usr/bin/python +# coding: utf-8 + +""" Write the work of NARPS team 4SZ2 using Nipype """ + +from os.path import join +from itertools import product + +from nipype import Workflow, Node, MapNode +from nipype.interfaces.utility import IdentityInterface, Function, Split +from nipype.interfaces.io import SelectFiles, DataSink +from nipype.interfaces.fsl import ( + IsotropicSmooth, Level1Design, FEATModel, + L2Model, Merge, FLAMEO, FILMGLS, MultipleRegressDesign, + FSLCommand, Randomise + ) +from nipype.algorithms.modelgen import SpecifyModel +from nipype.interfaces.fsl.maths import MultiImageMaths + +from narps_open.utils.configuration import Configuration +from narps_open.pipelines import Pipeline +from narps_open.data.task import TaskInformation +from narps_open.data.participants import get_group +from narps_open.core.common import list_intersection, elements_in_string, clean_list +from narps_open.core.interfaces import InterfaceFactory + +# Setup FSL +FSLCommand.set_default_output_type('NIFTI_GZ') + +class PipelineTeam4SZ2(Pipeline): + """ A class that defines the pipeline of team 4SZ2 """ + + def __init__(self): + super().__init__() + self.fwhm = 5.0 + self.team_id = '4SZ2' + self.contrast_list = ['1', '2'] + self.run_level_contrasts = [ + ('effect_of_gain', 'T', ['gain', 'loss'], [1, 0]), + ('effect_of_loss', 'T', ['gain', 'loss'], [0, 1]) + ] + + def get_preprocessing(self): + """ No preprocessing has been done by team 4SZ2 """ + return None + + def get_subject_information(event_file): + """ + Create Bunchs for specifyModel. + + Parameters : + - event_file : str, file corresponding to the run and the subject to analyze + + Returns : + - subject_info : list of Bunch for 1st level analysis. + """ + from nipype.interfaces.base import Bunch + + onsets = [] + durations = [] + amplitudes_gain = [] + amplitudes_loss = [] + + with open(event_file, 'rt') as file: + next(file) # skip the header + + for line in file: + info = line.strip().split() + onsets.append(float(info[0])) + durations.append(float(info[1])) + amplitudes_gain.append(float(info[2])) + amplitudes_loss.append(float(info[3])) + + return [ + Bunch( + conditions = ['gain', 'loss', 'gain_derivative', 'loss_derivative'], + onsets = [onsets] * 4, + durations = [durations] * 4, + amplitudes = [amplitudes_gain, amplitudes_loss] + ) + ] + + def get_run_level_analysis(self): + """ + Create the run level analysis workflow. 
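+        It smooths the preprocessed BOLD series, then fits a run-level GLM with
+        FILM prewhitening and computes the gain and loss contrast estimates.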
+ + Returns: + - run_level : nipype.WorkFlow + """ + # Create run level analysis workflow and connect its nodes + run_level = Workflow( + base_dir = self.directories.working_dir, + name = 'run_level_analysis' + ) + + # IdentityInterface Node - Iterate on subject and runs + information_source = Node(IdentityInterface( + fields = ['subject_id', 'run_id']), + name = 'information_source') + information_source.iterables = [ + ('subject_id', self.subject_list), + ('run_id', self.run_list) + ] + + # SelectFiles - Get necessary files + templates = { + 'func' : join('derivatives', 'fmriprep', 'sub-{subject_id}', 'func', + 'sub-{subject_id}_task-MGT_run-{run_id}_bold_space-MNI152NLin2009cAsym_preproc.nii.gz'), + 'events' : join('sub-{subject_id}', 'func', + 'sub-{subject_id}_task-MGT_run-{run_id}_events.tsv') + } + select_files = Node(SelectFiles(templates), name = 'select_files') + select_files.inputs.base_directory = self.directories.dataset_dir + run_level.connect(information_source, 'subject_id', select_files, 'subject_id') + run_level.connect(information_source, 'run_id', select_files, 'run_id') + + # IsotropicSmooth Node - Smoothing data + smoothing_func = Node(IsotropicSmooth(), name = 'smoothing_func') + smoothing_func.inputs.fwhm = self.fwhm + run_level.connect(select_files, 'func', smoothing_func, 'in_file') + + # Get Subject Info - get subject specific condition information + subject_information = Node(Function( + function = self.get_subject_information, + input_names = ['event_file'], + output_names = ['subject_info'] + ), name = 'subject_information') + run_level.connect(select_files, 'events', subject_information, 'event_file') + + # SpecifyModel Node - Generate run level model + specify_model = Node(SpecifyModel(), name = 'specify_model') + specify_model.inputs.high_pass_filter_cutoff = 100 + specify_model.inputs.input_units = 'secs' + specify_model.inputs.time_repetition = TaskInformation()['RepetitionTime'] + run_level.connect(smoothing_func, 'out_file', specify_model, 'functional_runs') + run_level.connect(subject_information, 'subject_info', specify_model, 'subject_info') + + # Level1Design Node - Generate files for run level computation + model_design = Node(Level1Design(), name = 'model_design') + model_design.inputs.bases = {'dgamma' : {'derivs' : False }} + model_design.inputs.interscan_interval = TaskInformation()['RepetitionTime'] + model_design.inputs.model_serial_correlations = True + model_design.inputs.contrasts = self.run_level_contrasts + run_level.connect(specify_model, 'session_info', model_design, 'session_info') + + # FEATModel Node - Generate run level model + model_generation = Node(FEATModel(), name = 'model_generation') + run_level.connect(model_design, 'ev_files', model_generation, 'ev_files') + run_level.connect(model_design, 'fsf_files', model_generation, 'fsf_file') + + # FILMGLS Node - Estimate first level model + model_estimate = Node(FILMGLS(), name='model_estimate') + run_level.connect(smoothing_func, 'out_file', model_estimate, 'in_file') + run_level.connect(model_generation, 'con_file', model_estimate, 'tcon_file') + run_level.connect(model_generation, 'design_file', model_estimate, 'design_file') + + # DataSink Node - store the wanted results in the wanted directory + data_sink = Node(DataSink(), name = 'data_sink') + data_sink.inputs.base_directory = self.directories.output_dir + run_level.connect(model_estimate, 'results_dir', data_sink, 'run_level_analysis.@results') + run_level.connect( + model_generation, 'design_file', data_sink, 
'run_level_analysis.@design_file') + run_level.connect( + model_generation, 'design_image', data_sink, 'run_level_analysis.@design_img') + + # Remove large files, if requested + if Configuration()['pipelines']['remove_unused_data']: + remove_smooth = Node( + InterfaceFactory.create('remove_parent_directory'), + name = 'remove_smooth') + run_level.connect(data_sink, 'out_file', remove_smooth, '_') + run_level.connect(smoothing_func, 'out_file', remove_smooth, 'file_name') + + return run_level + + def get_run_level_outputs(self): + """ Return the names of the files the run level analysis is supposed to generate. """ + + parameters = { + 'run_id' : self.run_list, + 'subject_id' : self.subject_list, + 'contrast_id' : self.contrast_list, + } + parameter_sets = product(*parameters.values()) + output_dir = join(self.directories.output_dir, + 'run_level_analysis', '_run_id_{run_id}_subject_id_{subject_id}') + templates = [ + join(output_dir, 'results', 'cope{contrast_id}.nii.gz'), + join(output_dir, 'results', 'tstat{contrast_id}.nii.gz'), + join(output_dir, 'results', 'varcope{contrast_id}.nii.gz'), + join(output_dir, 'results', 'zstat{contrast_id}.nii.gz') + ] + return [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets for template in templates] + + def get_subject_level_analysis(self): + """ + Create the subject level analysis workflow. + + Returns: + - subject_level_analysis : nipype.WorkFlow + """ + # Second level (single-subject, mean of all four scans) analysis workflow. + subject_level = Workflow( + base_dir = self.directories.working_dir, + name = 'subject_level_analysis') + + # Infosource Node - To iterate on subject and runs + information_source = Node(IdentityInterface( + fields = ['subject_id', 'contrast_id']), + name = 'information_source') + information_source.iterables = [ + ('subject_id', self.subject_list), + ('contrast_id', self.contrast_list) + ] + + # SelectFiles node - to select necessary files + templates = { + 'cope' : join(self.directories.output_dir, + 'run_level_analysis', '_run_id_*_subject_id_{subject_id}', 'results', + 'cope{contrast_id}.nii.gz'), + 'varcope' : join(self.directories.output_dir, + 'run_level_analysis', '_run_id_*_subject_id_{subject_id}', 'results', + 'varcope{contrast_id}.nii.gz'), + 'masks' : join('derivatives', 'fmriprep', 'sub-{subject_id}', 'func', + 'sub-{subject_id}_task-MGT_run-{run_id}_bold_space-MNI152NLin2009cAsym_brainmask.nii.gz') + } + select_files = Node(SelectFiles(templates), name = 'select_files') + select_files.inputs.base_directory= self.directories.results_dir + subject_level.connect(information_source, 'subject_id', select_files, 'subject_id') + subject_level.connect(information_source, 'contrast_id', select_files, 'contrast_id') + + # Merge Node - Merge copes files for each subject + merge_copes = Node(Merge(), name = 'merge_copes') + merge_copes.inputs.dimension = 't' + subject_level.connect(select_files, 'cope', merge_copes, 'in_files') + + # Merge Node - Merge varcopes files for each subject + merge_varcopes = Node(Merge(), name = 'merge_varcopes') + merge_varcopes.inputs.dimension = 't' + subject_level.connect(select_files, 'varcope', merge_varcopes, 'in_files') + + # Split Node - Split mask list to serve them as inputs of the MultiImageMaths node. 
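+        # e.g. with the 4 runs of the dataset, splits = [1, 3] below: out1 receives
+        # the first mask as a plain path (squeeze = True) and out2 the 3 remaining ones.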
+ split_masks = Node(Split(), name = 'split_masks') + split_masks.inputs.splits = [1, len(self.run_list) - 1] + split_masks.inputs.squeeze = True # Unfold one-element splits removing the list + subject_level.connect(select_files, 'masks', split_masks, 'inlist') + + # MultiImageMaths Node - Create a subject mask by + # computing the intersection of all run masks. + mask_intersection = Node(MultiImageMaths(), name = 'mask_intersection') + mask_intersection.inputs.op_string = '-mul %s ' * (len(self.run_list) - 1) + subject_level.connect(split_masks, 'out1', mask_intersection, 'in_file') + subject_level.connect(split_masks, 'out2', mask_intersection, 'operand_files') + + # L2Model Node - Generate subject specific second level model + generate_model = Node(L2Model(), name = 'generate_model') + generate_model.inputs.num_copes = len(self.run_list) + + # FLAMEO Node - Estimate model + estimate_model = Node(FLAMEO(), name = 'estimate_model') + estimate_model.inputs.run_mode = 'flame1' + subject_level.connect(mask_intersection, 'out_file', estimate_model, 'mask_file') + subject_level.connect(merge_copes, 'merged_file', estimate_model, 'cope_file') + subject_level.connect(merge_varcopes, 'merged_file', estimate_model, 'var_cope_file') + subject_level.connect(generate_model, 'design_mat', estimate_model, 'design_file') + subject_level.connect(generate_model, 'design_con', estimate_model, 't_con_file') + subject_level.connect(generate_model, 'design_grp', estimate_model, 'cov_split_file') + + # DataSink Node - store the wanted results in the wanted directory + data_sink = Node(DataSink(), name = 'data_sink') + data_sink.inputs.base_directory = self.directories.output_dir + subject_level.connect( + mask_intersection, 'out_file', data_sink, 'subject_level_analysis.@mask') + subject_level.connect(estimate_model, 'zstats', data_sink, 'subject_level_analysis.@stats') + subject_level.connect( + estimate_model, 'tstats', data_sink, 'subject_level_analysis.@tstats') + subject_level.connect(estimate_model, 'copes', data_sink, 'subject_level_analysis.@copes') + subject_level.connect( + estimate_model, 'var_copes', data_sink, 'subject_level_analysis.@varcopes') + + return subject_level + + def get_subject_level_outputs(self): + """ Return the names of the files the subject level analysis is supposed to generate. """ + + parameters = { + 'contrast_id' : self.contrast_list, + 'subject_id' : self.subject_list, + 'file' : ['cope1.nii.gz', 'tstat1.nii.gz', 'varcope1.nii.gz', 'zstat1.nii.gz'] + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_{subject_id}','{file}' + ) + return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] + + parameters = { + 'contrast_id' : self.contrast_list, + 'subject_id' : self.subject_list, + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_{subject_id}', + 'sub-{subject_id}_task-MGT_run-01_bold_space-MNI152NLin2009cAsym_preproc_brain_mask_maths.nii.gz' + ) + return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] + + return return_list + + def get_one_sample_t_test_regressors(subject_list: list) -> dict: + """ + Create dictionary of regressors for one sample t-test group analysis. 
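+        (i.e. a single 'group_mean' regressor holding a 1 for each input cope file)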
+
+        Parameters:
+            - subject_list: ids of subjects in the group for which to do the analysis
+
+        Returns:
+            - dict containing named lists of regressors.
+        """
+
+        return dict(group_mean = [1 for _ in subject_list])
+
+    def get_two_sample_t_test_regressors(
+        equal_range_ids: list,
+        equal_indifference_ids: list,
+        subject_list: list,
+    ) -> dict:
+        """
+        Create dictionary of regressors for two sample t-test group analysis.
+
+        Parameters:
+            - equal_range_ids: ids of subjects in equal range group
+            - equal_indifference_ids: ids of subjects in equal indifference group
+            - subject_list: ids of subject for which to do the analysis
+
+        Returns:
+            - regressors, dict: containing named lists of regressors.
+            - groups, list: group identifiers to distinguish groups in FSL analysis.
+        """
+
+        # Create 2 lists containing n_sub values which are
+        # * 1 if the participant is on the group
+        # * 0 otherwise
+        equal_range_regressors = [1 if i in equal_range_ids else 0 for i in subject_list]
+        equal_indifference_regressors = [
+            1 if i in equal_indifference_ids else 0 for i in subject_list
+        ]
+
+        # Create regressors output : a dict with the two list
+        regressors = dict(
+            equalRange = equal_range_regressors,
+            equalIndifference = equal_indifference_regressors
+        )
+
+        # Create groups outputs : a list with 1 for equalRange subjects and 2 for equalIndifference
+        groups = [1 if i == 1 else 2 for i in equal_range_regressors]
+
+        return regressors, groups
+
+    def get_group_level_analysis(self):
+        """
+        Return all workflows for the group level analysis.
+
+        Returns:
+            - a list of nipype.WorkFlow
+        """
+
+        methods = ['equalRange', 'equalIndifference', 'groupComp']
+        return [self.get_group_level_analysis_sub_workflow(method) for method in methods]
+
+    def get_group_level_analysis_sub_workflow(self, method):
+        """
+        Return a workflow for the group level analysis.
+ + Parameters: + - method: one of 'equalRange', 'equalIndifference' or 'groupComp' + + Returns: + - group_level: nipype.WorkFlow + """ + # Compute the number of participants used to do the analysis + nb_subjects = len(self.subject_list) + + # Declare the workflow + group_level = Workflow( + base_dir = self.directories.working_dir, + name = f'group_level_analysis_{method}_nsub_{nb_subjects}') + + # Infosource Node - iterate over the contrasts generated by the subject level analysis + information_source = Node(IdentityInterface( + fields = ['contrast_id']), + name = 'information_source') + information_source.iterables = [('contrast_id', self.contrast_list)] + + # SelectFiles Node - select necessary files + templates = { + 'cope' : join(self.directories.output_dir, + 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', + 'cope1.nii.gz'), + 'varcope' : join(self.directories.output_dir, + 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', + 'varcope1.nii.gz'), + 'masks': join(self.directories.output_dir, + 'subject_level_analysis', '_contrast_id_1_subject_id_*', + 'sub-*_task-MGT_run-*_bold_space-MNI152NLin2009cAsym_preproc_brain_mask_maths.nii.gz') + } + select_files = Node(SelectFiles(templates), name = 'select_files') + select_files.inputs.base_directory = self.directories.results_dir + group_level.connect(information_source, 'contrast_id', select_files, 'contrast_id') + + # Function Node elements_in_string + # Get contrast of parameter estimates (cope) for these subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_copes = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] + ), + name = 'get_copes', iterfield = 'input_str' + ) + group_level.connect(select_files, 'cope', get_copes, 'input_str') + + # Function Node elements_in_string + # Get variance of the estimated copes (varcope) for these subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_varcopes = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] + ), + name = 'get_varcopes', iterfield = 'input_str' + ) + group_level.connect(select_files, 'varcope', get_varcopes, 'input_str') + + # Merge Node - Merge cope files + merge_copes = Node(Merge(), name = 'merge_copes') + merge_copes.inputs.dimension = 't' + group_level.connect(get_copes, ('out_list', clean_list), merge_copes, 'in_files') + + # Merge Node - Merge cope files + merge_varcopes = Node(Merge(), name = 'merge_varcopes') + merge_varcopes.inputs.dimension = 't' + group_level.connect(get_varcopes, ('out_list', clean_list), merge_varcopes, 'in_files') + + # Split Node - Split mask list to serve them as inputs of the MultiImageMaths node. + split_masks = Node(Split(), name = 'split_masks') + split_masks.inputs.splits = [1, len(self.subject_list) - 1] + split_masks.inputs.squeeze = True # Unfold one-element splits removing the list + group_level.connect(select_files, 'masks', split_masks, 'inlist') + + # MultiImageMaths Node - Create a subject mask by + # computing the intersection of all run masks. 
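+        # fslmaths chains the operations: '-mul %s ' repeated once per remaining
+        # mask multiplies the binary masks together, i.e. computes their intersection.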
+ mask_intersection = Node(MultiImageMaths(), name = 'mask_intersection') + mask_intersection.inputs.op_string = '-mul %s ' * (len(self.subject_list) - 1) + group_level.connect(split_masks, 'out1', mask_intersection, 'in_file') + group_level.connect(split_masks, 'out2', mask_intersection, 'operand_files') + + # MultipleRegressDesign Node - Specify model + specify_model = Node(MultipleRegressDesign(), name = 'specify_model') + + # FLAMEO Node - Estimate model + estimate_model = Node(FLAMEO(), name = 'estimate_model') + estimate_model.inputs.run_mode = 'flame1' + group_level.connect(mask_intersection, 'out_file', estimate_model, 'mask_file') + group_level.connect(merge_copes, 'merged_file', estimate_model, 'cope_file') + group_level.connect(merge_varcopes, 'merged_file', estimate_model, 'var_cope_file') + group_level.connect(specify_model, 'design_mat', estimate_model, 'design_file') + group_level.connect(specify_model, 'design_con', estimate_model, 't_con_file') + group_level.connect(specify_model, 'design_grp', estimate_model, 'cov_split_file') + + # Cluster Node - Perform clustering on statistical output + cluster = Node(Cluster(), name = 'cluster') + # TODO : add parameters + group_level.connect(estimate_model, 'zstats', cluster, 'in_file') + group_level.connect(estimate_model, 'copes', cluster, 'cope_file') + + # Datasink Node - Save important files + data_sink = Node(DataSink(), name = 'data_sink') + data_sink.inputs.base_directory = self.directories.output_dir + group_level.connect(estimate_model, 'zstats', data_sink, + f'group_level_analysis_{method}_nsub_{nb_subjects}.@zstats') + group_level.connect(estimate_model, 'tstats', data_sink, + f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstats') + group_level.connect(randomise,'t_corrected_p_files', data_sink, + f'group_level_analysis_{method}_nsub_{nb_subjects}.@t_corrected_p_files') + group_level.connect(randomise,'t_p_files', data_sink, + f'group_level_analysis_{method}_nsub_{nb_subjects}.@t_p_files') + + if method in ('equalIndifference', 'equalRange'): + # Setup a one sample t-test + specify_model.inputs.contrasts = [ + ['group_mean', 'T', ['group_mean'], [1]], + ['group_mean_neg', 'T', ['group_mean'], [-1]] + ] + + # Function Node get_group_subjects - Get subjects in the group and in the subject_list + get_group_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_group_subjects' + ) + get_group_subjects.inputs.list_1 = get_group(method) + get_group_subjects.inputs.list_2 = self.subject_list + group_level.connect(get_group_subjects, 'out_list', get_copes, 'elements') + group_level.connect(get_group_subjects, 'out_list', get_varcopes, 'elements') + + # Function Node get_one_sample_t_test_regressors + # Get regressors in the equalRange and equalIndifference method case + regressors_one_sample = Node( + Function( + function = self.get_one_sample_t_test_regressors, + input_names = ['subject_list'], + output_names = ['regressors'] + ), + name = 'regressors_one_sample', + ) + group_level.connect(get_group_subjects, 'out_list', regressors_one_sample, 'subject_list') + group_level.connect(regressors_one_sample, 'regressors', specify_model, 'regressors') + + elif method == 'groupComp': + + # Select copes and varcopes corresponding to the selected subjects + # Indeed the SelectFiles node asks for all (*) subjects available + get_copes.inputs.elements = self.subject_list + get_varcopes.inputs.elements = self.subject_list + + # Setup a two sample 
t-test + specify_model.inputs.contrasts = [ + ['equalRange_sup', 'T', ['equalRange', 'equalIndifference'], [1, -1]] + ] + + # Function Node get_equal_range_subjects + # Get subjects in the equalRange group and in the subject_list + get_equal_range_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_equal_range_subjects' + ) + get_equal_range_subjects.inputs.list_1 = get_group('equalRange') + get_equal_range_subjects.inputs.list_2 = self.subject_list + + # Function Node get_equal_indifference_subjects + # Get subjects in the equalIndifference group and in the subject_list + get_equal_indifference_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_equal_indifference_subjects' + ) + get_equal_indifference_subjects.inputs.list_1 = get_group('equalIndifference') + get_equal_indifference_subjects.inputs.list_2 = self.subject_list + + # Function Node get_two_sample_t_test_regressors + # Get regressors in the groupComp method case + regressors_two_sample = Node( + Function( + function = self.get_two_sample_t_test_regressors, + input_names = [ + 'equal_range_ids', + 'equal_indifference_ids', + 'subject_list', + ], + output_names = ['regressors', 'groups'] + ), + name = 'regressors_two_sample', + ) + regressors_two_sample.inputs.subject_list = self.subject_list + + # Add missing connections + group_level.connect( + get_equal_range_subjects, 'out_list', regressors_two_sample, 'equal_range_ids') + group_level.connect( + get_equal_indifference_subjects, 'out_list', + regressors_two_sample, 'equal_indifference_ids') + group_level.connect(regressors_two_sample, 'regressors', specify_model, 'regressors') + group_level.connect(regressors_two_sample, 'groups', specify_model, 'groups') + + return group_level + + def get_group_level_outputs(self): + """ Return all names for the files the group level analysis is supposed to generate. """ + + # Handle equalRange and equalIndifference + parameters = { + 'contrast_id': self.contrast_list, + 'method': ['equalRange', 'equalIndifference'], + 'file': [ + '_cluster0/zstat1_pval.nii.gz', # TODO : output for randomise + '_cluster0/zstat1_threshold.nii.gz', + '_cluster1/zstat2_pval.nii.gz', + '_cluster1/zstat2_threshold.nii.gz', + 'tstat1.nii.gz', + 'tstat2.nii.gz', + 'zstat1.nii.gz', + 'zstat2.nii.gz' + ] + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'group_level_analysis_{method}_nsub_'+f'{len(self.subject_list)}', + '_contrast_id_{contrast_id}', + '{file}' + ) + return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] + + # Handle groupComp + parameters = { + 'contrast_id': self.contrast_list, + 'file': [ + '_cluster0/zstat1_pval.nii.gz', # TODO : output for randomise + '_cluster0/zstat1_threshold.nii.gz', + 'tstat1.nii.gz', + 'zstat1.nii.gz' + ] + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + f'group_level_analysis_groupComp_nsub_{len(self.subject_list)}', + '_contrast_id_{contrast_id}', + '{file}' + ) + return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] + + return return_list + + def get_hypotheses_outputs(self): + """ Return all hypotheses output file names. 
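+        There are 9 hypotheses, with 2 files each (a thresholded and an
+        unthresholded z-map), hence the 18 files listed below.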
""" + + nb_sub = len(self.subject_list) + files = [ + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_1', '_cluster0', 'zstat1_threshold.nii.gz'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_1', 'zstat1.nii.gz'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_1', '_cluster0', 'zstat1_threshold.nii.gz'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_1', 'zstat1.nii.gz'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_1', '_cluster0', 'zstat1_threshold.nii.gz'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_1', 'zstat1.nii.gz'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_1', '_cluster0', 'zstat1_threshold.nii.gz'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_1', 'zstat1.nii.gz'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_2', '_cluster0', 'zstat1_threshold.nii.gz'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_2', 'zstat1.nii.gz'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_2', '_cluster0', 'zstat1_threshold.nii.gz'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_2', 'zstat1.nii.gz'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_2', '_cluster0', 'zstat1_threshold.nii.gz'), + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_2', 'zstat1.nii.gz'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_2', '_cluster0', 'zstat1_threshold.nii.gz'), + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_2', 'zstat1.nii.gz'), + join(f'group_level_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_2', '_cluster0', 'zstat1_threshold.nii.gz'), + join(f'group_level_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_2', 'zstat1.nii.gz') + ] + return [join(self.directories.output_dir, f) for f in files] diff --git a/tests/pipelines/test_team_4SZ2.py b/tests/pipelines/test_team_4SZ2.py new file mode 100644 index 00000000..e9c4be9a --- /dev/null +++ b/tests/pipelines/test_team_4SZ2.py @@ -0,0 +1,93 @@ +#!/usr/bin/python +# coding: utf-8 + +""" Tests of the 'narps_open.pipelines.team_4SZ2' module. 
+
+Launch this test with PyTest
+
+Usage:
+======
+    pytest -q test_team_4SZ2.py
+    pytest -q test_team_4SZ2.py -k <selected_test>
+"""
+from os.path import join, exists, abspath
+from filecmp import cmp
+
+from pytest import helpers, mark
+from nipype import Workflow, Node, Function
+from nipype.interfaces.base import Bunch
+
+from narps_open.utils.configuration import Configuration
+from narps_open.pipelines.team_4SZ2 import PipelineTeam4SZ2
+
+class TestPipelinesTeam4SZ2:
+    """ A class that contains all the unit tests for the PipelineTeam4SZ2 class."""
+
+    @staticmethod
+    @mark.unit_test
+    def test_create():
+        """ Test the creation of a PipelineTeam4SZ2 object """
+
+        pipeline = PipelineTeam4SZ2()
+
+        # 1 - check the parameters
+        assert pipeline.fwhm == 6.0
+        assert pipeline.team_id == '4SZ2'
+
+        # 2 - check workflows
+        assert pipeline.get_preprocessing() is None
+        assert isinstance(pipeline.get_run_level_analysis(), Workflow)
+        assert isinstance(pipeline.get_subject_level_analysis(), Workflow)
+        group_level = pipeline.get_group_level_analysis()
+        assert len(group_level) == 3
+        for sub_workflow in group_level:
+            assert isinstance(sub_workflow, Workflow)
+
+    @staticmethod
+    @mark.unit_test
+    def test_outputs():
+        """ Test the expected outputs of a PipelineTeam4SZ2 object """
+
+        pipeline = PipelineTeam4SZ2()
+
+        # 1 - 1 subject outputs
+        pipeline.subject_list = ['001']
+        helpers.test_pipeline_outputs(pipeline, [0, 4*1*2*4, 4*2*1 + 2*1, 8*4*2 + 4*4, 18])
+
+        # 2 - 4 subjects outputs
+        pipeline.subject_list = ['001', '002', '003', '004']
+        helpers.test_pipeline_outputs(pipeline, [0, 4*4*2*4, 4*2*4 + 2*4, 8*4*2 + 4*4, 18])
+
+    @staticmethod
+    @mark.unit_test
+    def test_subject_information():
+        """ Test the get_subject_information method """
+
+        # Get test files
+        test_file = join(Configuration()['directories']['test_data'], 'pipelines', 'events.tsv')
+
+        # Prepare several scenarios
+        info_missed = PipelineTeam4SZ2.get_subject_information(test_file)
+
+        # Compare bunches to expected
+        bunch = info_missed[0]
+        assert isinstance(bunch, Bunch)
+        assert bunch.conditions == ['trial', 'gain', 'loss']
+        helpers.compare_float_2d_arrays(bunch.onsets, [
+            [4.071, 11.834, 19.535, 27.535, 36.435],
+            [4.071, 11.834, 19.535, 27.535, 36.435]
+        ])
+        helpers.compare_float_2d_arrays(bunch.durations, [
+            [4.0, 4.0, 4.0, 4.0, 4.0],
+            [4.0, 4.0, 4.0, 4.0, 4.0]
+        ])
+        helpers.compare_float_2d_arrays(bunch.amplitudes, [
+            [14.0, 34.0, 38.0, 10.0, 16.0],
+            [6.0, 14.0, 19.0, 15.0, 17.0]
+        ])
+
+    @staticmethod
+    @mark.pipeline_test
+    def test_execution():
+        """ Test the execution of a PipelineTeam4SZ2 and compare results """
+        helpers.test_pipeline_evaluation('4SZ2')

From 6c3d996c973889b79f689157a6496cf8759d19ed Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Mon, 15 Apr 2024 11:21:10 +0200
Subject: [PATCH 02/17] Starting group level [skip ci]

---
 narps_open/pipelines/__init__.py  |   2 +-
 narps_open/pipelines/team_4SZ2.py | 134 ++----------------------------
 2 files changed, 10 insertions(+), 126 deletions(-)

diff --git a/narps_open/pipelines/__init__.py b/narps_open/pipelines/__init__.py
index 4294853c..4b1bb767 100644
--- a/narps_open/pipelines/__init__.py
+++ b/narps_open/pipelines/__init__.py
@@ -26,7 +26,7 @@
     '3TR7': 'PipelineTeam3TR7',
     '43FJ': None,
     '46CD': None,
-    '4SZ2': None,
+    '4SZ2': 'PipelineTeam4SZ2',
     '4TQ6': None,
     '50GV': None,
     '51PW': 'PipelineTeam51PW',
diff --git a/narps_open/pipelines/team_4SZ2.py b/narps_open/pipelines/team_4SZ2.py
index 23e1d998..31994900 100644
--- 
a/narps_open/pipelines/team_4SZ2.py +++ b/narps_open/pipelines/team_4SZ2.py @@ -137,7 +137,7 @@ def get_run_level_analysis(self): # Level1Design Node - Generate files for run level computation model_design = Node(Level1Design(), name = 'model_design') - model_design.inputs.bases = {'dgamma' : {'derivs' : False }} + model_design.inputs.bases = {'dgamma' : {'derivs' : True }} model_design.inputs.interscan_interval = TaskInformation()['RepetitionTime'] model_design.inputs.model_serial_correlations = True model_design.inputs.contrasts = self.run_level_contrasts @@ -194,123 +194,8 @@ def get_run_level_outputs(self): for parameter_values in parameter_sets for template in templates] def get_subject_level_analysis(self): - """ - Create the subject level analysis workflow. - - Returns: - - subject_level_analysis : nipype.WorkFlow - """ - # Second level (single-subject, mean of all four scans) analysis workflow. - subject_level = Workflow( - base_dir = self.directories.working_dir, - name = 'subject_level_analysis') - - # Infosource Node - To iterate on subject and runs - information_source = Node(IdentityInterface( - fields = ['subject_id', 'contrast_id']), - name = 'information_source') - information_source.iterables = [ - ('subject_id', self.subject_list), - ('contrast_id', self.contrast_list) - ] - - # SelectFiles node - to select necessary files - templates = { - 'cope' : join(self.directories.output_dir, - 'run_level_analysis', '_run_id_*_subject_id_{subject_id}', 'results', - 'cope{contrast_id}.nii.gz'), - 'varcope' : join(self.directories.output_dir, - 'run_level_analysis', '_run_id_*_subject_id_{subject_id}', 'results', - 'varcope{contrast_id}.nii.gz'), - 'masks' : join('derivatives', 'fmriprep', 'sub-{subject_id}', 'func', - 'sub-{subject_id}_task-MGT_run-{run_id}_bold_space-MNI152NLin2009cAsym_brainmask.nii.gz') - } - select_files = Node(SelectFiles(templates), name = 'select_files') - select_files.inputs.base_directory= self.directories.results_dir - subject_level.connect(information_source, 'subject_id', select_files, 'subject_id') - subject_level.connect(information_source, 'contrast_id', select_files, 'contrast_id') - - # Merge Node - Merge copes files for each subject - merge_copes = Node(Merge(), name = 'merge_copes') - merge_copes.inputs.dimension = 't' - subject_level.connect(select_files, 'cope', merge_copes, 'in_files') - - # Merge Node - Merge varcopes files for each subject - merge_varcopes = Node(Merge(), name = 'merge_varcopes') - merge_varcopes.inputs.dimension = 't' - subject_level.connect(select_files, 'varcope', merge_varcopes, 'in_files') - - # Split Node - Split mask list to serve them as inputs of the MultiImageMaths node. - split_masks = Node(Split(), name = 'split_masks') - split_masks.inputs.splits = [1, len(self.run_list) - 1] - split_masks.inputs.squeeze = True # Unfold one-element splits removing the list - subject_level.connect(select_files, 'masks', split_masks, 'inlist') - - # MultiImageMaths Node - Create a subject mask by - # computing the intersection of all run masks. 
- mask_intersection = Node(MultiImageMaths(), name = 'mask_intersection') - mask_intersection.inputs.op_string = '-mul %s ' * (len(self.run_list) - 1) - subject_level.connect(split_masks, 'out1', mask_intersection, 'in_file') - subject_level.connect(split_masks, 'out2', mask_intersection, 'operand_files') - - # L2Model Node - Generate subject specific second level model - generate_model = Node(L2Model(), name = 'generate_model') - generate_model.inputs.num_copes = len(self.run_list) - - # FLAMEO Node - Estimate model - estimate_model = Node(FLAMEO(), name = 'estimate_model') - estimate_model.inputs.run_mode = 'flame1' - subject_level.connect(mask_intersection, 'out_file', estimate_model, 'mask_file') - subject_level.connect(merge_copes, 'merged_file', estimate_model, 'cope_file') - subject_level.connect(merge_varcopes, 'merged_file', estimate_model, 'var_cope_file') - subject_level.connect(generate_model, 'design_mat', estimate_model, 'design_file') - subject_level.connect(generate_model, 'design_con', estimate_model, 't_con_file') - subject_level.connect(generate_model, 'design_grp', estimate_model, 'cov_split_file') - - # DataSink Node - store the wanted results in the wanted directory - data_sink = Node(DataSink(), name = 'data_sink') - data_sink.inputs.base_directory = self.directories.output_dir - subject_level.connect( - mask_intersection, 'out_file', data_sink, 'subject_level_analysis.@mask') - subject_level.connect(estimate_model, 'zstats', data_sink, 'subject_level_analysis.@stats') - subject_level.connect( - estimate_model, 'tstats', data_sink, 'subject_level_analysis.@tstats') - subject_level.connect(estimate_model, 'copes', data_sink, 'subject_level_analysis.@copes') - subject_level.connect( - estimate_model, 'var_copes', data_sink, 'subject_level_analysis.@varcopes') - - return subject_level - - def get_subject_level_outputs(self): - """ Return the names of the files the subject level analysis is supposed to generate. 
""" - - parameters = { - 'contrast_id' : self.contrast_list, - 'subject_id' : self.subject_list, - 'file' : ['cope1.nii.gz', 'tstat1.nii.gz', 'varcope1.nii.gz', 'zstat1.nii.gz'] - } - parameter_sets = product(*parameters.values()) - template = join( - self.directories.output_dir, - 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_{subject_id}','{file}' - ) - return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ - for parameter_values in parameter_sets] - - parameters = { - 'contrast_id' : self.contrast_list, - 'subject_id' : self.subject_list, - } - parameter_sets = product(*parameters.values()) - template = join( - self.directories.output_dir, - 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_{subject_id}', - 'sub-{subject_id}_task-MGT_run-01_bold_space-MNI152NLin2009cAsym_preproc_brain_mask_maths.nii.gz' - ) - return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\ - for parameter_values in parameter_sets] - - return return_list + """ No subject level analysis has been done by team 4SZ2 """ + return None def get_one_sample_t_test_regressors(subject_list: list) -> dict: """ @@ -400,14 +285,13 @@ def get_group_level_analysis_sub_workflow(self, method): # SelectFiles Node - select necessary files templates = { 'cope' : join(self.directories.output_dir, - 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', - 'cope1.nii.gz'), + 'run_level_analysis', '_run_id_*_subject_id_*', 'results', + 'cope{contrast_id}.nii.gz'), 'varcope' : join(self.directories.output_dir, - 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', - 'varcope1.nii.gz'), - 'masks': join(self.directories.output_dir, - 'subject_level_analysis', '_contrast_id_1_subject_id_*', - 'sub-*_task-MGT_run-*_bold_space-MNI152NLin2009cAsym_preproc_brain_mask_maths.nii.gz') + 'run_level_analysis', '_run_id_*_subject_id_*', 'results', + 'varcope{contrast_id}.nii.gz'), + 'masks': join('derivatives', 'fmriprep', 'sub-*', 'func', + 'sub-*_task-MGT_run-*_bold_space-MNI152NLin2009cAsym_brainmask.nii.gz') } select_files = Node(SelectFiles(templates), name = 'select_files') select_files.inputs.base_directory = self.directories.results_dir From 0a1add4f0ef82b9327b7f4a798ffa81941989426 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 15 Apr 2024 13:12:34 +0200 Subject: [PATCH 03/17] Group level and output files --- narps_open/pipelines/team_4SZ2.py | 47 ++++++++++++++++++++++--------- tests/pipelines/test_team_4SZ2.py | 10 +++---- 2 files changed, 38 insertions(+), 19 deletions(-) diff --git a/narps_open/pipelines/team_4SZ2.py b/narps_open/pipelines/team_4SZ2.py index 31994900..c68ef425 100644 --- a/narps_open/pipelines/team_4SZ2.py +++ b/narps_open/pipelines/team_4SZ2.py @@ -12,7 +12,7 @@ from nipype.interfaces.fsl import ( IsotropicSmooth, Level1Design, FEATModel, L2Model, Merge, FLAMEO, FILMGLS, MultipleRegressDesign, - FSLCommand, Randomise + FSLCommand, Cluster ) from nipype.algorithms.modelgen import SpecifyModel from nipype.interfaces.fsl.maths import MultiImageMaths @@ -73,9 +73,9 @@ def get_subject_information(event_file): return [ Bunch( - conditions = ['gain', 'loss', 'gain_derivative', 'loss_derivative'], - onsets = [onsets] * 4, - durations = [durations] * 4, + conditions = ['gain', 'loss'], + onsets = [onsets] * 2, + durations = [durations] * 2, amplitudes = [amplitudes_gain, amplitudes_loss] ) ] @@ -298,7 +298,7 @@ def get_group_level_analysis_sub_workflow(self, method): 
group_level.connect(information_source, 'contrast_id', select_files, 'contrast_id') # Function Node elements_in_string - # Get contrast of parameter estimates (cope) for these subjects + # Get contrast of parameter estimates (cope) for subjects in a given group # Note : using a MapNode with elements_in_string requires using clean_list to remove # None values from the out_list get_copes = MapNode(Function( @@ -311,7 +311,7 @@ def get_group_level_analysis_sub_workflow(self, method): group_level.connect(select_files, 'cope', get_copes, 'input_str') # Function Node elements_in_string - # Get variance of the estimated copes (varcope) for these subjects + # Get variance of the estimated copes (varcope) for subjects in a given group # Note : using a MapNode with elements_in_string requires using clean_list to remove # None values from the out_list get_varcopes = MapNode(Function( @@ -323,6 +323,19 @@ def get_group_level_analysis_sub_workflow(self, method): ) group_level.connect(select_files, 'varcope', get_varcopes, 'input_str') + # Function Node elements_in_string + # Get masks for subjects in a given group + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_masks = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] + ), + name = 'get_masks', iterfield = 'input_str' + ) + group_level.connect(select_files, 'masks', get_masks, 'input_str') + # Merge Node - Merge cope files merge_copes = Node(Merge(), name = 'merge_copes') merge_copes.inputs.dimension = 't' @@ -337,7 +350,7 @@ def get_group_level_analysis_sub_workflow(self, method): split_masks = Node(Split(), name = 'split_masks') split_masks.inputs.splits = [1, len(self.subject_list) - 1] split_masks.inputs.squeeze = True # Unfold one-element splits removing the list - group_level.connect(select_files, 'masks', split_masks, 'inlist') + group_level.connect(get_masks, ('out_list', clean_list), split_masks, 'inlist') # MultiImageMaths Node - Create a subject mask by # computing the intersection of all run masks. 
@@ -360,8 +373,14 @@ def get_group_level_analysis_sub_workflow(self, method): group_level.connect(specify_model, 'design_grp', estimate_model, 'cov_split_file') # Cluster Node - Perform clustering on statistical output - cluster = Node(Cluster(), name = 'cluster') - # TODO : add parameters + cluster = MapNode( + Cluster(), + name = 'cluster', + iterfield = ['in_file', 'cope_file'], + synchronize = True + ) + cluster.inputs.threshold = 2.3 + cluster.inputs.out_threshold_file = True group_level.connect(estimate_model, 'zstats', cluster, 'in_file') group_level.connect(estimate_model, 'copes', cluster, 'cope_file') @@ -372,10 +391,8 @@ def get_group_level_analysis_sub_workflow(self, method): f'group_level_analysis_{method}_nsub_{nb_subjects}.@zstats') group_level.connect(estimate_model, 'tstats', data_sink, f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstats') - group_level.connect(randomise,'t_corrected_p_files', data_sink, - f'group_level_analysis_{method}_nsub_{nb_subjects}.@t_corrected_p_files') - group_level.connect(randomise,'t_p_files', data_sink, - f'group_level_analysis_{method}_nsub_{nb_subjects}.@t_p_files') + group_level.connect(cluster,'threshold_file', data_sink, + f'group_level_analysis_{method}_nsub_{nb_subjects}.@threshold_file') if method in ('equalIndifference', 'equalRange'): # Setup a one sample t-test @@ -396,6 +413,7 @@ def get_group_level_analysis_sub_workflow(self, method): get_group_subjects.inputs.list_2 = self.subject_list group_level.connect(get_group_subjects, 'out_list', get_copes, 'elements') group_level.connect(get_group_subjects, 'out_list', get_varcopes, 'elements') + group_level.connect(get_group_subjects, 'out_list', get_masks, 'elements') # Function Node get_one_sample_t_test_regressors # Get regressors in the equalRange and equalIndifference method case @@ -416,6 +434,7 @@ def get_group_level_analysis_sub_workflow(self, method): # Indeed the SelectFiles node asks for all (*) subjects available get_copes.inputs.elements = self.subject_list get_varcopes.inputs.elements = self.subject_list + get_masks.inputs.elements = self.subject_list # Setup a two sample t-test specify_model.inputs.contrasts = [ @@ -481,7 +500,7 @@ def get_group_level_outputs(self): 'contrast_id': self.contrast_list, 'method': ['equalRange', 'equalIndifference'], 'file': [ - '_cluster0/zstat1_pval.nii.gz', # TODO : output for randomise + '_cluster0/zstat1_pval.nii.gz', '_cluster0/zstat1_threshold.nii.gz', '_cluster1/zstat2_pval.nii.gz', '_cluster1/zstat2_threshold.nii.gz', diff --git a/tests/pipelines/test_team_4SZ2.py b/tests/pipelines/test_team_4SZ2.py index e9c4be9a..291b0704 100644 --- a/tests/pipelines/test_team_4SZ2.py +++ b/tests/pipelines/test_team_4SZ2.py @@ -31,13 +31,13 @@ def test_create(): pipeline = PipelineTeam4SZ2() # 1 - check the parameters - assert pipeline.fwhm == 6.0 + assert pipeline.fwhm == 5.0 assert pipeline.team_id == '4SZ2' # 2 - check workflows assert pipeline.get_preprocessing() is None assert isinstance(pipeline.get_run_level_analysis(), Workflow) - assert isinstance(pipeline.get_subject_level_analysis(), Workflow) + assert pipeline.get_subject_level_analysis() is None group_level = pipeline.get_group_level_analysis() assert len(group_level) == 3 for sub_workflow in group_level: @@ -52,11 +52,11 @@ def test_outputs(): # 1 - 1 subject outputs pipeline.subject_list = ['001'] - helpers.test_pipeline_outputs(pipeline, [0, 4*1*2*4, 4*2*1 + 2*1, 8*4*2 + 4*4, 18]) + helpers.test_pipeline_outputs(pipeline, [0, 2*4*1*4, 0, 8*2*2 + 4*2, 18]) # 2 - 4 subjects 
outputs pipeline.subject_list = ['001', '002', '003', '004'] - helpers.test_pipeline_outputs(pipeline, [0, 4*4*2*4, 4*2*4 + 2*4, 8*4*2 + 4*4, 18]) + helpers.test_pipeline_outputs(pipeline, [0, 2*4*4*4, 0, 8*2*2 + 4*2, 18]) @staticmethod @mark.unit_test @@ -72,7 +72,7 @@ def test_subject_information(): # Compare bunches to expected bunch = info_missed[0] assert isinstance(bunch, Bunch) - assert bunch.conditions == ['trial', 'gain', 'loss'] + assert bunch.conditions == ['gain', 'loss'] helpers.compare_float_2d_arrays(bunch.onsets, [ [4.071, 11.834, 19.535, 27.535, 36.435], [4.071, 11.834, 19.535, 27.535, 36.435] From 783c79efeba1931729eb7e2a1af46fe924bf9e65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 16 Apr 2024 11:28:44 +0200 Subject: [PATCH 04/17] Correcting bugs in group level analyses --- narps_open/pipelines/team_4SZ2.py | 67 ++++++++++++++++++++----------- tests/pipelines/test_team_4SZ2.py | 29 ++++++++++++- 2 files changed, 70 insertions(+), 26 deletions(-) diff --git a/narps_open/pipelines/team_4SZ2.py b/narps_open/pipelines/team_4SZ2.py index c68ef425..07ceed47 100644 --- a/narps_open/pipelines/team_4SZ2.py +++ b/narps_open/pipelines/team_4SZ2.py @@ -214,6 +214,7 @@ def get_two_sample_t_test_regressors( equal_range_ids: list, equal_indifference_ids: list, subject_list: list, + run_list: list ) -> dict: """ Create dictionary of regressors for two sample t-test group analysis. @@ -222,19 +223,24 @@ def get_two_sample_t_test_regressors( - equal_range_ids: ids of subjects in equal range group - equal_indifference_ids: ids of subjects in equal indifference group - subject_list: ids of subject for which to do the analysis - + - run_list: ids of runs for which to do the analysis Returns: - regressors, dict: containing named lists of regressors. - groups, list: group identifiers to distinguish groups in FSL analysis. 
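+
+        e.g. with equal_range_ids = ['001'], equal_indifference_ids = ['002'],
+        subject_list = ['001', '002'] and run_list = ['01', '02'], this returns
+        regressors = {'equalRange': [1, 1, 0, 0], 'equalIndifference': [0, 0, 1, 1]}
+        and groups = [1, 1, 2, 2].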
""" - # Create 2 lists containing n_sub values which are + # Create 2 lists containing a value for each run, which is # * 1 if the participant is on the group # * 0 otherwise - equal_range_regressors = [1 if i in equal_range_ids else 0 for i in subject_list] - equal_indifference_regressors = [ - 1 if i in equal_indifference_ids else 0 for i in subject_list - ] + equal_range_regressors = [] + equal_indifference_regressors = [] + + for subject_id in subject_list: + value_er = 1 if subject_id in equal_range_ids else 0 + value_ei = 1 if subject_id in equal_indifference_ids else 0 + for _ in run_list: + equal_range_regressors.append(value_er) + equal_indifference_regressors.append(value_ei) # Create regressors output : a dict with the two list regressors = dict( @@ -268,9 +274,14 @@ def get_group_level_analysis_sub_workflow(self, method): Returns: - group_level: nipype.WorkFlow """ - # Compute the number of participants used to do the analysis + # Compute the number of participants in the analysis nb_subjects = len(self.subject_list) + # Compute the number of participants in the group + nb_subjects_in_group = nb_subjects + if method in ['equalIndifference', 'equalRange']: + nb_subjects_in_group = len([s for s in self.subject_list if s in get_group(method)]) + # Declare the workflow group_level = Workflow( base_dir = self.directories.working_dir, @@ -294,9 +305,13 @@ def get_group_level_analysis_sub_workflow(self, method): 'sub-*_task-MGT_run-*_bold_space-MNI152NLin2009cAsym_brainmask.nii.gz') } select_files = Node(SelectFiles(templates), name = 'select_files') - select_files.inputs.base_directory = self.directories.results_dir + select_files.inputs.base_directory = self.directories.dataset_dir group_level.connect(information_source, 'contrast_id', select_files, 'contrast_id') + # Create a function to complete the subject ids out from the get_*_subjects node + complete_subject_ids = lambda l : [f'_subject_id_{a}' for a in l] + complete_sub_ids = lambda l : [f'sub-{a}' for a in l] + # Function Node elements_in_string # Get contrast of parameter estimates (cope) for subjects in a given group # Note : using a MapNode with elements_in_string requires using clean_list to remove @@ -348,14 +363,15 @@ def get_group_level_analysis_sub_workflow(self, method): # Split Node - Split mask list to serve them as inputs of the MultiImageMaths node. split_masks = Node(Split(), name = 'split_masks') - split_masks.inputs.splits = [1, len(self.subject_list) - 1] + split_masks.inputs.splits = [1, (nb_subjects_in_group * len(self.run_list)) - 1] split_masks.inputs.squeeze = True # Unfold one-element splits removing the list group_level.connect(get_masks, ('out_list', clean_list), split_masks, 'inlist') # MultiImageMaths Node - Create a subject mask by # computing the intersection of all run masks. 
mask_intersection = Node(MultiImageMaths(), name = 'mask_intersection') - mask_intersection.inputs.op_string = '-mul %s ' * (len(self.subject_list) - 1) + mask_intersection.inputs.op_string = '-mul %s ' * \ + ((nb_subjects_in_group * len(self.run_list)) - 1) group_level.connect(split_masks, 'out1', mask_intersection, 'in_file') group_level.connect(split_masks, 'out2', mask_intersection, 'operand_files') @@ -411,9 +427,12 @@ def get_group_level_analysis_sub_workflow(self, method): ) get_group_subjects.inputs.list_1 = get_group(method) get_group_subjects.inputs.list_2 = self.subject_list - group_level.connect(get_group_subjects, 'out_list', get_copes, 'elements') - group_level.connect(get_group_subjects, 'out_list', get_varcopes, 'elements') - group_level.connect(get_group_subjects, 'out_list', get_masks, 'elements') + group_level.connect( + get_group_subjects, ('out_list', complete_subject_ids), get_copes, 'elements') + group_level.connect( + get_group_subjects, ('out_list', complete_subject_ids), get_varcopes, 'elements') + group_level.connect( + get_group_subjects, ('out_list', complete_sub_ids), get_masks, 'elements') # Function Node get_one_sample_t_test_regressors # Get regressors in the equalRange and equalIndifference method case @@ -425,16 +444,17 @@ def get_group_level_analysis_sub_workflow(self, method): ), name = 'regressors_one_sample', ) - group_level.connect(get_group_subjects, 'out_list', regressors_one_sample, 'subject_list') + regressors_one_sample.inputs.subject_list = range( + nb_subjects_in_group * len(self.run_list)) group_level.connect(regressors_one_sample, 'regressors', specify_model, 'regressors') elif method == 'groupComp': # Select copes and varcopes corresponding to the selected subjects # Indeed the SelectFiles node asks for all (*) subjects available - get_copes.inputs.elements = self.subject_list - get_varcopes.inputs.elements = self.subject_list - get_masks.inputs.elements = self.subject_list + get_copes.inputs.elements = complete_subject_ids(self.subject_list) + get_varcopes.inputs.elements = complete_subject_ids(self.subject_list) + get_masks.inputs.elements = complete_sub_ids(self.subject_list) # Setup a two sample t-test specify_model.inputs.contrasts = [ @@ -474,12 +494,14 @@ def get_group_level_analysis_sub_workflow(self, method): 'equal_range_ids', 'equal_indifference_ids', 'subject_list', + 'run_list' ], output_names = ['regressors', 'groups'] ), name = 'regressors_two_sample', ) regressors_two_sample.inputs.subject_list = self.subject_list + regressors_two_sample.inputs.run_list = self.run_list # Add missing connections group_level.connect( @@ -500,9 +522,7 @@ def get_group_level_outputs(self): 'contrast_id': self.contrast_list, 'method': ['equalRange', 'equalIndifference'], 'file': [ - '_cluster0/zstat1_pval.nii.gz', '_cluster0/zstat1_threshold.nii.gz', - '_cluster1/zstat2_pval.nii.gz', '_cluster1/zstat2_threshold.nii.gz', 'tstat1.nii.gz', 'tstat2.nii.gz', @@ -524,7 +544,6 @@ def get_group_level_outputs(self): parameters = { 'contrast_id': self.contrast_list, 'file': [ - '_cluster0/zstat1_pval.nii.gz', # TODO : output for randomise '_cluster0/zstat1_threshold.nii.gz', 'tstat1.nii.gz', 'zstat1.nii.gz' @@ -564,13 +583,13 @@ def get_hypotheses_outputs(self): join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_1', 'zstat1.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_2', '_cluster0', 'zstat1_threshold.nii.gz'), + '_contrast_id_2', '_cluster1', 'zstat2_threshold.nii.gz'), 
join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_2', 'zstat1.nii.gz'), + '_contrast_id_2', 'zstat2.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_2', '_cluster0', 'zstat1_threshold.nii.gz'), + '_contrast_id_2', '_cluster1', 'zstat2_threshold.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_2', 'zstat1.nii.gz'), + '_contrast_id_2', 'zstat2.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_2', '_cluster0', 'zstat1_threshold.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', diff --git a/tests/pipelines/test_team_4SZ2.py b/tests/pipelines/test_team_4SZ2.py index 291b0704..1f3c08ff 100644 --- a/tests/pipelines/test_team_4SZ2.py +++ b/tests/pipelines/test_team_4SZ2.py @@ -52,11 +52,11 @@ def test_outputs(): # 1 - 1 subject outputs pipeline.subject_list = ['001'] - helpers.test_pipeline_outputs(pipeline, [0, 2*4*1*4, 0, 8*2*2 + 4*2, 18]) + helpers.test_pipeline_outputs(pipeline, [0, 2*4*1*4, 0, 6*2*2 + 3*2, 18]) # 2 - 4 subjects outputs pipeline.subject_list = ['001', '002', '003', '004'] - helpers.test_pipeline_outputs(pipeline, [0, 2*4*4*4, 0, 8*2*2 + 4*2, 18]) + helpers.test_pipeline_outputs(pipeline, [0, 2*4*4*4, 0, 6*2*2 + 3*2, 18]) @staticmethod @mark.unit_test @@ -86,6 +86,31 @@ def test_subject_information(): [6.0, 14.0, 19.0, 15.0, 17.0] ]) + @staticmethod + @mark.unit_test + def test_one_sample_t_test_regressors(): + """ Test the get_one_sample_t_test_regressors method """ + + result = PipelineTeam4SZ2.get_one_sample_t_test_regressors(['001', '002', '003', '004']) + assert result == {'group_mean' : [1]*4} + + @staticmethod + @mark.unit_test + def test_two_sample_t_test_regressors(): + """ Test the get_two_sample_t_test_regressors method """ + + result_1, result_2 = PipelineTeam4SZ2.get_two_sample_t_test_regressors( + ['001', '003'], # equal_range_ids + ['002', '004'], # equal_indifference_ids + ['001', '002', '003', '004'], # subject_list + ['01', '02'] # run_list + ) + assert result_1 == { + 'equalRange' : [1, 1, 0, 0, 1, 1, 0, 0], + 'equalIndifference' : [0, 0, 1, 1, 0, 0, 1, 1] + } + assert result_2 == [1, 1, 2, 2, 1, 1, 2, 2] + @staticmethod @mark.pipeline_test def test_execution(): From 138e408ebb9ded572f2c100629406ce01f184cd7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 17 Apr 2024 09:24:02 +0200 Subject: [PATCH 05/17] First dockerfile test based on nipype --- Dockerfile | 320 +---------------------------------------------------- 1 file changed, 3 insertions(+), 317 deletions(-) diff --git a/Dockerfile b/Dockerfile index 13b22194..b4956310 100755 --- a/Dockerfile +++ b/Dockerfile @@ -1,319 +1,5 @@ -# Generated by: Neurodocker version 0.7.0+0.gdc97516.dirty -# Latest release: Neurodocker version 0.7.0 -# Timestamp: 2021/11/09 11:04:47 UTC -# -# Thank you for using Neurodocker. If you discover any issues -# or ways to improve this software, please submit an issue or -# pull request on our GitHub repository: -# -# https://github.com/ReproNim/neurodocker - -FROM neurodebian:stretch-non-free - +FROM nipype/nipype:py38 +COPY . 
/work USER root - -ARG DEBIAN_FRONTEND="noninteractive" - -ENV LANG="en_US.UTF-8" \ - LC_ALL="en_US.UTF-8" \ - ND_ENTRYPOINT="/neurodocker/startup.sh" -RUN export ND_ENTRYPOINT="/neurodocker/startup.sh" \ - && apt-get update -qq \ - && apt-get install -y -q --no-install-recommends \ - apt-utils \ - bzip2 \ - ca-certificates \ - curl \ - locales \ - unzip \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - && sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen \ - && dpkg-reconfigure --frontend=noninteractive locales \ - && update-locale LANG="en_US.UTF-8" \ - && chmod 777 /opt && chmod a+s /opt \ - && mkdir -p /neurodocker \ - && if [ ! -f "$ND_ENTRYPOINT" ]; then \ - echo '#!/usr/bin/env bash' >> "$ND_ENTRYPOINT" \ - && echo 'set -e' >> "$ND_ENTRYPOINT" \ - && echo 'export USER="${USER:=`whoami`}"' >> "$ND_ENTRYPOINT" \ - && echo 'if [ -n "$1" ]; then "$@"; else /usr/bin/env bash; fi' >> "$ND_ENTRYPOINT"; \ - fi \ - && chmod -R 777 /neurodocker && chmod a+s /neurodocker - -ENTRYPOINT ["/neurodocker/startup.sh"] - -RUN apt-get update -qq \ - && apt-get install -y -q --no-install-recommends \ - git \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -ENV FSLDIR="/opt/fsl-6.0.3" \ - PATH="/opt/fsl-6.0.3/bin:$PATH" \ - FSLOUTPUTTYPE="NIFTI_GZ" \ - FSLMULTIFILEQUIT="TRUE" \ - FSLTCLSH="/opt/fsl-6.0.3/bin/fsltclsh" \ - FSLWISH="/opt/fsl-6.0.3/bin/fslwish" \ - FSLLOCKDIR="" \ - FSLMACHINELIST="" \ - FSLREMOTECALL="" \ - FSLGECUDAQ="cuda.q" -RUN apt-get update -qq \ - && apt-get install -y -q --no-install-recommends \ - bc \ - dc \ - file \ - libfontconfig1 \ - libfreetype6 \ - libgl1-mesa-dev \ - libgl1-mesa-dri \ - libglu1-mesa-dev \ - libgomp1 \ - libice6 \ - libxcursor1 \ - libxft2 \ - libxinerama1 \ - libxrandr2 \ - libxrender1 \ - libxt6 \ - sudo \ - wget \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - && echo "Downloading FSL ..." \ - && mkdir -p /opt/fsl-6.0.3 \ - && curl -fsSL --retry 5 https://fsl.fmrib.ox.ac.uk/fsldownloads/fsl-6.0.3-centos6_64.tar.gz \ - | tar -xz -C /opt/fsl-6.0.3 --strip-components 1 \ - && sed -i '$iecho Some packages in this Docker container are non-free' $ND_ENTRYPOINT \ - && sed -i '$iecho If you are considering commercial use of this container, please consult the relevant license:' $ND_ENTRYPOINT \ - && sed -i '$iecho https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Licence' $ND_ENTRYPOINT \ - && sed -i '$isource $FSLDIR/etc/fslconf/fsl.sh' $ND_ENTRYPOINT \ - && echo "Installing FSL conda environment ..." 
\ - && bash /opt/fsl-6.0.3/etc/fslconf/fslpython_install.sh -f /opt/fsl-6.0.3 - -ENV PATH="/opt/afni-latest:$PATH" \ - AFNI_PLUGINPATH="/opt/afni-latest" -RUN apt-get update -qq \ - && apt-get install -y -q --no-install-recommends \ - ed \ - gsl-bin \ - libglib2.0-0 \ - libglu1-mesa-dev \ - libglw1-mesa \ - libgomp1 \ - libjpeg62 \ - libnlopt-dev \ - libxm4 \ - netpbm \ - python \ - python3 \ - r-base \ - r-base-dev \ - tcsh \ - xfonts-base \ - xvfb \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - && curl -sSL --retry 5 -o /tmp/toinstall.deb http://mirrors.kernel.org/debian/pool/main/libx/libxp/libxp6_1.0.2-2_amd64.deb \ - && dpkg -i /tmp/toinstall.deb \ - && rm /tmp/toinstall.deb \ - && curl -sSL --retry 5 -o /tmp/toinstall.deb http://snapshot.debian.org/archive/debian-security/20160113T213056Z/pool/updates/main/libp/libpng/libpng12-0_1.2.49-1%2Bdeb7u2_amd64.deb \ - && dpkg -i /tmp/toinstall.deb \ - && rm /tmp/toinstall.deb \ - && apt-get install -f \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - && gsl2_path="$(find / -name 'libgsl.so.19' || printf '')" \ - && if [ -n "$gsl2_path" ]; then \ - ln -sfv "$gsl2_path" "$(dirname $gsl2_path)/libgsl.so.0"; \ - fi \ - && ldconfig \ - && echo "Downloading AFNI ..." \ - && mkdir -p /opt/afni-latest \ - && curl -fsSL --retry 5 https://afni.nimh.nih.gov/pub/dist/tgz/linux_openmp_64.tgz \ - | tar -xz -C /opt/afni-latest --strip-components 1 \ - && PATH=$PATH:/opt/afni-latest rPkgsInstall -pkgs ALL - -ENV FORCE_SPMMCR="1" \ - SPM_HTML_BROWSER="0" \ - LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib/x86_64-linux-gnu:/opt/matlabmcr-2010a/v713/runtime/glnxa64:/opt/matlabmcr-2010a/v713/bin/glnxa64:/opt/matlabmcr-2010a/v713/sys/os/glnxa64:/opt/matlabmcr-2010a/v713/extern/bin/glnxa64" \ - MATLABCMD="/opt/matlabmcr-2010a/v713/toolbox/matlab" -RUN export TMPDIR="$(mktemp -d)" \ - && apt-get update -qq \ - && apt-get install -y -q --no-install-recommends \ - bc \ - libncurses5 \ - libxext6 \ - libxmu6 \ - libxpm-dev \ - libxt6 \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - && echo "Downloading MATLAB Compiler Runtime ..." \ - && curl -sSL --retry 5 -o /tmp/toinstall.deb http://mirrors.kernel.org/debian/pool/main/libx/libxp/libxp6_1.0.2-2_amd64.deb \ - && dpkg -i /tmp/toinstall.deb \ - && rm /tmp/toinstall.deb \ - && apt-get install -f \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - && curl -fsSL --retry 5 -o "$TMPDIR/MCRInstaller.bin" https://dl.dropbox.com/s/zz6me0c3v4yq5fd/MCR_R2010a_glnxa64_installer.bin \ - && chmod +x "$TMPDIR/MCRInstaller.bin" \ - && "$TMPDIR/MCRInstaller.bin" -silent -P installLocation="/opt/matlabmcr-2010a" \ - && rm -rf "$TMPDIR" \ - && unset TMPDIR \ - && echo "Downloading standalone SPM ..." 
\ - && curl -fsSL --retry 5 -o /tmp/spm12.zip https://www.fil.ion.ucl.ac.uk/spm/download/restricted/utopia/previous/spm12_r7771_R2010a.zip \ - && unzip -q /tmp/spm12.zip -d /tmp \ - && mkdir -p /opt/spm12-r7771 \ - && mv /tmp/spm12/* /opt/spm12-r7771/ \ - && chmod -R 777 /opt/spm12-r7771 \ - && rm -rf /tmp/spm* \ - && /opt/spm12-r7771/run_spm12.sh /opt/matlabmcr-2010a/v713 quit \ - && sed -i '$iexport SPMMCRCMD=\"/opt/spm12-r7771/run_spm12.sh /opt/matlabmcr-2010a/v713 script\"' $ND_ENTRYPOINT - -RUN test "$(getent passwd neuro)" || useradd --no-user-group --create-home --shell /bin/bash neuro +RUN /bin/bash -c "source activate neuro && pip install /work" USER neuro - -WORKDIR /home - -ENV CONDA_DIR="/opt/miniconda-latest" \ - PATH="/opt/miniconda-latest/bin:$PATH" -RUN export PATH="/opt/miniconda-latest/bin:$PATH" \ - && echo "Downloading Miniconda installer ..." \ - && conda_installer="/tmp/miniconda.sh" \ - && curl -fsSL --retry 5 -o "$conda_installer" https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \ - && bash "$conda_installer" -b -p /opt/miniconda-latest \ - && rm -f "$conda_installer" \ - && conda update -yq -nbase conda \ - && conda config --system --prepend channels conda-forge \ - && conda config --system --set auto_update_conda false \ - && conda config --system --set show_channel_urls true \ - && sync && conda clean -y --all && sync \ - && conda create -y -q --name neuro \ - && conda install -y -q --name neuro \ - "python=3.8" \ - "traits" \ - "jupyter" \ - "nilearn" \ - "graphviz" \ - "nipype" \ - "scikit-image" \ - && sync && conda clean -y --all && sync \ - && bash -c "source activate neuro \ - && pip install --no-cache-dir \ - "matplotlib"" \ - && rm -rf ~/.cache/pip/* \ - && sync \ - && sed -i '$isource activate neuro' $ND_ENTRYPOINT - -ENV LD_LIBRARY_PATH="/opt/miniconda-latest/envs/neuro:" - -RUN bash -c 'source activate neuro' - -USER root - -RUN chmod 777 -Rf /home - -RUN chown -R neuro /home - -USER neuro - -RUN mkdir -p ~/.jupyter && echo c.NotebookApp.ip = \"0.0.0.0\" > ~/.jupyter/jupyter_notebook_config.py - -RUN echo '{ \ - \n "pkg_manager": "apt", \ - \n "instructions": [ \ - \n [ \ - \n "base", \ - \n "neurodebian:stretch-non-free" \ - \n ], \ - \n [ \ - \n "install", \ - \n [ \ - \n "git" \ - \n ] \ - \n ], \ - \n [ \ - \n "fsl", \ - \n { \ - \n "version": "6.0.3" \ - \n } \ - \n ], \ - \n [ \ - \n "afni", \ - \n { \ - \n "version": "latest", \ - \n "method": "binaries", \ - \n "install_r": "true", \ - \n "install_r_pkgs": "true", \ - \n "install_python2": "true", \ - \n "install_python3": "true" \ - \n } \ - \n ], \ - \n [ \ - \n "spm12", \ - \n { \ - \n "version": "r7771", \ - \n "method": "binaries" \ - \n } \ - \n ], \ - \n [ \ - \n "user", \ - \n "neuro" \ - \n ], \ - \n [ \ - \n "workdir", \ - \n "/home" \ - \n ], \ - \n [ \ - \n "miniconda", \ - \n { \ - \n "create_env": "neuro", \ - \n "conda_install": [ \ - \n "python=3.8", \ - \n "traits", \ - \n "jupyter", \ - \n "nilearn", \ - \n "graphviz", \ - \n "nipype", \ - \n "scikit-image" \ - \n ], \ - \n "pip_install": [ \ - \n "matplotlib" \ - \n ], \ - \n "activate": true \ - \n } \ - \n ], \ - \n [ \ - \n "env", \ - \n { \ - \n "LD_LIBRARY_PATH": "/opt/miniconda-latest/envs/neuro:" \ - \n } \ - \n ], \ - \n [ \ - \n "run_bash", \ - \n "source activate neuro" \ - \n ], \ - \n [ \ - \n "user", \ - \n "root" \ - \n ], \ - \n [ \ - \n "run", \ - \n "chmod 777 -Rf /home" \ - \n ], \ - \n [ \ - \n "run", \ - \n "chown -R neuro /home" \ - \n ], \ - \n [ \ - \n "user", \ - \n "neuro" \ - \n 
], \ - \n [ \ - \n "run", \ - \n "mkdir -p ~/.jupyter && echo c.NotebookApp.ip = \\\"0.0.0.0\\\" > ~/.jupyter/jupyter_notebook_config.py" \ - \n ] \ - \n ] \ - \n}' > /neurodocker/neurodocker_specs.json From 5acd1e0fcf236810931b7674959363c5809e8fc6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 17 Apr 2024 13:32:44 +0200 Subject: [PATCH 06/17] Undo changes on Dockerfile --- Dockerfile | 320 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 317 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index b4956310..13b22194 100755 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,319 @@ -FROM nipype/nipype:py38 -COPY . /work +# Generated by: Neurodocker version 0.7.0+0.gdc97516.dirty +# Latest release: Neurodocker version 0.7.0 +# Timestamp: 2021/11/09 11:04:47 UTC +# +# Thank you for using Neurodocker. If you discover any issues +# or ways to improve this software, please submit an issue or +# pull request on our GitHub repository: +# +# https://github.com/ReproNim/neurodocker + +FROM neurodebian:stretch-non-free + USER root -RUN /bin/bash -c "source activate neuro && pip install /work" + +ARG DEBIAN_FRONTEND="noninteractive" + +ENV LANG="en_US.UTF-8" \ + LC_ALL="en_US.UTF-8" \ + ND_ENTRYPOINT="/neurodocker/startup.sh" +RUN export ND_ENTRYPOINT="/neurodocker/startup.sh" \ + && apt-get update -qq \ + && apt-get install -y -q --no-install-recommends \ + apt-utils \ + bzip2 \ + ca-certificates \ + curl \ + locales \ + unzip \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen \ + && dpkg-reconfigure --frontend=noninteractive locales \ + && update-locale LANG="en_US.UTF-8" \ + && chmod 777 /opt && chmod a+s /opt \ + && mkdir -p /neurodocker \ + && if [ ! -f "$ND_ENTRYPOINT" ]; then \ + echo '#!/usr/bin/env bash' >> "$ND_ENTRYPOINT" \ + && echo 'set -e' >> "$ND_ENTRYPOINT" \ + && echo 'export USER="${USER:=`whoami`}"' >> "$ND_ENTRYPOINT" \ + && echo 'if [ -n "$1" ]; then "$@"; else /usr/bin/env bash; fi' >> "$ND_ENTRYPOINT"; \ + fi \ + && chmod -R 777 /neurodocker && chmod a+s /neurodocker + +ENTRYPOINT ["/neurodocker/startup.sh"] + +RUN apt-get update -qq \ + && apt-get install -y -q --no-install-recommends \ + git \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +ENV FSLDIR="/opt/fsl-6.0.3" \ + PATH="/opt/fsl-6.0.3/bin:$PATH" \ + FSLOUTPUTTYPE="NIFTI_GZ" \ + FSLMULTIFILEQUIT="TRUE" \ + FSLTCLSH="/opt/fsl-6.0.3/bin/fsltclsh" \ + FSLWISH="/opt/fsl-6.0.3/bin/fslwish" \ + FSLLOCKDIR="" \ + FSLMACHINELIST="" \ + FSLREMOTECALL="" \ + FSLGECUDAQ="cuda.q" +RUN apt-get update -qq \ + && apt-get install -y -q --no-install-recommends \ + bc \ + dc \ + file \ + libfontconfig1 \ + libfreetype6 \ + libgl1-mesa-dev \ + libgl1-mesa-dri \ + libglu1-mesa-dev \ + libgomp1 \ + libice6 \ + libxcursor1 \ + libxft2 \ + libxinerama1 \ + libxrandr2 \ + libxrender1 \ + libxt6 \ + sudo \ + wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && echo "Downloading FSL ..." 
\ + && mkdir -p /opt/fsl-6.0.3 \ + && curl -fsSL --retry 5 https://fsl.fmrib.ox.ac.uk/fsldownloads/fsl-6.0.3-centos6_64.tar.gz \ + | tar -xz -C /opt/fsl-6.0.3 --strip-components 1 \ + && sed -i '$iecho Some packages in this Docker container are non-free' $ND_ENTRYPOINT \ + && sed -i '$iecho If you are considering commercial use of this container, please consult the relevant license:' $ND_ENTRYPOINT \ + && sed -i '$iecho https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Licence' $ND_ENTRYPOINT \ + && sed -i '$isource $FSLDIR/etc/fslconf/fsl.sh' $ND_ENTRYPOINT \ + && echo "Installing FSL conda environment ..." \ + && bash /opt/fsl-6.0.3/etc/fslconf/fslpython_install.sh -f /opt/fsl-6.0.3 + +ENV PATH="/opt/afni-latest:$PATH" \ + AFNI_PLUGINPATH="/opt/afni-latest" +RUN apt-get update -qq \ + && apt-get install -y -q --no-install-recommends \ + ed \ + gsl-bin \ + libglib2.0-0 \ + libglu1-mesa-dev \ + libglw1-mesa \ + libgomp1 \ + libjpeg62 \ + libnlopt-dev \ + libxm4 \ + netpbm \ + python \ + python3 \ + r-base \ + r-base-dev \ + tcsh \ + xfonts-base \ + xvfb \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && curl -sSL --retry 5 -o /tmp/toinstall.deb http://mirrors.kernel.org/debian/pool/main/libx/libxp/libxp6_1.0.2-2_amd64.deb \ + && dpkg -i /tmp/toinstall.deb \ + && rm /tmp/toinstall.deb \ + && curl -sSL --retry 5 -o /tmp/toinstall.deb http://snapshot.debian.org/archive/debian-security/20160113T213056Z/pool/updates/main/libp/libpng/libpng12-0_1.2.49-1%2Bdeb7u2_amd64.deb \ + && dpkg -i /tmp/toinstall.deb \ + && rm /tmp/toinstall.deb \ + && apt-get install -f \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && gsl2_path="$(find / -name 'libgsl.so.19' || printf '')" \ + && if [ -n "$gsl2_path" ]; then \ + ln -sfv "$gsl2_path" "$(dirname $gsl2_path)/libgsl.so.0"; \ + fi \ + && ldconfig \ + && echo "Downloading AFNI ..." \ + && mkdir -p /opt/afni-latest \ + && curl -fsSL --retry 5 https://afni.nimh.nih.gov/pub/dist/tgz/linux_openmp_64.tgz \ + | tar -xz -C /opt/afni-latest --strip-components 1 \ + && PATH=$PATH:/opt/afni-latest rPkgsInstall -pkgs ALL + +ENV FORCE_SPMMCR="1" \ + SPM_HTML_BROWSER="0" \ + LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib/x86_64-linux-gnu:/opt/matlabmcr-2010a/v713/runtime/glnxa64:/opt/matlabmcr-2010a/v713/bin/glnxa64:/opt/matlabmcr-2010a/v713/sys/os/glnxa64:/opt/matlabmcr-2010a/v713/extern/bin/glnxa64" \ + MATLABCMD="/opt/matlabmcr-2010a/v713/toolbox/matlab" +RUN export TMPDIR="$(mktemp -d)" \ + && apt-get update -qq \ + && apt-get install -y -q --no-install-recommends \ + bc \ + libncurses5 \ + libxext6 \ + libxmu6 \ + libxpm-dev \ + libxt6 \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && echo "Downloading MATLAB Compiler Runtime ..." \ + && curl -sSL --retry 5 -o /tmp/toinstall.deb http://mirrors.kernel.org/debian/pool/main/libx/libxp/libxp6_1.0.2-2_amd64.deb \ + && dpkg -i /tmp/toinstall.deb \ + && rm /tmp/toinstall.deb \ + && apt-get install -f \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && curl -fsSL --retry 5 -o "$TMPDIR/MCRInstaller.bin" https://dl.dropbox.com/s/zz6me0c3v4yq5fd/MCR_R2010a_glnxa64_installer.bin \ + && chmod +x "$TMPDIR/MCRInstaller.bin" \ + && "$TMPDIR/MCRInstaller.bin" -silent -P installLocation="/opt/matlabmcr-2010a" \ + && rm -rf "$TMPDIR" \ + && unset TMPDIR \ + && echo "Downloading standalone SPM ..." 
\ + && curl -fsSL --retry 5 -o /tmp/spm12.zip https://www.fil.ion.ucl.ac.uk/spm/download/restricted/utopia/previous/spm12_r7771_R2010a.zip \ + && unzip -q /tmp/spm12.zip -d /tmp \ + && mkdir -p /opt/spm12-r7771 \ + && mv /tmp/spm12/* /opt/spm12-r7771/ \ + && chmod -R 777 /opt/spm12-r7771 \ + && rm -rf /tmp/spm* \ + && /opt/spm12-r7771/run_spm12.sh /opt/matlabmcr-2010a/v713 quit \ + && sed -i '$iexport SPMMCRCMD=\"/opt/spm12-r7771/run_spm12.sh /opt/matlabmcr-2010a/v713 script\"' $ND_ENTRYPOINT + +RUN test "$(getent passwd neuro)" || useradd --no-user-group --create-home --shell /bin/bash neuro USER neuro + +WORKDIR /home + +ENV CONDA_DIR="/opt/miniconda-latest" \ + PATH="/opt/miniconda-latest/bin:$PATH" +RUN export PATH="/opt/miniconda-latest/bin:$PATH" \ + && echo "Downloading Miniconda installer ..." \ + && conda_installer="/tmp/miniconda.sh" \ + && curl -fsSL --retry 5 -o "$conda_installer" https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \ + && bash "$conda_installer" -b -p /opt/miniconda-latest \ + && rm -f "$conda_installer" \ + && conda update -yq -nbase conda \ + && conda config --system --prepend channels conda-forge \ + && conda config --system --set auto_update_conda false \ + && conda config --system --set show_channel_urls true \ + && sync && conda clean -y --all && sync \ + && conda create -y -q --name neuro \ + && conda install -y -q --name neuro \ + "python=3.8" \ + "traits" \ + "jupyter" \ + "nilearn" \ + "graphviz" \ + "nipype" \ + "scikit-image" \ + && sync && conda clean -y --all && sync \ + && bash -c "source activate neuro \ + && pip install --no-cache-dir \ + "matplotlib"" \ + && rm -rf ~/.cache/pip/* \ + && sync \ + && sed -i '$isource activate neuro' $ND_ENTRYPOINT + +ENV LD_LIBRARY_PATH="/opt/miniconda-latest/envs/neuro:" + +RUN bash -c 'source activate neuro' + +USER root + +RUN chmod 777 -Rf /home + +RUN chown -R neuro /home + +USER neuro + +RUN mkdir -p ~/.jupyter && echo c.NotebookApp.ip = \"0.0.0.0\" > ~/.jupyter/jupyter_notebook_config.py + +RUN echo '{ \ + \n "pkg_manager": "apt", \ + \n "instructions": [ \ + \n [ \ + \n "base", \ + \n "neurodebian:stretch-non-free" \ + \n ], \ + \n [ \ + \n "install", \ + \n [ \ + \n "git" \ + \n ] \ + \n ], \ + \n [ \ + \n "fsl", \ + \n { \ + \n "version": "6.0.3" \ + \n } \ + \n ], \ + \n [ \ + \n "afni", \ + \n { \ + \n "version": "latest", \ + \n "method": "binaries", \ + \n "install_r": "true", \ + \n "install_r_pkgs": "true", \ + \n "install_python2": "true", \ + \n "install_python3": "true" \ + \n } \ + \n ], \ + \n [ \ + \n "spm12", \ + \n { \ + \n "version": "r7771", \ + \n "method": "binaries" \ + \n } \ + \n ], \ + \n [ \ + \n "user", \ + \n "neuro" \ + \n ], \ + \n [ \ + \n "workdir", \ + \n "/home" \ + \n ], \ + \n [ \ + \n "miniconda", \ + \n { \ + \n "create_env": "neuro", \ + \n "conda_install": [ \ + \n "python=3.8", \ + \n "traits", \ + \n "jupyter", \ + \n "nilearn", \ + \n "graphviz", \ + \n "nipype", \ + \n "scikit-image" \ + \n ], \ + \n "pip_install": [ \ + \n "matplotlib" \ + \n ], \ + \n "activate": true \ + \n } \ + \n ], \ + \n [ \ + \n "env", \ + \n { \ + \n "LD_LIBRARY_PATH": "/opt/miniconda-latest/envs/neuro:" \ + \n } \ + \n ], \ + \n [ \ + \n "run_bash", \ + \n "source activate neuro" \ + \n ], \ + \n [ \ + \n "user", \ + \n "root" \ + \n ], \ + \n [ \ + \n "run", \ + \n "chmod 777 -Rf /home" \ + \n ], \ + \n [ \ + \n "run", \ + \n "chown -R neuro /home" \ + \n ], \ + \n [ \ + \n "user", \ + \n "neuro" \ + \n ], \ + \n [ \ + \n "run", \ + \n "mkdir -p ~/.jupyter && echo 
c.NotebookApp.ip = \\\"0.0.0.0\\\" > ~/.jupyter/jupyter_notebook_config.py" \ + \n ] \ + \n ] \ + \n}' > /neurodocker/neurodocker_specs.json From d73d3ca180f2cfe07326982d9a399cf6ffd72143 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 19 Apr 2024 13:52:42 +0200 Subject: [PATCH 07/17] Correlation update --- Dockerfile | 320 +---------------------- narps_open/utils/correlation/__main__.py | 2 +- 2 files changed, 4 insertions(+), 318 deletions(-) diff --git a/Dockerfile b/Dockerfile index 13b22194..b4956310 100755 --- a/Dockerfile +++ b/Dockerfile @@ -1,319 +1,5 @@ -# Generated by: Neurodocker version 0.7.0+0.gdc97516.dirty -# Latest release: Neurodocker version 0.7.0 -# Timestamp: 2021/11/09 11:04:47 UTC -# -# Thank you for using Neurodocker. If you discover any issues -# or ways to improve this software, please submit an issue or -# pull request on our GitHub repository: -# -# https://github.com/ReproNim/neurodocker - -FROM neurodebian:stretch-non-free - +FROM nipype/nipype:py38 +COPY . /work USER root - -ARG DEBIAN_FRONTEND="noninteractive" - -ENV LANG="en_US.UTF-8" \ - LC_ALL="en_US.UTF-8" \ - ND_ENTRYPOINT="/neurodocker/startup.sh" -RUN export ND_ENTRYPOINT="/neurodocker/startup.sh" \ - && apt-get update -qq \ - && apt-get install -y -q --no-install-recommends \ - apt-utils \ - bzip2 \ - ca-certificates \ - curl \ - locales \ - unzip \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - && sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen \ - && dpkg-reconfigure --frontend=noninteractive locales \ - && update-locale LANG="en_US.UTF-8" \ - && chmod 777 /opt && chmod a+s /opt \ - && mkdir -p /neurodocker \ - && if [ ! -f "$ND_ENTRYPOINT" ]; then \ - echo '#!/usr/bin/env bash' >> "$ND_ENTRYPOINT" \ - && echo 'set -e' >> "$ND_ENTRYPOINT" \ - && echo 'export USER="${USER:=`whoami`}"' >> "$ND_ENTRYPOINT" \ - && echo 'if [ -n "$1" ]; then "$@"; else /usr/bin/env bash; fi' >> "$ND_ENTRYPOINT"; \ - fi \ - && chmod -R 777 /neurodocker && chmod a+s /neurodocker - -ENTRYPOINT ["/neurodocker/startup.sh"] - -RUN apt-get update -qq \ - && apt-get install -y -q --no-install-recommends \ - git \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - -ENV FSLDIR="/opt/fsl-6.0.3" \ - PATH="/opt/fsl-6.0.3/bin:$PATH" \ - FSLOUTPUTTYPE="NIFTI_GZ" \ - FSLMULTIFILEQUIT="TRUE" \ - FSLTCLSH="/opt/fsl-6.0.3/bin/fsltclsh" \ - FSLWISH="/opt/fsl-6.0.3/bin/fslwish" \ - FSLLOCKDIR="" \ - FSLMACHINELIST="" \ - FSLREMOTECALL="" \ - FSLGECUDAQ="cuda.q" -RUN apt-get update -qq \ - && apt-get install -y -q --no-install-recommends \ - bc \ - dc \ - file \ - libfontconfig1 \ - libfreetype6 \ - libgl1-mesa-dev \ - libgl1-mesa-dri \ - libglu1-mesa-dev \ - libgomp1 \ - libice6 \ - libxcursor1 \ - libxft2 \ - libxinerama1 \ - libxrandr2 \ - libxrender1 \ - libxt6 \ - sudo \ - wget \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - && echo "Downloading FSL ..." \ - && mkdir -p /opt/fsl-6.0.3 \ - && curl -fsSL --retry 5 https://fsl.fmrib.ox.ac.uk/fsldownloads/fsl-6.0.3-centos6_64.tar.gz \ - | tar -xz -C /opt/fsl-6.0.3 --strip-components 1 \ - && sed -i '$iecho Some packages in this Docker container are non-free' $ND_ENTRYPOINT \ - && sed -i '$iecho If you are considering commercial use of this container, please consult the relevant license:' $ND_ENTRYPOINT \ - && sed -i '$iecho https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Licence' $ND_ENTRYPOINT \ - && sed -i '$isource $FSLDIR/etc/fslconf/fsl.sh' $ND_ENTRYPOINT \ - && echo "Installing FSL conda environment ..." 
\ - && bash /opt/fsl-6.0.3/etc/fslconf/fslpython_install.sh -f /opt/fsl-6.0.3 - -ENV PATH="/opt/afni-latest:$PATH" \ - AFNI_PLUGINPATH="/opt/afni-latest" -RUN apt-get update -qq \ - && apt-get install -y -q --no-install-recommends \ - ed \ - gsl-bin \ - libglib2.0-0 \ - libglu1-mesa-dev \ - libglw1-mesa \ - libgomp1 \ - libjpeg62 \ - libnlopt-dev \ - libxm4 \ - netpbm \ - python \ - python3 \ - r-base \ - r-base-dev \ - tcsh \ - xfonts-base \ - xvfb \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - && curl -sSL --retry 5 -o /tmp/toinstall.deb http://mirrors.kernel.org/debian/pool/main/libx/libxp/libxp6_1.0.2-2_amd64.deb \ - && dpkg -i /tmp/toinstall.deb \ - && rm /tmp/toinstall.deb \ - && curl -sSL --retry 5 -o /tmp/toinstall.deb http://snapshot.debian.org/archive/debian-security/20160113T213056Z/pool/updates/main/libp/libpng/libpng12-0_1.2.49-1%2Bdeb7u2_amd64.deb \ - && dpkg -i /tmp/toinstall.deb \ - && rm /tmp/toinstall.deb \ - && apt-get install -f \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - && gsl2_path="$(find / -name 'libgsl.so.19' || printf '')" \ - && if [ -n "$gsl2_path" ]; then \ - ln -sfv "$gsl2_path" "$(dirname $gsl2_path)/libgsl.so.0"; \ - fi \ - && ldconfig \ - && echo "Downloading AFNI ..." \ - && mkdir -p /opt/afni-latest \ - && curl -fsSL --retry 5 https://afni.nimh.nih.gov/pub/dist/tgz/linux_openmp_64.tgz \ - | tar -xz -C /opt/afni-latest --strip-components 1 \ - && PATH=$PATH:/opt/afni-latest rPkgsInstall -pkgs ALL - -ENV FORCE_SPMMCR="1" \ - SPM_HTML_BROWSER="0" \ - LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib/x86_64-linux-gnu:/opt/matlabmcr-2010a/v713/runtime/glnxa64:/opt/matlabmcr-2010a/v713/bin/glnxa64:/opt/matlabmcr-2010a/v713/sys/os/glnxa64:/opt/matlabmcr-2010a/v713/extern/bin/glnxa64" \ - MATLABCMD="/opt/matlabmcr-2010a/v713/toolbox/matlab" -RUN export TMPDIR="$(mktemp -d)" \ - && apt-get update -qq \ - && apt-get install -y -q --no-install-recommends \ - bc \ - libncurses5 \ - libxext6 \ - libxmu6 \ - libxpm-dev \ - libxt6 \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - && echo "Downloading MATLAB Compiler Runtime ..." \ - && curl -sSL --retry 5 -o /tmp/toinstall.deb http://mirrors.kernel.org/debian/pool/main/libx/libxp/libxp6_1.0.2-2_amd64.deb \ - && dpkg -i /tmp/toinstall.deb \ - && rm /tmp/toinstall.deb \ - && apt-get install -f \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* \ - && curl -fsSL --retry 5 -o "$TMPDIR/MCRInstaller.bin" https://dl.dropbox.com/s/zz6me0c3v4yq5fd/MCR_R2010a_glnxa64_installer.bin \ - && chmod +x "$TMPDIR/MCRInstaller.bin" \ - && "$TMPDIR/MCRInstaller.bin" -silent -P installLocation="/opt/matlabmcr-2010a" \ - && rm -rf "$TMPDIR" \ - && unset TMPDIR \ - && echo "Downloading standalone SPM ..." 
\ - && curl -fsSL --retry 5 -o /tmp/spm12.zip https://www.fil.ion.ucl.ac.uk/spm/download/restricted/utopia/previous/spm12_r7771_R2010a.zip \ - && unzip -q /tmp/spm12.zip -d /tmp \ - && mkdir -p /opt/spm12-r7771 \ - && mv /tmp/spm12/* /opt/spm12-r7771/ \ - && chmod -R 777 /opt/spm12-r7771 \ - && rm -rf /tmp/spm* \ - && /opt/spm12-r7771/run_spm12.sh /opt/matlabmcr-2010a/v713 quit \ - && sed -i '$iexport SPMMCRCMD=\"/opt/spm12-r7771/run_spm12.sh /opt/matlabmcr-2010a/v713 script\"' $ND_ENTRYPOINT - -RUN test "$(getent passwd neuro)" || useradd --no-user-group --create-home --shell /bin/bash neuro +RUN /bin/bash -c "source activate neuro && pip install /work" USER neuro - -WORKDIR /home - -ENV CONDA_DIR="/opt/miniconda-latest" \ - PATH="/opt/miniconda-latest/bin:$PATH" -RUN export PATH="/opt/miniconda-latest/bin:$PATH" \ - && echo "Downloading Miniconda installer ..." \ - && conda_installer="/tmp/miniconda.sh" \ - && curl -fsSL --retry 5 -o "$conda_installer" https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \ - && bash "$conda_installer" -b -p /opt/miniconda-latest \ - && rm -f "$conda_installer" \ - && conda update -yq -nbase conda \ - && conda config --system --prepend channels conda-forge \ - && conda config --system --set auto_update_conda false \ - && conda config --system --set show_channel_urls true \ - && sync && conda clean -y --all && sync \ - && conda create -y -q --name neuro \ - && conda install -y -q --name neuro \ - "python=3.8" \ - "traits" \ - "jupyter" \ - "nilearn" \ - "graphviz" \ - "nipype" \ - "scikit-image" \ - && sync && conda clean -y --all && sync \ - && bash -c "source activate neuro \ - && pip install --no-cache-dir \ - "matplotlib"" \ - && rm -rf ~/.cache/pip/* \ - && sync \ - && sed -i '$isource activate neuro' $ND_ENTRYPOINT - -ENV LD_LIBRARY_PATH="/opt/miniconda-latest/envs/neuro:" - -RUN bash -c 'source activate neuro' - -USER root - -RUN chmod 777 -Rf /home - -RUN chown -R neuro /home - -USER neuro - -RUN mkdir -p ~/.jupyter && echo c.NotebookApp.ip = \"0.0.0.0\" > ~/.jupyter/jupyter_notebook_config.py - -RUN echo '{ \ - \n "pkg_manager": "apt", \ - \n "instructions": [ \ - \n [ \ - \n "base", \ - \n "neurodebian:stretch-non-free" \ - \n ], \ - \n [ \ - \n "install", \ - \n [ \ - \n "git" \ - \n ] \ - \n ], \ - \n [ \ - \n "fsl", \ - \n { \ - \n "version": "6.0.3" \ - \n } \ - \n ], \ - \n [ \ - \n "afni", \ - \n { \ - \n "version": "latest", \ - \n "method": "binaries", \ - \n "install_r": "true", \ - \n "install_r_pkgs": "true", \ - \n "install_python2": "true", \ - \n "install_python3": "true" \ - \n } \ - \n ], \ - \n [ \ - \n "spm12", \ - \n { \ - \n "version": "r7771", \ - \n "method": "binaries" \ - \n } \ - \n ], \ - \n [ \ - \n "user", \ - \n "neuro" \ - \n ], \ - \n [ \ - \n "workdir", \ - \n "/home" \ - \n ], \ - \n [ \ - \n "miniconda", \ - \n { \ - \n "create_env": "neuro", \ - \n "conda_install": [ \ - \n "python=3.8", \ - \n "traits", \ - \n "jupyter", \ - \n "nilearn", \ - \n "graphviz", \ - \n "nipype", \ - \n "scikit-image" \ - \n ], \ - \n "pip_install": [ \ - \n "matplotlib" \ - \n ], \ - \n "activate": true \ - \n } \ - \n ], \ - \n [ \ - \n "env", \ - \n { \ - \n "LD_LIBRARY_PATH": "/opt/miniconda-latest/envs/neuro:" \ - \n } \ - \n ], \ - \n [ \ - \n "run_bash", \ - \n "source activate neuro" \ - \n ], \ - \n [ \ - \n "user", \ - \n "root" \ - \n ], \ - \n [ \ - \n "run", \ - \n "chmod 777 -Rf /home" \ - \n ], \ - \n [ \ - \n "run", \ - \n "chown -R neuro /home" \ - \n ], \ - \n [ \ - \n "user", \ - \n "neuro" \ - \n 
], \ - \n [ \ - \n "run", \ - \n "mkdir -p ~/.jupyter && echo c.NotebookApp.ip = \\\"0.0.0.0\\\" > ~/.jupyter/jupyter_notebook_config.py" \ - \n ] \ - \n ] \ - \n}' > /neurodocker/neurodocker_specs.json diff --git a/narps_open/utils/correlation/__main__.py b/narps_open/utils/correlation/__main__.py index d086499b..0ccf80c2 100644 --- a/narps_open/utils/correlation/__main__.py +++ b/narps_open/utils/correlation/__main__.py @@ -19,7 +19,7 @@ def main(): parser = ArgumentParser(description = 'Compare reproduced files to original results.') parser.add_argument('-t', '--team', type = str, required = True, help = 'the team ID', choices = get_implemented_pipelines()) - subjects.add_argument('-n', '--nsubjects', type=str, required = True, + parser.add_argument('-n', '--nsubjects', type = int, required = True, help='the number of subjects to be selected') arguments = parser.parse_args() From 95c6012f06c1dbb59e32943cbf9270ea8c9a693a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 13 Jun 2024 14:53:31 +0200 Subject: [PATCH 08/17] Codespell in code of conduct --- CODE_OF_CONDUCT.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 2ed9dd15..094620ef 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -5,7 +5,7 @@ We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender -identity and expression, level of experience, education, socio-economic status, +identity and expression, level of experience, education, socioeconomic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. From 4b7de9c86167e9a757370335195bc0c8873e73a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 13 Jun 2024 15:20:12 +0200 Subject: [PATCH 09/17] Restoring Dockerfile --- Dockerfile | 320 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 317 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index b4956310..13b22194 100755 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,319 @@ -FROM nipype/nipype:py38 -COPY . /work +# Generated by: Neurodocker version 0.7.0+0.gdc97516.dirty +# Latest release: Neurodocker version 0.7.0 +# Timestamp: 2021/11/09 11:04:47 UTC +# +# Thank you for using Neurodocker. If you discover any issues +# or ways to improve this software, please submit an issue or +# pull request on our GitHub repository: +# +# https://github.com/ReproNim/neurodocker + +FROM neurodebian:stretch-non-free + USER root -RUN /bin/bash -c "source activate neuro && pip install /work" + +ARG DEBIAN_FRONTEND="noninteractive" + +ENV LANG="en_US.UTF-8" \ + LC_ALL="en_US.UTF-8" \ + ND_ENTRYPOINT="/neurodocker/startup.sh" +RUN export ND_ENTRYPOINT="/neurodocker/startup.sh" \ + && apt-get update -qq \ + && apt-get install -y -q --no-install-recommends \ + apt-utils \ + bzip2 \ + ca-certificates \ + curl \ + locales \ + unzip \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen \ + && dpkg-reconfigure --frontend=noninteractive locales \ + && update-locale LANG="en_US.UTF-8" \ + && chmod 777 /opt && chmod a+s /opt \ + && mkdir -p /neurodocker \ + && if [ ! 
-f "$ND_ENTRYPOINT" ]; then \ + echo '#!/usr/bin/env bash' >> "$ND_ENTRYPOINT" \ + && echo 'set -e' >> "$ND_ENTRYPOINT" \ + && echo 'export USER="${USER:=`whoami`}"' >> "$ND_ENTRYPOINT" \ + && echo 'if [ -n "$1" ]; then "$@"; else /usr/bin/env bash; fi' >> "$ND_ENTRYPOINT"; \ + fi \ + && chmod -R 777 /neurodocker && chmod a+s /neurodocker + +ENTRYPOINT ["/neurodocker/startup.sh"] + +RUN apt-get update -qq \ + && apt-get install -y -q --no-install-recommends \ + git \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +ENV FSLDIR="/opt/fsl-6.0.3" \ + PATH="/opt/fsl-6.0.3/bin:$PATH" \ + FSLOUTPUTTYPE="NIFTI_GZ" \ + FSLMULTIFILEQUIT="TRUE" \ + FSLTCLSH="/opt/fsl-6.0.3/bin/fsltclsh" \ + FSLWISH="/opt/fsl-6.0.3/bin/fslwish" \ + FSLLOCKDIR="" \ + FSLMACHINELIST="" \ + FSLREMOTECALL="" \ + FSLGECUDAQ="cuda.q" +RUN apt-get update -qq \ + && apt-get install -y -q --no-install-recommends \ + bc \ + dc \ + file \ + libfontconfig1 \ + libfreetype6 \ + libgl1-mesa-dev \ + libgl1-mesa-dri \ + libglu1-mesa-dev \ + libgomp1 \ + libice6 \ + libxcursor1 \ + libxft2 \ + libxinerama1 \ + libxrandr2 \ + libxrender1 \ + libxt6 \ + sudo \ + wget \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && echo "Downloading FSL ..." \ + && mkdir -p /opt/fsl-6.0.3 \ + && curl -fsSL --retry 5 https://fsl.fmrib.ox.ac.uk/fsldownloads/fsl-6.0.3-centos6_64.tar.gz \ + | tar -xz -C /opt/fsl-6.0.3 --strip-components 1 \ + && sed -i '$iecho Some packages in this Docker container are non-free' $ND_ENTRYPOINT \ + && sed -i '$iecho If you are considering commercial use of this container, please consult the relevant license:' $ND_ENTRYPOINT \ + && sed -i '$iecho https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/Licence' $ND_ENTRYPOINT \ + && sed -i '$isource $FSLDIR/etc/fslconf/fsl.sh' $ND_ENTRYPOINT \ + && echo "Installing FSL conda environment ..." \ + && bash /opt/fsl-6.0.3/etc/fslconf/fslpython_install.sh -f /opt/fsl-6.0.3 + +ENV PATH="/opt/afni-latest:$PATH" \ + AFNI_PLUGINPATH="/opt/afni-latest" +RUN apt-get update -qq \ + && apt-get install -y -q --no-install-recommends \ + ed \ + gsl-bin \ + libglib2.0-0 \ + libglu1-mesa-dev \ + libglw1-mesa \ + libgomp1 \ + libjpeg62 \ + libnlopt-dev \ + libxm4 \ + netpbm \ + python \ + python3 \ + r-base \ + r-base-dev \ + tcsh \ + xfonts-base \ + xvfb \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && curl -sSL --retry 5 -o /tmp/toinstall.deb http://mirrors.kernel.org/debian/pool/main/libx/libxp/libxp6_1.0.2-2_amd64.deb \ + && dpkg -i /tmp/toinstall.deb \ + && rm /tmp/toinstall.deb \ + && curl -sSL --retry 5 -o /tmp/toinstall.deb http://snapshot.debian.org/archive/debian-security/20160113T213056Z/pool/updates/main/libp/libpng/libpng12-0_1.2.49-1%2Bdeb7u2_amd64.deb \ + && dpkg -i /tmp/toinstall.deb \ + && rm /tmp/toinstall.deb \ + && apt-get install -f \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && gsl2_path="$(find / -name 'libgsl.so.19' || printf '')" \ + && if [ -n "$gsl2_path" ]; then \ + ln -sfv "$gsl2_path" "$(dirname $gsl2_path)/libgsl.so.0"; \ + fi \ + && ldconfig \ + && echo "Downloading AFNI ..." 
\ + && mkdir -p /opt/afni-latest \ + && curl -fsSL --retry 5 https://afni.nimh.nih.gov/pub/dist/tgz/linux_openmp_64.tgz \ + | tar -xz -C /opt/afni-latest --strip-components 1 \ + && PATH=$PATH:/opt/afni-latest rPkgsInstall -pkgs ALL + +ENV FORCE_SPMMCR="1" \ + SPM_HTML_BROWSER="0" \ + LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib/x86_64-linux-gnu:/opt/matlabmcr-2010a/v713/runtime/glnxa64:/opt/matlabmcr-2010a/v713/bin/glnxa64:/opt/matlabmcr-2010a/v713/sys/os/glnxa64:/opt/matlabmcr-2010a/v713/extern/bin/glnxa64" \ + MATLABCMD="/opt/matlabmcr-2010a/v713/toolbox/matlab" +RUN export TMPDIR="$(mktemp -d)" \ + && apt-get update -qq \ + && apt-get install -y -q --no-install-recommends \ + bc \ + libncurses5 \ + libxext6 \ + libxmu6 \ + libxpm-dev \ + libxt6 \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && echo "Downloading MATLAB Compiler Runtime ..." \ + && curl -sSL --retry 5 -o /tmp/toinstall.deb http://mirrors.kernel.org/debian/pool/main/libx/libxp/libxp6_1.0.2-2_amd64.deb \ + && dpkg -i /tmp/toinstall.deb \ + && rm /tmp/toinstall.deb \ + && apt-get install -f \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && curl -fsSL --retry 5 -o "$TMPDIR/MCRInstaller.bin" https://dl.dropbox.com/s/zz6me0c3v4yq5fd/MCR_R2010a_glnxa64_installer.bin \ + && chmod +x "$TMPDIR/MCRInstaller.bin" \ + && "$TMPDIR/MCRInstaller.bin" -silent -P installLocation="/opt/matlabmcr-2010a" \ + && rm -rf "$TMPDIR" \ + && unset TMPDIR \ + && echo "Downloading standalone SPM ..." \ + && curl -fsSL --retry 5 -o /tmp/spm12.zip https://www.fil.ion.ucl.ac.uk/spm/download/restricted/utopia/previous/spm12_r7771_R2010a.zip \ + && unzip -q /tmp/spm12.zip -d /tmp \ + && mkdir -p /opt/spm12-r7771 \ + && mv /tmp/spm12/* /opt/spm12-r7771/ \ + && chmod -R 777 /opt/spm12-r7771 \ + && rm -rf /tmp/spm* \ + && /opt/spm12-r7771/run_spm12.sh /opt/matlabmcr-2010a/v713 quit \ + && sed -i '$iexport SPMMCRCMD=\"/opt/spm12-r7771/run_spm12.sh /opt/matlabmcr-2010a/v713 script\"' $ND_ENTRYPOINT + +RUN test "$(getent passwd neuro)" || useradd --no-user-group --create-home --shell /bin/bash neuro USER neuro + +WORKDIR /home + +ENV CONDA_DIR="/opt/miniconda-latest" \ + PATH="/opt/miniconda-latest/bin:$PATH" +RUN export PATH="/opt/miniconda-latest/bin:$PATH" \ + && echo "Downloading Miniconda installer ..." 
\ + && conda_installer="/tmp/miniconda.sh" \ + && curl -fsSL --retry 5 -o "$conda_installer" https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh \ + && bash "$conda_installer" -b -p /opt/miniconda-latest \ + && rm -f "$conda_installer" \ + && conda update -yq -nbase conda \ + && conda config --system --prepend channels conda-forge \ + && conda config --system --set auto_update_conda false \ + && conda config --system --set show_channel_urls true \ + && sync && conda clean -y --all && sync \ + && conda create -y -q --name neuro \ + && conda install -y -q --name neuro \ + "python=3.8" \ + "traits" \ + "jupyter" \ + "nilearn" \ + "graphviz" \ + "nipype" \ + "scikit-image" \ + && sync && conda clean -y --all && sync \ + && bash -c "source activate neuro \ + && pip install --no-cache-dir \ + "matplotlib"" \ + && rm -rf ~/.cache/pip/* \ + && sync \ + && sed -i '$isource activate neuro' $ND_ENTRYPOINT + +ENV LD_LIBRARY_PATH="/opt/miniconda-latest/envs/neuro:" + +RUN bash -c 'source activate neuro' + +USER root + +RUN chmod 777 -Rf /home + +RUN chown -R neuro /home + +USER neuro + +RUN mkdir -p ~/.jupyter && echo c.NotebookApp.ip = \"0.0.0.0\" > ~/.jupyter/jupyter_notebook_config.py + +RUN echo '{ \ + \n "pkg_manager": "apt", \ + \n "instructions": [ \ + \n [ \ + \n "base", \ + \n "neurodebian:stretch-non-free" \ + \n ], \ + \n [ \ + \n "install", \ + \n [ \ + \n "git" \ + \n ] \ + \n ], \ + \n [ \ + \n "fsl", \ + \n { \ + \n "version": "6.0.3" \ + \n } \ + \n ], \ + \n [ \ + \n "afni", \ + \n { \ + \n "version": "latest", \ + \n "method": "binaries", \ + \n "install_r": "true", \ + \n "install_r_pkgs": "true", \ + \n "install_python2": "true", \ + \n "install_python3": "true" \ + \n } \ + \n ], \ + \n [ \ + \n "spm12", \ + \n { \ + \n "version": "r7771", \ + \n "method": "binaries" \ + \n } \ + \n ], \ + \n [ \ + \n "user", \ + \n "neuro" \ + \n ], \ + \n [ \ + \n "workdir", \ + \n "/home" \ + \n ], \ + \n [ \ + \n "miniconda", \ + \n { \ + \n "create_env": "neuro", \ + \n "conda_install": [ \ + \n "python=3.8", \ + \n "traits", \ + \n "jupyter", \ + \n "nilearn", \ + \n "graphviz", \ + \n "nipype", \ + \n "scikit-image" \ + \n ], \ + \n "pip_install": [ \ + \n "matplotlib" \ + \n ], \ + \n "activate": true \ + \n } \ + \n ], \ + \n [ \ + \n "env", \ + \n { \ + \n "LD_LIBRARY_PATH": "/opt/miniconda-latest/envs/neuro:" \ + \n } \ + \n ], \ + \n [ \ + \n "run_bash", \ + \n "source activate neuro" \ + \n ], \ + \n [ \ + \n "user", \ + \n "root" \ + \n ], \ + \n [ \ + \n "run", \ + \n "chmod 777 -Rf /home" \ + \n ], \ + \n [ \ + \n "run", \ + \n "chown -R neuro /home" \ + \n ], \ + \n [ \ + \n "user", \ + \n "neuro" \ + \n ], \ + \n [ \ + \n "run", \ + \n "mkdir -p ~/.jupyter && echo c.NotebookApp.ip = \\\"0.0.0.0\\\" > ~/.jupyter/jupyter_notebook_config.py" \ + \n ] \ + \n ] \ + \n}' > /neurodocker/neurodocker_specs.json From c2118196c410b3682bd51d8fcbc4f9750fdf2e71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 17 Jun 2024 15:34:57 +0200 Subject: [PATCH 10/17] Adding nuisance regressors in the group level analysis --- narps_open/pipelines/team_4SZ2.py | 256 +++++++----------------------- tests/pipelines/test_team_4SZ2.py | 41 ++--- 2 files changed, 70 insertions(+), 227 deletions(-) diff --git a/narps_open/pipelines/team_4SZ2.py b/narps_open/pipelines/team_4SZ2.py index 07ceed47..3a536225 100644 --- a/narps_open/pipelines/team_4SZ2.py +++ b/narps_open/pipelines/team_4SZ2.py @@ -6,6 +6,8 @@ from os.path import join from itertools import product +from 
numpy import array + from nipype import Workflow, Node, MapNode from nipype.interfaces.utility import IdentityInterface, Function, Split from nipype.interfaces.io import SelectFiles, DataSink @@ -15,12 +17,12 @@ FSLCommand, Cluster ) from nipype.algorithms.modelgen import SpecifyModel -from nipype.interfaces.fsl.maths import MultiImageMaths +from nipype.interfaces.fsl.maths import MathsCommand from narps_open.utils.configuration import Configuration from narps_open.pipelines import Pipeline from narps_open.data.task import TaskInformation -from narps_open.data.participants import get_group +from narps_open.data.participants import get_group, get_participants_information from narps_open.core.common import list_intersection, elements_in_string, clean_list from narps_open.core.interfaces import InterfaceFactory @@ -39,6 +41,11 @@ def __init__(self): ('effect_of_gain', 'T', ['gain', 'loss'], [1, 0]), ('effect_of_loss', 'T', ['gain', 'loss'], [0, 1]) ] + self.group_level_contrasts = [ + ('group_equal_indifference', 'T', ['equalIndifference', 'equalRange'], [1, 0]), + ('group_equal_range', 'T', ['equalIndifference', 'equalRange'], [0, 1]), + ('group_comp', 'T', ['equalIndifference', 'equalRange'], [-1, 1]) + ] def get_preprocessing(self): """ No preprocessing has been done by team 4SZ2 """ @@ -197,59 +204,45 @@ def get_subject_level_analysis(self): """ No subject level analysis has been done by team 4SZ2 """ return None - def get_one_sample_t_test_regressors(subject_list: list) -> dict: - """ - Create dictionary of regressors for one sample t-test group analysis. - - Parameters: - - subject_list: ids of subject in the group for which to do the analysis - - Returns: - - dict containing named lists of regressors. - """ - - return dict(group_mean = [1 for _ in subject_list]) - - def get_two_sample_t_test_regressors( - equal_range_ids: list, - equal_indifference_ids: list, - subject_list: list, - run_list: list - ) -> dict: + @staticmethod + def get_group_level_regressors(subject_list: list): """ Create dictionary of regressors for two sample t-test group analysis. Parameters: - - equal_range_ids: ids of subjects in equal range group - - equal_indifference_ids: ids of subjects in equal indifference group - subject_list: ids of subject for which to do the analysis - - run_list: ids of runs for which to do the analysis + Returns: - regressors, dict: containing named lists of regressors. - groups, list: group identifiers to distinguish groups in FSL analysis. 
""" - # Create 2 lists containing a value for each run, which is + # Create lists containing regressors for each group (equalRange, equalIndifference) # * 1 if the participant is on the group # * 0 otherwise - equal_range_regressors = [] - equal_indifference_regressors = [] - - for subject_id in subject_list: - value_er = 1 if subject_id in equal_range_ids else 0 - value_ei = 1 if subject_id in equal_indifference_ids else 0 - for _ in run_list: - equal_range_regressors.append(value_er) - equal_indifference_regressors.append(value_ei) + equal_range_group = get_group('equalRange') + equal_indif_group = get_group('equalIndifference') + equal_range_regressor = [1 if s in equal_range_group else 0 for s in subject_list] + equal_indif_regressor = [1 if s in equal_indif_group else 0 for s in subject_list] + + # Get gender and age of participants + participants_data = get_participants_information()[['participant_id', 'gender', 'age']] + participants = participants_data.loc[ + participants_data['participant_id'].isin([f'sub-{s}' for s in subject_list]) + ] + ages = array(participants['age']) + genders = array(participants['gender']) - # Create regressors output : a dict with the two list + # Create regressors output regressors = dict( - equalRange = equal_range_regressors, - equalIndifference = equal_indifference_regressors + equalIndifference = equal_indif_regressor, + equalRange = equal_range_regressor, + age = [int(a) for a in ages], + gender = [1 if i == 'F' else 0 for i in genders] ) - # Create groups outputs : a list with 1 for equalRange subjects and 2 for equalIndifference - groups = [1 if i == 1 else 2 for i in equal_range_regressors] + # Create groups outputs + groups = [1 if i == 1 else 2 for i in equal_range_regressor] return regressors, groups @@ -260,32 +253,13 @@ def get_group_level_analysis(self): Returns; - a list of nipype.WorkFlow """ - - methods = ['equalRange', 'equalIndifference', 'groupComp'] - return [self.get_group_level_analysis_sub_workflow(method) for method in methods] - - def get_group_level_analysis_sub_workflow(self, method): - """ - Return a workflow for the group level analysis. 
- - Parameters: - - method: one of 'equalRange', 'equalIndifference' or 'groupComp' - - Returns: - - group_level: nipype.WorkFlow - """ # Compute the number of participants in the analysis nb_subjects = len(self.subject_list) - # Compute the number of participants in the group - nb_subjects_in_group = nb_subjects - if method in ['equalIndifference', 'equalRange']: - nb_subjects_in_group = len([s for s in self.subject_list if s in get_group(method)]) - # Declare the workflow group_level = Workflow( base_dir = self.directories.working_dir, - name = f'group_level_analysis_{method}_nsub_{nb_subjects}') + name = f'group_level_analysis_nsub_{nb_subjects}') # Infosource Node - iterate over the contrasts generated by the subject level analysis information_source = Node(IdentityInterface( @@ -323,6 +297,7 @@ def get_group_level_analysis_sub_workflow(self, method): ), name = 'get_copes', iterfield = 'input_str' ) + get_copes.inputs.elements = complete_subject_ids(self.subject_list) group_level.connect(select_files, 'cope', get_copes, 'input_str') # Function Node elements_in_string @@ -336,6 +311,7 @@ def get_group_level_analysis_sub_workflow(self, method): ), name = 'get_varcopes', iterfield = 'input_str' ) + get_varcopes.inputs.elements = complete_subject_ids(self.subject_list) group_level.connect(select_files, 'varcope', get_varcopes, 'input_str') # Function Node elements_in_string @@ -349,6 +325,7 @@ def get_group_level_analysis_sub_workflow(self, method): ), name = 'get_masks', iterfield = 'input_str' ) + get_masks.inputs.elements = complete_subject_ids(self.subject_list) group_level.connect(select_files, 'masks', get_masks, 'input_str') # Merge Node - Merge cope files @@ -356,27 +333,29 @@ def get_group_level_analysis_sub_workflow(self, method): merge_copes.inputs.dimension = 't' group_level.connect(get_copes, ('out_list', clean_list), merge_copes, 'in_files') + # Merge Masks - Merge mask files + merge_masks = Node(Merge(), name = 'merge_masks') + merge_masks.inputs.dimension = 't' + group_level.connect(get_masks, ('out_list', clean_list), merge_masks, 'in_files') + # Merge Node - Merge cope files merge_varcopes = Node(Merge(), name = 'merge_varcopes') merge_varcopes.inputs.dimension = 't' group_level.connect(get_varcopes, ('out_list', clean_list), merge_varcopes, 'in_files') - # Split Node - Split mask list to serve them as inputs of the MultiImageMaths node. - split_masks = Node(Split(), name = 'split_masks') - split_masks.inputs.splits = [1, (nb_subjects_in_group * len(self.run_list)) - 1] - split_masks.inputs.squeeze = True # Unfold one-element splits removing the list - group_level.connect(get_masks, ('out_list', clean_list), split_masks, 'inlist') - - # MultiImageMaths Node - Create a subject mask by + # MathsCommand Node - Create a global mask by # computing the intersection of all run masks. 
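# Note: with fslmaths semantics, '-Tmin' takes the voxelwise minimum across the
# 4th (time) dimension of the merged mask file, so a voxel survives only if it
# is non-zero in every run mask; the subsequent '-thr 0.9' discards
# interpolation edge values below 0.9, yielding a conservative group mask.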
- mask_intersection = Node(MultiImageMaths(), name = 'mask_intersection') - mask_intersection.inputs.op_string = '-mul %s ' * \ - ((nb_subjects_in_group * len(self.run_list)) - 1) - group_level.connect(split_masks, 'out1', mask_intersection, 'in_file') - group_level.connect(split_masks, 'out2', mask_intersection, 'operand_files') + mask_intersection = Node(MathsCommand(), name = 'mask_intersection') + mask_intersection.inputs.args = '-Tmin -thr 0.9' + group_level.connect(merge_masks, 'merged_file', mask_intersection, 'in_file') + + # Get regressors for the group level analysis + regressors, groups = self.get_group_level_regressors(self.subject_list) # MultipleRegressDesign Node - Specify model specify_model = Node(MultipleRegressDesign(), name = 'specify_model') + specify_model.inputs.regressors = regressors + specify_model.inputs.groups = groups # FLAMEO Node - Estimate model estimate_model = Node(FLAMEO(), name = 'estimate_model') @@ -404,123 +383,19 @@ def get_group_level_analysis_sub_workflow(self, method): data_sink = Node(DataSink(), name = 'data_sink') data_sink.inputs.base_directory = self.directories.output_dir group_level.connect(estimate_model, 'zstats', data_sink, - f'group_level_analysis_{method}_nsub_{nb_subjects}.@zstats') + f'group_level_analysis_nsub_{nb_subjects}.@zstats') group_level.connect(estimate_model, 'tstats', data_sink, - f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstats') + f'group_level_analysis_nsub_{nb_subjects}.@tstats') group_level.connect(cluster,'threshold_file', data_sink, - f'group_level_analysis_{method}_nsub_{nb_subjects}.@threshold_file') - - if method in ('equalIndifference', 'equalRange'): - # Setup a one sample t-test - specify_model.inputs.contrasts = [ - ['group_mean', 'T', ['group_mean'], [1]], - ['group_mean_neg', 'T', ['group_mean'], [-1]] - ] - - # Function Node get_group_subjects - Get subjects in the group and in the subject_list - get_group_subjects = Node(Function( - function = list_intersection, - input_names = ['list_1', 'list_2'], - output_names = ['out_list'] - ), - name = 'get_group_subjects' - ) - get_group_subjects.inputs.list_1 = get_group(method) - get_group_subjects.inputs.list_2 = self.subject_list - group_level.connect( - get_group_subjects, ('out_list', complete_subject_ids), get_copes, 'elements') - group_level.connect( - get_group_subjects, ('out_list', complete_subject_ids), get_varcopes, 'elements') - group_level.connect( - get_group_subjects, ('out_list', complete_sub_ids), get_masks, 'elements') - - # Function Node get_one_sample_t_test_regressors - # Get regressors in the equalRange and equalIndifference method case - regressors_one_sample = Node( - Function( - function = self.get_one_sample_t_test_regressors, - input_names = ['subject_list'], - output_names = ['regressors'] - ), - name = 'regressors_one_sample', - ) - regressors_one_sample.inputs.subject_list = range( - nb_subjects_in_group * len(self.run_list)) - group_level.connect(regressors_one_sample, 'regressors', specify_model, 'regressors') - - elif method == 'groupComp': - - # Select copes and varcopes corresponding to the selected subjects - # Indeed the SelectFiles node asks for all (*) subjects available - get_copes.inputs.elements = complete_subject_ids(self.subject_list) - get_varcopes.inputs.elements = complete_subject_ids(self.subject_list) - get_masks.inputs.elements = complete_sub_ids(self.subject_list) - - # Setup a two sample t-test - specify_model.inputs.contrasts = [ - ['equalRange_sup', 'T', ['equalRange', 'equalIndifference'], [1, 
-1]] - ] - - # Function Node get_equal_range_subjects - # Get subjects in the equalRange group and in the subject_list - get_equal_range_subjects = Node(Function( - function = list_intersection, - input_names = ['list_1', 'list_2'], - output_names = ['out_list'] - ), - name = 'get_equal_range_subjects' - ) - get_equal_range_subjects.inputs.list_1 = get_group('equalRange') - get_equal_range_subjects.inputs.list_2 = self.subject_list - - # Function Node get_equal_indifference_subjects - # Get subjects in the equalIndifference group and in the subject_list - get_equal_indifference_subjects = Node(Function( - function = list_intersection, - input_names = ['list_1', 'list_2'], - output_names = ['out_list'] - ), - name = 'get_equal_indifference_subjects' - ) - get_equal_indifference_subjects.inputs.list_1 = get_group('equalIndifference') - get_equal_indifference_subjects.inputs.list_2 = self.subject_list - - # Function Node get_two_sample_t_test_regressors - # Get regressors in the groupComp method case - regressors_two_sample = Node( - Function( - function = self.get_two_sample_t_test_regressors, - input_names = [ - 'equal_range_ids', - 'equal_indifference_ids', - 'subject_list', - 'run_list' - ], - output_names = ['regressors', 'groups'] - ), - name = 'regressors_two_sample', - ) - regressors_two_sample.inputs.subject_list = self.subject_list - regressors_two_sample.inputs.run_list = self.run_list - - # Add missing connections - group_level.connect( - get_equal_range_subjects, 'out_list', regressors_two_sample, 'equal_range_ids') - group_level.connect( - get_equal_indifference_subjects, 'out_list', - regressors_two_sample, 'equal_indifference_ids') - group_level.connect(regressors_two_sample, 'regressors', specify_model, 'regressors') - group_level.connect(regressors_two_sample, 'groups', specify_model, 'groups') + f'group_level_analysis_nsub_{nb_subjects}.@threshold_file') return group_level def get_group_level_outputs(self): """ Return all names for the files the group level analysis is supposed to generate. """ - # Handle equalRange and equalIndifference parameters = { 'contrast_id': self.contrast_list, - 'method': ['equalRange', 'equalIndifference'], 'file': [ '_cluster0/zstat1_threshold.nii.gz', '_cluster1/zstat2_threshold.nii.gz', @@ -533,34 +408,13 @@ def get_group_level_outputs(self): parameter_sets = product(*parameters.values()) template = join( self.directories.output_dir, - 'group_level_analysis_{method}_nsub_'+f'{len(self.subject_list)}', + 'group_level_analysis_nsub_'+f'{len(self.subject_list)}', '_contrast_id_{contrast_id}', '{file}' ) - return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ - for parameter_values in parameter_sets] - - # Handle groupComp - parameters = { - 'contrast_id': self.contrast_list, - 'file': [ - '_cluster0/zstat1_threshold.nii.gz', - 'tstat1.nii.gz', - 'zstat1.nii.gz' - ] - } - parameter_sets = product(*parameters.values()) - template = join( - self.directories.output_dir, - f'group_level_analysis_groupComp_nsub_{len(self.subject_list)}', - '_contrast_id_{contrast_id}', - '{file}' - ) - return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + return [template.format(**dict(zip(parameters.keys(), parameter_values)))\ for parameter_values in parameter_sets] - return return_list - def get_hypotheses_outputs(self): """ Return all hypotheses output file names. 
""" diff --git a/tests/pipelines/test_team_4SZ2.py b/tests/pipelines/test_team_4SZ2.py index 1f3c08ff..8b025db0 100644 --- a/tests/pipelines/test_team_4SZ2.py +++ b/tests/pipelines/test_team_4SZ2.py @@ -38,10 +38,7 @@ def test_create(): assert pipeline.get_preprocessing() is None assert isinstance(pipeline.get_run_level_analysis(), Workflow) assert pipeline.get_subject_level_analysis() is None - group_level = pipeline.get_group_level_analysis() - assert len(group_level) == 3 - for sub_workflow in group_level: - assert isinstance(sub_workflow, Workflow) + assert isinstance(pipeline.get_group_level_analysis(), Workflow) @staticmethod @mark.unit_test @@ -88,28 +85,20 @@ def test_subject_information(): @staticmethod @mark.unit_test - def test_one_sample_t_test_regressors(): - """ Test the get_one_sample_t_test_regressors method """ - - result = PipelineTeam4SZ2.get_one_sample_t_test_regressors(['001', '002', '003', '004']) - assert result == {'group_mean' : [1]*4} - - @staticmethod - @mark.unit_test - def test_two_sample_t_test_regressors(): - """ Test the get_two_sample_t_test_regressors method """ - - result_1, result_2 = PipelineTeam4SZ2.get_two_sample_t_test_regressors( - ['001', '003'], # equal_range_ids - ['002', '004'], # equal_indifference_ids - ['001', '002', '003', '004'], # subject_list - ['01', '02'] # run_list - ) - assert result_1 == { - 'equalRange' : [1, 1, 0, 0, 1, 1, 0, 0], - 'equalIndifference' : [0, 0, 1, 1, 0, 0, 1, 1] - } - assert result_2 == [1, 1, 2, 2, 1, 1, 2, 2] + def test_get_group_level_regressors(): + """ Test the get_group_level_regressors method """ + + regressors, groups = PipelineTeam4SZ2.get_group_level_regressors(['001', '002', '003', '004']) + + for k1, k2 in zip(regressors.keys(), ['equalIndifference', 'equalRange', 'age', 'gender']): + assert k1 == k2 + assert regressors['equalIndifference'] == [1, 0, 1, 0] + assert regressors['equalRange'] == [0, 1, 0, 1] + print(regressors['age']) + print(regressors['gender']) + assert regressors['age'] == [24, 25, 27, 25] + assert regressors['gender'] == [0, 0, 1, 0] + assert groups == [2, 1, 2, 1] @staticmethod @mark.pipeline_test From 8ed2c3ea83a78a4fc54fbfb259dc87940a66b077 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 17 Jun 2024 15:39:21 +0200 Subject: [PATCH 11/17] Adding nuisance regressors in the group level analysis --- narps_open/pipelines/team_4SZ2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_4SZ2.py b/narps_open/pipelines/team_4SZ2.py index 3a536225..96741ef2 100644 --- a/narps_open/pipelines/team_4SZ2.py +++ b/narps_open/pipelines/team_4SZ2.py @@ -44,7 +44,7 @@ def __init__(self): self.group_level_contrasts = [ ('group_equal_indifference', 'T', ['equalIndifference', 'equalRange'], [1, 0]), ('group_equal_range', 'T', ['equalIndifference', 'equalRange'], [0, 1]), - ('group_comp', 'T', ['equalIndifference', 'equalRange'], [-1, 1]) + ('group_comparison', 'T', ['equalIndifference', 'equalRange'], [-1, 1]) ] def get_preprocessing(self): @@ -356,6 +356,7 @@ def get_group_level_analysis(self): specify_model = Node(MultipleRegressDesign(), name = 'specify_model') specify_model.inputs.regressors = regressors specify_model.inputs.groups = groups + specify_model.inputs.contrasts = self.group_level_contrasts # FLAMEO Node - Estimate model estimate_model = Node(FLAMEO(), name = 'estimate_model') From 0dda6cc28e06488c338b741c443c76c5ff3671b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 17 Jun 2024 15:48:37 +0200 
Subject: [PATCH 12/17] Adding nuisance regressors in the group level analysis

---
 narps_open/pipelines/team_4SZ2.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/narps_open/pipelines/team_4SZ2.py b/narps_open/pipelines/team_4SZ2.py
index 96741ef2..05e4a753 100644
--- a/narps_open/pipelines/team_4SZ2.py
+++ b/narps_open/pipelines/team_4SZ2.py
@@ -325,7 +325,7 @@ def get_group_level_analysis(self):
             ),
             name = 'get_masks', iterfield = 'input_str'
         )
-        get_masks.inputs.elements = complete_subject_ids(self.subject_list)
+        get_masks.inputs.elements = complete_sub_ids(self.subject_list)
         group_level.connect(select_files, 'masks', get_masks, 'input_str')

         # Merge Node - Merge cope files

From 4722eb020618f7e6df6cf97521ed3b81a31f061a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Wed, 19 Jun 2024 17:18:55 +0200
Subject: [PATCH 13/17] Adding nuisance regressors in the group level analysis

---
 narps_open/pipelines/team_4SZ2.py | 11 ++++++-----
 tests/pipelines/test_team_4SZ2.py | 16 ++++++++--------
 2 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/narps_open/pipelines/team_4SZ2.py b/narps_open/pipelines/team_4SZ2.py
index 05e4a753..738c449a 100644
--- a/narps_open/pipelines/team_4SZ2.py
+++ b/narps_open/pipelines/team_4SZ2.py
@@ -205,12 +205,13 @@ def get_subject_level_analysis(self):
         return None

     @staticmethod
-    def get_group_level_regressors(subject_list: list):
+    def get_group_level_regressors(subject_list: list, run_list: list):
         """
         Create dictionary of regressors for two sample t-test group analysis.

         Parameters:
             - subject_list: ids of subject for which to do the analysis
+            - run_list: ids of runs for which to do the analysis

         Returns:
             - regressors, dict: containing named lists of regressors.
@@ -222,8 +223,8 @@ def get_group_level_regressors(subject_list: list, run_list: list):
         # * 0 otherwise
         equal_range_group = get_group('equalRange')
         equal_indif_group = get_group('equalIndifference')
-        equal_range_regressor = [1 if s in equal_range_group else 0 for s in subject_list]
-        equal_indif_regressor = [1 if s in equal_indif_group else 0 for s in subject_list]
+        equal_range_regressor = [1 if s in equal_range_group else 0 for s in subject_list for _ in run_list]
+        equal_indif_regressor = [1 if s in equal_indif_group else 0 for s in subject_list for _ in run_list]

         # Get gender and age of participants
         participants_data = get_participants_information()[['participant_id', 'gender', 'age']]
@@ -237,8 +238,8 @@ def get_group_level_regressors(subject_list: list, run_list: list):
         regressors = dict(
             equalIndifference = equal_indif_regressor,
             equalRange = equal_range_regressor,
-            age = [int(a) for a in ages],
-            gender = [1 if i == 'F' else 0 for i in genders]
+            age = [int(a) for a in ages for _ in run_list],
+            gender = [1 if i == 'F' else 0 for i in genders for _ in run_list]
         )

         # Create groups outputs

diff --git a/tests/pipelines/test_team_4SZ2.py b/tests/pipelines/test_team_4SZ2.py
index 8b025db0..69a581be 100644
--- a/tests/pipelines/test_team_4SZ2.py
+++ b/tests/pipelines/test_team_4SZ2.py
@@ -88,17 +88,17 @@ def test_subject_information():
     def test_get_group_level_regressors():
         """ Test the get_group_level_regressors method """

-        regressors, groups = PipelineTeam4SZ2.get_group_level_regressors(['001', '002', '003', '004'])
+        regressors, groups = PipelineTeam4SZ2.get_group_level_regressors(
+            ['001', '002', '003', '004'],
+            ['01', '02'])

         for k1, k2 in zip(regressors.keys(), ['equalIndifference', 'equalRange', 'age', 'gender']):
             assert k1 == k2
-        assert regressors['equalIndifference'] == [1, 0, 1, 0]
-        assert regressors['equalRange'] == [0, 1, 0, 1]
-        print(regressors['age'])
-        print(regressors['gender'])
-        assert regressors['age'] == [24, 25, 27, 25]
-        assert regressors['gender'] == [0, 0, 1, 0]
-        assert groups == [2, 1, 2, 1]
+        assert regressors['equalIndifference'] == [1, 1, 0, 0, 1, 1, 0, 0]
+        assert regressors['equalRange'] == [0, 0, 1, 1, 0, 0, 1, 1]
+        assert regressors['age'] == [24, 24, 25, 25, 27, 27, 25, 25]
+        assert regressors['gender'] == [0, 0, 0, 0, 1, 1, 0, 0]
+        assert groups == [2, 2, 1, 1, 2, 2, 1, 1]

     @staticmethod
     @mark.pipeline_test

From 40b226fb99fc4e18bba75c6db2161d12369c2d66 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Wed, 19 Jun 2024 17:21:01 +0200
Subject: [PATCH 14/17] Adding nuisance regressors in the group level analysis

---
 narps_open/pipelines/team_4SZ2.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/narps_open/pipelines/team_4SZ2.py b/narps_open/pipelines/team_4SZ2.py
index 738c449a..c9c37580 100644
--- a/narps_open/pipelines/team_4SZ2.py
+++ b/narps_open/pipelines/team_4SZ2.py
@@ -351,7 +351,7 @@ def get_group_level_analysis(self):
         group_level.connect(merge_masks, 'merged_file', mask_intersection, 'in_file')

         # Get regressors for the group level analysis
-        regressors, groups = self.get_group_level_regressors(self.subject_list)
+        regressors, groups = self.get_group_level_regressors(self.subject_list, self.run_list)

         # MultipleRegressDesign Node - Specify model
         specify_model = Node(MultipleRegressDesign(), name = 'specify_model')

From 39b9b8ccfe22f51d2b196591bfc045a86ce4f91a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Wed, 19 Jun 2024 17:34:06 +0200
Subject: [PATCH 15/17] Adding nuisance regressors in the group level analysis

---
 narps_open/pipelines/team_4SZ2.py | 16 ++++++++--------
 tests/pipelines/test_team_4SZ2.py |  3 +--
 2 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/narps_open/pipelines/team_4SZ2.py b/narps_open/pipelines/team_4SZ2.py
index c9c37580..dbf23353 100644
--- a/narps_open/pipelines/team_4SZ2.py
+++ b/narps_open/pipelines/team_4SZ2.py
@@ -223,8 +223,10 @@ def get_group_level_regressors(subject_list: list, run_list: list):
         # * 0 otherwise
         equal_range_group = get_group('equalRange')
         equal_indif_group = get_group('equalIndifference')
-        equal_range_regressor = [1 if s in equal_range_group else 0 for s in subject_list for _ in run_list]
-        equal_indif_regressor = [1 if s in equal_indif_group else 0 for s in subject_list for _ in run_list]
+        equal_range_regressor = [
+            1 if s in equal_range_group else 0 for s in subject_list for _ in run_list]
+        equal_indif_regressor = [
+            1 if s in equal_indif_group else 0 for s in subject_list for _ in run_list]

         # Get gender and age of participants
         participants_data = get_participants_information()[['participant_id', 'gender', 'age']]
@@ -242,10 +244,7 @@ def get_group_level_regressors(subject_list: list, run_list: list):
             gender = [1 if i == 'F' else 0 for i in genders for _ in run_list]
         )

-        # Create groups outputs
-        groups = [1 if i == 1 else 2 for i in equal_range_regressor]
-
-        return regressors, groups
+        return regressors

     def get_group_level_analysis(self):
         """
@@ -351,12 +350,13 @@ def get_group_level_analysis(self):
         group_level.connect(merge_masks, 'merged_file', mask_intersection, 'in_file')

         # Get regressors for the group level analysis
-        regressors, groups = self.get_group_level_regressors(self.subject_list, self.run_list)
+        regressors = self.get_group_level_regressors(self.subject_list, self.run_list)

         # MultipleRegressDesign Node - Specify model
+        # NB : no "groups" input is needed because equalRange and equalIndifference groups
+        # are already modeled in the design
         specify_model = Node(MultipleRegressDesign(), name = 'specify_model')
         specify_model.inputs.regressors = regressors
-        specify_model.inputs.groups = groups
         specify_model.inputs.contrasts = self.group_level_contrasts

         # FLAMEO Node - Estimate model

diff --git a/tests/pipelines/test_team_4SZ2.py b/tests/pipelines/test_team_4SZ2.py
index 69a581be..2328885e 100644
--- a/tests/pipelines/test_team_4SZ2.py
+++ b/tests/pipelines/test_team_4SZ2.py
@@ -88,7 +88,7 @@ def test_subject_information():
     def test_get_group_level_regressors():
         """ Test the get_group_level_regressors method """

-        regressors, groups = PipelineTeam4SZ2.get_group_level_regressors(
+        regressors = PipelineTeam4SZ2.get_group_level_regressors(
            ['001', '002', '003', '004'],
            ['01', '02'])

@@ -98,7 +98,6 @@ def test_get_group_level_regressors():
         assert regressors['equalRange'] == [0, 0, 1, 1, 0, 0, 1, 1]
         assert regressors['age'] == [24, 24, 25, 25, 27, 27, 25, 25]
         assert regressors['gender'] == [0, 0, 0, 0, 1, 1, 0, 0]
-        assert groups == [2, 2, 1, 1, 2, 2, 1, 1]

     @staticmethod
     @mark.pipeline_test

From 4279a80dd9529d0ad8d71de7e5ed0cfac626c673 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Wed, 19 Jun 2024 17:54:58 +0200
Subject: [PATCH 16/17] Changing group level outputs

---
 narps_open/pipelines/team_4SZ2.py | 69 ++++++++++++++++---------------
 tests/pipelines/test_team_4SZ2.py |  4 +-
 2 files changed, 38 insertions(+), 35 deletions(-)

diff --git a/narps_open/pipelines/team_4SZ2.py b/narps_open/pipelines/team_4SZ2.py
index dbf23353..d5c9ceb2 100644
--- a/narps_open/pipelines/team_4SZ2.py
+++ b/narps_open/pipelines/team_4SZ2.py
@@ -401,10 +401,13 @@ def get_group_level_outputs(self):
             'file': [
                 '_cluster0/zstat1_threshold.nii.gz',
                 '_cluster1/zstat2_threshold.nii.gz',
+                '_cluster2/zstat3_threshold.nii.gz',
                 'tstat1.nii.gz',
                 'tstat2.nii.gz',
+                'tstat3.nii.gz',
                 'zstat1.nii.gz',
-                'zstat2.nii.gz'
+                'zstat2.nii.gz',
+                'zstat3.nii.gz'
             ]
         }
         parameter_sets = product(*parameters.values())
@@ -422,41 +425,41 @@ def get_hypotheses_outputs(self):
         nb_sub = len(self.subject_list)
         files = [
-            join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
+            # Hypothesis 1
+            join(f'group_level_analysis_nsub_{nb_sub}',
                 '_contrast_id_1', '_cluster0', 'zstat1_threshold.nii.gz'),
-            join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
-                '_contrast_id_1', 'zstat1.nii.gz'),
-            join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
+            join(f'group_level_analysis_nsub_{nb_sub}', '_contrast_id_1', 'zstat1.nii.gz'),
+            # Hypothesis 2
+            join(f'group_level_analysis_nsub_{nb_sub}',
+                '_contrast_id_1', '_cluster1', 'zstat2_threshold.nii.gz'),
+            join(f'group_level_analysis_nsub_{nb_sub}', '_contrast_id_1', 'zstat2.nii.gz'),
+            # Hypothesis 3
+            join(f'group_level_analysis_nsub_{nb_sub}',
                 '_contrast_id_1', '_cluster0', 'zstat1_threshold.nii.gz'),
-            join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_1', 'zstat1.nii.gz'),
-            join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
-                '_contrast_id_1', '_cluster0', 'zstat1_threshold.nii.gz'),
-            join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
-                '_contrast_id_1', 'zstat1.nii.gz'),
-            join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_1', '_cluster0', 'zstat1_threshold.nii.gz'),
-            join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_1', 'zstat1.nii.gz'),
-            join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
-                '_contrast_id_2', '_cluster1', 'zstat2_threshold.nii.gz'),
-            join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
-                '_contrast_id_2', 'zstat2.nii.gz'),
-            join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_2', '_cluster1', 'zstat2_threshold.nii.gz'),
-            join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_2', 'zstat2.nii.gz'),
-            join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
+            join(f'group_level_analysis_nsub_{nb_sub}', '_contrast_id_1', 'zstat1.nii.gz'),
+            # Hypothesis 4
+            join(f'group_level_analysis_nsub_{nb_sub}',
+                '_contrast_id_1', '_cluster1', 'zstat2_threshold.nii.gz'),
+            join(f'group_level_analysis_nsub_{nb_sub}', '_contrast_id_1', 'zstat2.nii.gz'),
+            # Hypothesis 5
+            join(f'group_level_analysis_nsub_{nb_sub}',
                 '_contrast_id_2', '_cluster0', 'zstat1_threshold.nii.gz'),
-            join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}',
-                '_contrast_id_2', 'zstat1.nii.gz'),
-            join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_2', '_cluster0', 'zstat1_threshold.nii.gz'),
-            join(f'group_level_analysis_equalRange_nsub_{nb_sub}',
-                '_contrast_id_2', 'zstat1.nii.gz'),
-            join(f'group_level_analysis_groupComp_nsub_{nb_sub}',
+            join(f'group_level_analysis_nsub_{nb_sub}', '_contrast_id_2', 'zstat1.nii.gz'),
+            # Hypothesis 6
+            join(f'group_level_analysis_nsub_{nb_sub}',
+                '_contrast_id_2', '_cluster1', 'zstat2_threshold.nii.gz'),
+            join(f'group_level_analysis_nsub_{nb_sub}', '_contrast_id_2', 'zstat2.nii.gz'),
+            # Hypothesis 7
+            join(f'group_level_analysis_nsub_{nb_sub}',
                 '_contrast_id_2', '_cluster0', 'zstat1_threshold.nii.gz'),
-            join(f'group_level_analysis_groupComp_nsub_{nb_sub}',
-                '_contrast_id_2', 'zstat1.nii.gz')
+            join(f'group_level_analysis_nsub_{nb_sub}', '_contrast_id_2', 'zstat1.nii.gz'),
+            # Hypothesis 8
+            join(f'group_level_analysis_nsub_{nb_sub}',
+                '_contrast_id_2', '_cluster1', 'zstat2_threshold.nii.gz'),
+            join(f'group_level_analysis_nsub_{nb_sub}', '_contrast_id_2', 'zstat2.nii.gz'),
+            # Hypothesis 9
+            join(f'group_level_analysis_nsub_{nb_sub}',
+                '_contrast_id_2', '_cluster2', 'zstat3_threshold.nii.gz'),
+            join(f'group_level_analysis_nsub_{nb_sub}', '_contrast_id_2', 'zstat3.nii.gz')
         ]

         return [join(self.directories.output_dir, f) for f in files]

diff --git a/tests/pipelines/test_team_4SZ2.py b/tests/pipelines/test_team_4SZ2.py
index 2328885e..68ebb9c7 100644
--- a/tests/pipelines/test_team_4SZ2.py
+++ b/tests/pipelines/test_team_4SZ2.py
@@ -49,11 +49,11 @@ def test_outputs():

         # 1 - 1 subject outputs
         pipeline.subject_list = ['001']
-        helpers.test_pipeline_outputs(pipeline, [0, 2*4*1*4, 0, 6*2*2 + 3*2, 18])
+        helpers.test_pipeline_outputs(pipeline, [0, 2*4*1*4, 0, 2*9, 18])

         # 2 - 4 subjects outputs
         pipeline.subject_list = ['001', '002', '003', '004']
-        helpers.test_pipeline_outputs(pipeline, [0, 2*4*4*4, 0, 6*2*2 + 3*2, 18])
+        helpers.test_pipeline_outputs(pipeline, [0, 2*4*4*4, 0, 2*9, 18])

     @staticmethod
     @mark.unit_test

From 3ff9cf4a0057993cf33fa3839e6d1fe129d3f209 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Fri, 21 Jun 2024 13:31:04 +0200
Subject: [PATCH 17/17] FILMGLS prewhitens data

---
 narps_open/pipelines/team_4SZ2.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/narps_open/pipelines/team_4SZ2.py b/narps_open/pipelines/team_4SZ2.py
index d5c9ceb2..76f80825 100644
--- a/narps_open/pipelines/team_4SZ2.py
+++ b/narps_open/pipelines/team_4SZ2.py
@@ -157,6 +157,7 @@ def get_run_level_analysis(self):
         # FILMGLS Node - Estimate first level model
         model_estimate = Node(FILMGLS(), name='model_estimate')
+        model_estimate.inputs.output_pwdata = True
         run_level.connect(smoothing_func, 'out_file', model_estimate, 'in_file')
         run_level.connect(model_generation, 'con_file', model_estimate, 'tcon_file')
         run_level.connect(model_generation, 'design_file', model_estimate, 'design_file')
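
Note: FILMGLS wraps FSL's film_gls, which prewhitens each voxel's time series (estimating its autocorrelation and removing it) before fitting the run level GLM. A minimal sketch of the node as the last patch configures it, shown in isolation; to our understanding the output_pwdata input maps to film_gls's --outputPWdata option, which saves the prewhitened data alongside the usual estimates, but that mapping is an assumption, not something stated in the patch itself:

    from nipype import Node
    from nipype.interfaces.fsl import FILMGLS

    # FILM prewhitens the data before the GLM fit; output_pwdata
    # additionally writes the prewhitened data to the results directory
    # (assumed to correspond to film_gls --outputPWdata).
    model_estimate = Node(FILMGLS(), name = 'model_estimate')
    model_estimate.inputs.output_pwdata = True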
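
Note: a minimal, self-contained sketch of the per-run regressor expansion that patches 13 and 15 converge on. The group level model stacks one cope per subject per run, so every subject level value is repeated once per run (the "for _ in run_list" in the comprehensions). The group membership, ages and genders below are hypothetical stand-ins for what get_group() and get_participants_information() return; the expected lists are the ones asserted in the unit test.

    # Hypothetical participant data: '001' and '003' in the equalIndifference
    # group, '002' and '004' in the equalRange group.
    subject_list = ['001', '002', '003', '004']
    run_list = ['01', '02']
    equal_range_group = ['002', '004']
    ages = {'001': 24, '002': 25, '003': 27, '004': 25}
    genders = {'001': 'M', '002': 'M', '003': 'F', '004': 'M'}

    # One cope per subject per run: repeat each subject value once per run.
    equal_range_regressor = [
        1 if s in equal_range_group else 0 for s in subject_list for _ in run_list]
    equal_indif_regressor = [
        0 if s in equal_range_group else 1 for s in subject_list for _ in run_list]
    age = [ages[s] for s in subject_list for _ in run_list]
    gender = [1 if genders[s] == 'F' else 0 for s in subject_list for _ in run_list]

    assert equal_indif_regressor == [1, 1, 0, 0, 1, 1, 0, 0]
    assert equal_range_regressor == [0, 0, 1, 1, 0, 0, 1, 1]
    assert age == [24, 24, 25, 25, 27, 27, 25, 25]
    assert gender == [0, 0, 0, 0, 1, 1, 0, 0]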
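
Note: a sketch of how these regressors and the group_level_contrasts from patch 11 come together in the MultipleRegressDesign node. The regressor values are the hypothetical ones from the sketch above; in the pipeline they come from get_group_level_regressors. Regressors left out of a contrast's condition list, here age and gender, get an implicit weight of zero, which is how they act as nuisance covariates rather than effects of interest.

    from nipype import Node
    from nipype.interfaces.fsl import MultipleRegressDesign

    # Two group mean regressors plus age and gender nuisance regressors
    # (values are hypothetical, taken from the sketch above).
    specify_model = Node(MultipleRegressDesign(), name = 'specify_model')
    specify_model.inputs.regressors = dict(
        equalIndifference = [1, 1, 0, 0, 1, 1, 0, 0],
        equalRange = [0, 0, 1, 1, 0, 0, 1, 1],
        age = [24, 24, 25, 25, 27, 27, 25, 25],
        gender = [0, 0, 0, 0, 1, 1, 0, 0]
    )
    # T contrasts weight only the group regressors; age and gender are
    # implicitly weighted 0.
    specify_model.inputs.contrasts = [
        ('group_equal_indifference', 'T', ['equalIndifference', 'equalRange'], [1, 0]),
        ('group_equal_range', 'T', ['equalIndifference', 'equalRange'], [0, 1]),
        ('group_comparison', 'T', ['equalIndifference', 'equalRange'], [-1, 1])
    ]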